From 06e79aad213248169727312ae12b5f2ab1655011 Mon Sep 17 00:00:00 2001 From: zhanglihui Date: Wed, 27 Sep 2023 00:43:12 -0700 Subject: [PATCH 001/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dcomment=E4=B8=BAmysql?= =?UTF-8?q?=E5=85=BC=E5=AE=B9=E6=A8=A1=E5=BC=8F=E4=B8=8B=E5=A4=A7=E5=B0=8F?= =?UTF-8?q?=E5=86=99=E6=95=8F=E6=84=9F=E6=97=B6,=E4=B8=BA=E5=A4=A7?= =?UTF-8?q?=E5=86=99=E8=A1=A8=E6=B7=BB=E5=8A=A0=E6=B3=A8=E9=87=8A=E6=97=B6?= =?UTF-8?q?=E6=89=BE=E4=B8=8D=E5=88=B0=E8=A1=A8=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_parser/gram.y | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 0c89553f7..eb8f4285b 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -16237,8 +16237,7 @@ CommentStmt: ; comment_type: - COLUMN { $$ = OBJECT_COLUMN; } - | DATABASE { $$ = OBJECT_DATABASE; } + DATABASE { $$ = OBJECT_DATABASE; } | SCHEMA { $$ = OBJECT_SCHEMA; } | INDEX { $$ = OBJECT_INDEX; } | SEQUENCE { $$ = OBJECT_SEQUENCE; } @@ -16262,6 +16261,7 @@ dolphin_comment_type: | FOREIGN TABLE { $$ = OBJECT_FOREIGN_TABLE; } | ROLE { $$ = OBJECT_ROLE; } | USER { $$ = OBJECT_USER; } + | COLUMN { $$ = OBJECT_COLUMN; } ; comment_text: -- Gitee From 09a0395dfb95ae4e970ea677fdd9ab45dbc5e03e Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 28 Sep 2023 16:30:07 +0800 Subject: [PATCH 002/434] Add alter.cpp --- contrib/dolphin/Makefile | 2 +- contrib/dolphin/checklist/checklist.md | 1 + .../plugin_optimizer/commands/Makefile | 2 +- .../plugin_optimizer/commands/alter.cpp | 1019 +++++++++++++++++ contrib/filelist.txt | 1 + 5 files changed, 1023 insertions(+), 2 deletions(-) create mode 100644 contrib/dolphin/plugin_optimizer/commands/alter.cpp diff --git a/contrib/dolphin/Makefile b/contrib/dolphin/Makefile index fe8912f42..50cb20df4 100644 --- a/contrib/dolphin/Makefile +++ 
b/contrib/dolphin/Makefile @@ -68,7 +68,7 @@ OBJS += $(prep)/prepunion.o OBJS += $(optimizer_util)/plancat.o $(optimizer_util)/relnode.o $(optimizer_util)/clauses.o -OBJS += $(commands)/functioncmds.o $(commands)/foreigncmds.o $(commands)/copy.o $(commands)/schemacmds.o $(commands)/typecmds.o $(commands)/user.o +OBJS += $(commands)/functioncmds.o $(commands)/foreigncmds.o $(commands)/copy.o $(commands)/schemacmds.o $(commands)/typecmds.o $(commands)/user.o $(commands)/alter.o OBJS += $(pl)/pl_gram.o $(pl)/pl_scanner.o $(pl)/pl_comp.o $(pl)/pl_handler.o diff --git a/contrib/dolphin/checklist/checklist.md b/contrib/dolphin/checklist/checklist.md index d765a9343..49945626c 100644 --- a/contrib/dolphin/checklist/checklist.md +++ b/contrib/dolphin/checklist/checklist.md @@ -112,6 +112,7 @@ |plugin_optimizer\commands|schemacmds.cpp |src\gausskernel\optimizer\commands\schemacmds.cpp | |plugin_optimizer\commands|typecmds.cpp |src\gausskernel\optimizer\commands\typecmds.cpp | |plugin_optimizer\commands|user.cpp |src\gausskernel\optimizer\commands\user.cpp | +|plugin_optimizer\commands|alter.cpp |src\gausskernel\optimizer\commands\alter.cpp | |plugin_optimizer\plan|pgxcplan_single.cpp |src\gausskernel\optimizer\plan\pgxcplan_single.cpp | |plugin_optimizer\plan|planner.cpp |src\gausskernel\optimizer\plan\planner.cpp | |plugin_optimizer\plan|streamwalker.cpp |src\gausskernel\optimizer\plan\streamwalker.cpp | diff --git a/contrib/dolphin/plugin_optimizer/commands/Makefile b/contrib/dolphin/plugin_optimizer/commands/Makefile index 59f87e647..04c87f736 100755 --- a/contrib/dolphin/plugin_optimizer/commands/Makefile +++ b/contrib/dolphin/plugin_optimizer/commands/Makefile @@ -19,7 +19,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif -OBJS = functioncmds.o foreigncmds.o copy.o schemacmds.o typecmds.o user.o +OBJS = functioncmds.o foreigncmds.o copy.o schemacmds.o typecmds.o user.o alter.o include $(top_srcdir)/src/gausskernel/common.mk diff --git 
a/contrib/dolphin/plugin_optimizer/commands/alter.cpp b/contrib/dolphin/plugin_optimizer/commands/alter.cpp new file mode 100644 index 000000000..6c9f208dd --- /dev/null +++ b/contrib/dolphin/plugin_optimizer/commands/alter.cpp @@ -0,0 +1,1019 @@ +/* ------------------------------------------------------------------------- + * + * alter.cpp + * Drivers for generic alter commands + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors + * + * + * IDENTIFICATION + * src/gausskernel/optimizer/commands/alter.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/tableam.h" +#include "catalog/dependency.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_event_trigger.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_language.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_config_map.h" +#include "catalog/indexing.h" +#include "catalog/namespace.h" +#include "catalog/pg_largeobject.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_publication.h" +#include "catalog/pg_subscription.h" +#include "catalog/pg_synonym.h" +#include "commands/alter.h" +#include "commands/collationcmds.h" +#include "commands/conversioncmds.h" +#include "commands/dbcommands.h" +#include "commands/defrem.h" +#include "commands/directory.h" +#include "commands/event_trigger.h" +#include "commands/extension.h" +#include "commands/proclang.h" +#include 
"commands/publicationcmds.h" +#include "commands/schemacmds.h" +#include "commands/subscriptioncmds.h" +#include "commands/sec_rls_cmds.h" +#include "commands/tablecmds.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "commands/typecmds.h" +#include "commands/user.h" +#include "miscadmin.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/rel_gs.h" +#include "utils/syscache.h" +#include "gs_policy/gs_policy_masking.h" + +/* + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * type, the function appropriate to that type is executed. + */ +static void +report_name_conflict(Oid classId, const char *name) +{ + char *msgfmt; + switch (classId) { + + case EventTriggerRelationId: + msgfmt = gettext_noop("event trigger \"%s\" already exists"); + break; + case ForeignDataWrapperRelationId: + msgfmt = gettext_noop("foreign-data wrapper \"%s\" already exists"); + break; + case ForeignServerRelationId: + msgfmt = gettext_noop("server \"%s\" already exists"); + break; + case LanguageRelationId: + msgfmt = gettext_noop("language \"%s\" already exists"); + break; + case PublicationRelationId: + msgfmt = gettext_noop("publication \"%s\" already exists"); + break; + case SubscriptionRelationId: + msgfmt = gettext_noop("subscription \"%s\" already exists"); + break; + default: + elog(ERROR, "unsupported object class %u", classId); + break; + } + + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg(msgfmt, name))); +} + +static void +report_namespace_conflict(Oid classId, const char *name, Oid nspOid) +{ + char *msgfmt; + Assert(OidIsValid(nspOid)); + + switch (classId) { + case ConversionRelationId: + Assert(OidIsValid(nspOid)); + msgfmt = gettext_noop("conversion \"%s\" already exists in schema \"%s\""); + break; + case TSParserRelationId: + Assert(OidIsValid(nspOid)); + msgfmt = gettext_noop("text search parser \"%s\" already exists in schema 
\"%s\""); + break; + case TSDictionaryRelationId: + Assert(OidIsValid(nspOid)); + msgfmt = gettext_noop("text search dictionary \"%s\" already exists in schema \"%s\""); + break; + case TSTemplateRelationId: + Assert(OidIsValid(nspOid)); + msgfmt = gettext_noop("text search template \"%s\" already exists in schema \"%s\""); + break; + case TSConfigRelationId: + Assert(OidIsValid(nspOid)); + msgfmt = gettext_noop("text search configuration \"%s\" already exists in schema \"%s\""); + break; + default: + elog(ERROR, "unsupported object class %u", classId); + break; + } + + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg(msgfmt, name, get_namespace_name(nspOid)))); +} + +/* + * AlterObjectRename_internal + * + * Generic function to rename the given object, for simple cases (won't + * work for tables, nor other cases where we need to do more than change + * the name column of a single catalog entry). + * + * rel: catalog relation containing object (RowExclusiveLock'd by caller) + * objectId: OID of object to be renamed + * new_name: CString representation of new name + */ +static void +AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) +{ + Oid classId = RelationGetRelid(rel); + int oidCacheId = get_object_catcache_oid(classId); + int nameCacheId = get_object_catcache_name(classId); + AttrNumber Anum_name = get_object_attnum_name(classId); + AttrNumber Anum_namespace = get_object_attnum_namespace(classId); + + HeapTuple oldtup; + HeapTuple newtup; + bool isnull; + Oid namespaceId; + Oid userId; + char *old_name; + AclResult aclresult; + Datum *values; + bool *nulls; + bool *replaces; + NameData nameattrdata; + Datum datum; + + oldtup = SearchSysCache1(oidCacheId, ObjectIdGetDatum(objectId)); + if (!HeapTupleIsValid(oldtup)) + elog(ERROR, "cache lookup failed for object %u of catalog \"%s\"", + objectId, RelationGetRelationName(rel)); + + datum = heap_getattr(oldtup, Anum_name, + RelationGetDescr(rel), &isnull); + Assert(!isnull); 
+ old_name = NameStr(*(DatumGetName(datum))); + + /* Get OID of namespace */ + if (Anum_namespace > 0) { + datum = heap_getattr(oldtup, Anum_namespace, + RelationGetDescr(rel), &isnull); + Assert(!isnull); + namespaceId = DatumGetObjectId(datum); + } + else + namespaceId = InvalidOid; + + /* Permission checks ... superusers can always do it */ + if (!superuser()) { + ObjectType objType = get_object_type(classId, objectId); + if (objType == OBJECT_TSTEMPLATE) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be system admin to rename text search templates"))); + } else if (objType == OBJECT_TSPARSER) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be system admin to rename text search parser"))); + } + + userId = GetUserId(); + switch (objType) { + /*OBJECT_AGGREGATE's classid is ProcedureRelationId, so objType should be OBJECT_FUNCTION*/ + case OBJECT_FUNCTION: + if (pg_proc_aclcheck(objectId, userId, ACL_ALTER) != ACLCHECK_OK && + !pg_proc_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_PROC, old_name); + break; + case OBJECT_COLLATION: + if (pg_collation_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_COLLATION, old_name); + break; + case OBJECT_CONVERSION: + if (pg_conversion_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION, old_name); + break; + case OBJECT_EVENT_TRIGGER: + if (!pg_event_trigger_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EVENT_TRIGGER, old_name); + break; + case OBJECT_FDW: + if (!pg_foreign_data_wrapper_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FDW, old_name); + break; + case OBJECT_FOREIGN_SERVER: + if (pg_foreign_server_aclcheck(objectId, userId, ACL_ALTER) != ACLCHECK_OK && + !pg_foreign_server_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_FOREIGN_SERVER, old_name); + break; + case OBJECT_OPCLASS: + if 
(!pg_opclass_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPCLASS, old_name); + break; + case OBJECT_OPFAMILY: + if (!pg_opfamily_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPFAMILY, old_name); + break; + case OBJECT_LANGUAGE: + if (pg_language_aclcheck(objectId, userId, ACL_ALTER) != ACLCHECK_OK && + !pg_language_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_LANGUAGE, old_name); + break; + case OBJECT_TSDICTIONARY: + if (!pg_ts_dict_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSDICTIONARY, old_name); + break; + case OBJECT_TSCONFIGURATION: + if (!pg_ts_config_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSCONFIGURATION, old_name); + break; + case OBJECT_PUBLICATION: + if (!pg_publication_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PUBLICATION, old_name); + break; + case OBJECT_SUBSCRIPTION: + if (!pg_subscription_ownercheck(objectId, userId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_SUBSCRIPTION, old_name); + break; + default: { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized object type: %d", (int)objType))); + } break; + } + /* User must have CREATE privilege on the namespace */ + if (OidIsValid(namespaceId)) { + aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), + ACL_CREATE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + get_namespace_name(namespaceId)); + } + } + + /* + * Check for duplicate name (more friendly than unique-index failure). + * Since this is just a friendliness check, we can just skip it in cases + * where there isn't suitable support. 
+ */ + if (classId == ProcedureRelationId) { + Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(oldtup); + + IsThereFunctionInNamespace(new_name, proc->pronargs, + &(proc->proargtypes), proc->pronamespace); + } + else if (classId == CollationRelationId) { + Form_pg_collation coll = (Form_pg_collation) GETSTRUCT(oldtup); + + IsThereCollationInNamespace(new_name, coll->collnamespace); + } + else if (classId == OperatorClassRelationId) { + Form_pg_opclass opc = (Form_pg_opclass) GETSTRUCT(oldtup); + + IsThereOpClassInNamespace(new_name, opc->opcmethod, + opc->opcnamespace); + } + else if (classId == OperatorFamilyRelationId) { + Form_pg_opfamily opf = (Form_pg_opfamily) GETSTRUCT(oldtup); + + IsThereOpFamilyInNamespace(new_name, opf->opfmethod, + opf->opfnamespace); + } + else if (classId == SubscriptionRelationId) { + if (SearchSysCacheExists2(SUBSCRIPTIONNAME, u_sess->proc_cxt.MyDatabaseId, + CStringGetDatum(new_name))) + report_name_conflict(classId, new_name); + } + else if (nameCacheId >= 0) { + if (OidIsValid(namespaceId)) { + if (SearchSysCacheExists2(nameCacheId, + CStringGetDatum(new_name), + ObjectIdGetDatum(namespaceId))) + report_namespace_conflict(classId, new_name, namespaceId); + } + else { + if (SearchSysCacheExists1(nameCacheId, + CStringGetDatum(new_name))) + report_name_conflict(classId, new_name); + } + } + + /* Build modified tuple */ + values = (Datum*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(Datum)); + nulls = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + replaces = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + namestrcpy(&nameattrdata, new_name); + values[Anum_name - 1] = NameGetDatum(&nameattrdata); + replaces[Anum_name - 1] = true; + newtup = heap_modify_tuple(oldtup, RelationGetDescr(rel), + values, nulls, replaces); + + /* Perform actual update */ + simple_heap_update(rel, &oldtup->t_self, newtup); + CatalogUpdateIndexes(rel, newtup); + + /* Release memory */ + pfree(values); + 
pfree(nulls); + pfree(replaces); + heap_freetuple(newtup); + + ReleaseSysCache(oldtup); +} + +/* + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * type, the function appropriate to that type is executed. + */ +ObjectAddress +ExecRenameStmt(RenameStmt *stmt) +{ + ObjectAddress address; + switch (stmt->renameType) { + case OBJECT_TABCONSTRAINT: + case OBJECT_DOMCONSTRAINT: + return RenameConstraint(stmt); + + case OBJECT_DATABASE: + return RenameDatabase(stmt->subname, stmt->newname); + + case OBJECT_PARTITION: + return renamePartition(stmt); + + case OBJECT_PARTITION_INDEX: + return renamePartitionIndex(stmt); + + case OBJECT_RLSPOLICY: + return RenameRlsPolicy(stmt); + + case OBJECT_ROLE: + return RenameRole(stmt->subname, stmt->newname); + + case OBJECT_USER: { + address = RenameRole(stmt->subname, stmt->newname); + + /* + * Rename user need rename the schema that has the same name which + * owned by the user. + */ + RenameSchema(stmt->subname, stmt->newname); + return address; + } + case OBJECT_SCHEMA: + return RenameSchema(stmt->subname, stmt->newname); + + case OBJECT_TABLESPACE: + return RenameTableSpace(stmt->subname, stmt->newname); + + case OBJECT_TABLE: + case OBJECT_SEQUENCE: + case OBJECT_LARGE_SEQUENCE: + case OBJECT_VIEW: + case OBJECT_CONTQUERY: + case OBJECT_MATVIEW: + case OBJECT_INDEX: + case OBJECT_FOREIGN_TABLE: + case OBJECT_STREAM: + return RenameRelation(stmt); + + case OBJECT_COLUMN: + case OBJECT_ATTRIBUTE: + return renameatt(stmt); + + case OBJECT_TRIGGER: + return renametrig(stmt); + + case OBJECT_DOMAIN: + case OBJECT_TYPE: + return RenameType(stmt); + case OBJECT_FUNCTION: + return RenameFunction(stmt->object, stmt->objarg, stmt->newname); + case OBJECT_AGGREGATE: + case OBJECT_COLLATION: + case OBJECT_CONVERSION: + case OBJECT_EVENT_TRIGGER: + case OBJECT_FDW: + case OBJECT_FOREIGN_SERVER: + case OBJECT_OPCLASS: + case OBJECT_OPFAMILY: + case OBJECT_LANGUAGE: + case OBJECT_TSCONFIGURATION: + case 
OBJECT_TSDICTIONARY: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + case OBJECT_PUBLICATION: + case OBJECT_SUBSCRIPTION: + { + ObjectAddress address; + Relation catalog; + Relation relation; + address = get_object_address(stmt->renameType, + stmt->object, stmt->objarg, + &relation, + AccessExclusiveLock, false); + Assert(relation == NULL); + catalog = heap_open(address.classId, RowExclusiveLock); + AlterObjectRename_internal(catalog, + address.objectId, + stmt->newname); + heap_close(catalog, RowExclusiveLock); + + return address; + } + + case OBJECT_DATA_SOURCE: + return RenameDataSource(stmt->subname, stmt->newname); + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized rename stmt type: %d", (int)stmt->renameType))); + return InvalidObjectAddress; + } + return InvalidObjectAddress; +} + +/* + * Executes an ALTER OBJECT / SET SCHEMA statement. Based on the object + * type, the function appropriate to that type is executed. + */ +ObjectAddress ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt* stmt, ObjectAddress *oldSchemaAddr) +{ + ObjectAddress address; + Oid oldNspOid; + switch (stmt->objectType) { + case OBJECT_EXTENSION: + address = AlterExtensionNamespace(stmt->object, stmt->newschema); + break; + case OBJECT_OPERATOR: + address = AlterOperatorNamespace(stmt->object, stmt->objarg, stmt->newschema); + break; + case OBJECT_SEQUENCE: + case OBJECT_LARGE_SEQUENCE: + case OBJECT_TABLE: + case OBJECT_VIEW: + case OBJECT_CONTQUERY: + case OBJECT_MATVIEW: + case OBJECT_FOREIGN_TABLE: + case OBJECT_STREAM: + if (stmt->objectType == OBJECT_STREAM) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support feature"), + errdetail("target table is a stream"))); + address = AlterTableNamespace(stmt, oldSchemaAddr ? 
&oldNspOid : NULL); + break; + + /* generic code path */ + case OBJECT_AGGREGATE: + case OBJECT_COLLATION: + case OBJECT_CONVERSION: + case OBJECT_FUNCTION: + case OBJECT_OPCLASS: + case OBJECT_OPFAMILY: + case OBJECT_TSCONFIGURATION: + case OBJECT_TSDICTIONARY: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + { + Relation catalog; + Relation relation; + Oid classId; + Oid nspOid; + address = get_object_address(stmt->objectType, + stmt->object, + stmt->objarg, + &relation, + AccessExclusiveLock, + false); + Assert(relation == NULL); + classId = address.classId; + nspOid = LookupCreationNamespace(stmt->newschema); + if (stmt->objectType == OBJECT_FUNCTION) { + /* + * Check function name to ensure that it doesn't conflict with existing synonym. + */ + + Relation procRel = heap_open(ProcedureRelationId, RowExclusiveLock); + + HeapTuple tup = SearchSysCacheCopy1(PROCOID, ObjectIdGetDatum(address.objectId)); + if (!HeapTupleIsValid(tup)) + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for function %u", address.objectId))); + Form_pg_proc proc = (Form_pg_proc)GETSTRUCT(tup); + heap_close(procRel, RowExclusiveLock); + if (!IsInitdb && GetSynonymOid(NameStr(proc->proname), nspOid, true) != InvalidOid) { + + ereport(ERROR, + (errmsg("function name is already used by an existing synonym in schema \"%s\"", + get_namespace_name(nspOid)))); + } + } + catalog = heap_open(classId, RowExclusiveLock); + oldNspOid = AlterObjectNamespace_internal(catalog, address.objectId, + nspOid); + heap_close(catalog, RowExclusiveLock); + } + break; + + case OBJECT_TYPE: + case OBJECT_DOMAIN: + address = AlterTypeNamespace(stmt->object, stmt->newschema, stmt->objectType); + break; + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized AlterObjectSchemaStmt type: %d", (int)stmt->objectType))); + return InvalidObjectAddress; + } + return address; +} + +/* + * Change an object's namespace given its classOid and object 
Oid. + * + * Objects that don't have a namespace should be ignored. + * + * This function is currently used only by ALTER EXTENSION SET SCHEMA, + * so it only needs to cover object types that can be members of an + * extension, and it doesn't have to deal with certain special cases + * such as not wanting to process array types --- those should never + * be direct members of an extension anyway. + * + * Returns the OID of the object's previous namespace, or InvalidOid if + * object doesn't have a schema. + */ +Oid AlterObjectNamespace_oid(Oid classId, Oid objid, Oid nspOid, ObjectAddresses* objsMoved) +{ + Oid oldNspOid = InvalidOid; + ObjectAddress dep; + + dep.classId = classId; + dep.objectId = objid; + dep.objectSubId = 0; + + switch (getObjectClass(&dep)) { + case OCLASS_CLASS: { + Relation rel; + + rel = relation_open(objid, AccessExclusiveLock); + oldNspOid = RelationGetNamespace(rel); + + AlterTableNamespaceInternal(rel, oldNspOid, nspOid, objsMoved); + + relation_close(rel, NoLock); + break; + } + + case OCLASS_PROC: + oldNspOid = AlterFunctionNamespace_oid(objid, nspOid); + break; + + case OCLASS_TYPE: + oldNspOid = AlterTypeNamespace_oid(objid, nspOid, objsMoved); + break; + + case OCLASS_COLLATION: + oldNspOid = AlterCollationNamespace_oid(objid, nspOid); + break; + + case OCLASS_CONVERSION: + oldNspOid = AlterConversionNamespace_oid(objid, nspOid); + break; + + case OCLASS_OPERATOR: + oldNspOid = AlterOperatorNamespace_oid(objid, nspOid); + break; + + case OCLASS_OPCLASS: + oldNspOid = AlterOpClassNamespace_oid(objid, nspOid); + break; + + case OCLASS_OPFAMILY: + oldNspOid = AlterOpFamilyNamespace_oid(objid, nspOid); + break; + + case OCLASS_TSPARSER: + oldNspOid = AlterTSParserNamespace_oid(objid, nspOid); + break; + + case OCLASS_TSDICT: + oldNspOid = AlterTSDictionaryNamespace_oid(objid, nspOid); + break; + + case OCLASS_TSTEMPLATE: + oldNspOid = AlterTSTemplateNamespace_oid(objid, nspOid); + break; + + case OCLASS_TSCONFIG: + oldNspOid = 
AlterTSConfigurationNamespace_oid(objid, nspOid); + break; + + default: + break; + } + + return oldNspOid; +} +/* + * Generic function to change the namespace of a given object, for simple + * cases (won't work for tables, nor other cases where we need to do more + * than change the namespace column of a single catalog entry). + * + * rel: catalog relation containing object (RowExclusiveLock'd by caller) + * objid: OID of object to change the namespace of + * nspOid: OID of new namespace + * + * Returns the OID of the object's previous namespace. + */ + Oid +AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) +{ + Oid classId = RelationGetRelid(rel); + int oidCacheId = get_object_catcache_oid(classId); + int nameCacheId = get_object_catcache_name(classId); + AttrNumber Anum_name = get_object_attnum_name(classId); + AttrNumber Anum_namespace = get_object_attnum_namespace(classId); + AttrNumber Anum_owner = get_object_attnum_owner(classId); + Oid oldNspOid; + Datum name, + obj_namespace; + bool isnull; + HeapTuple tup, + newtup; + Datum *values; + bool *nulls; + bool *replaces; + + tup = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + elog(ERROR, "cache lookup failed for object %u of catalog \"%s\"", + objid, RelationGetRelationName(rel)); + + name = heap_getattr(tup, Anum_name, RelationGetDescr(rel), &isnull); + Assert(!isnull); + obj_namespace = (Datum)heap_getattr(tup, Anum_namespace, RelationGetDescr(rel), + &isnull); + Assert(!isnull); + oldNspOid = DatumGetObjectId(obj_namespace); + + /* + * If the object is already in the correct namespace, we don't need to do + * anything except fire the object access hook. + */ + if (oldNspOid == nspOid) { + return oldNspOid; + } + + /* Check basic namespace related issues */ + CheckSetNamespace(oldNspOid, nspOid, classId, objid); + + /* Permission checks ... 
superusers can always do it */ + if (!superuser()) { + Datum owner; + Oid ownerId; + AclResult aclresult; + + /* Fail if object does not have an explicit owner */ + if (Anum_owner <= 0) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to set schema of %s", + getObjectDescriptionOids(classId, objid))))); + + /* Otherwise, must be owner of the existing object */ + owner = heap_getattr(tup, Anum_owner, RelationGetDescr(rel), &isnull); + Assert(!isnull); + ownerId = DatumGetObjectId(owner); + + if (!has_privs_of_role(GetUserId(), ownerId)) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + NameStr(*(DatumGetName(name)))); + + /* User must have CREATE privilege on new namespace */ + aclresult = pg_namespace_aclcheck(nspOid, GetUserId(), ACL_CREATE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + get_namespace_name(nspOid)); + } + + /* + * Check for duplicate name (more friendly than unique-index failure). + * Since this is just a friendliness check, we can just skip it in cases + * where there isn't suitable support. 
+ */ + if (classId == ProcedureRelationId) { + Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(tup); + + IsThereFunctionInNamespace(NameStr(proc->proname), proc->pronargs, + &proc->proargtypes, nspOid); + } + else if (classId == CollationRelationId) { + Form_pg_collation coll = (Form_pg_collation) GETSTRUCT(tup); + + IsThereCollationInNamespace(NameStr(coll->collname), nspOid); + } + else if (classId == OperatorClassRelationId) { + Form_pg_opclass opc = (Form_pg_opclass) GETSTRUCT(tup); + + IsThereOpClassInNamespace(NameStr(opc->opcname), + opc->opcmethod, nspOid); + } + else if (classId == OperatorFamilyRelationId) { + Form_pg_opfamily opf = (Form_pg_opfamily) GETSTRUCT(tup); + + IsThereOpFamilyInNamespace(NameStr(opf->opfname), + opf->opfmethod, nspOid); + } + else if (nameCacheId >= 0 && + SearchSysCacheExists2(nameCacheId, name, + ObjectIdGetDatum(nspOid))) + report_namespace_conflict(classId, + NameStr(*(DatumGetName(name))), + nspOid); + + /* Build modified tuple */ + values = (Datum*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(Datum)); + nulls = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + replaces = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + values[Anum_namespace - 1] = ObjectIdGetDatum(nspOid); + replaces[Anum_namespace - 1] = true; + newtup = heap_modify_tuple(tup, RelationGetDescr(rel), + values, nulls, replaces); + + /* Perform actual update */ + CatalogTupleUpdate(rel, &tup->t_self, newtup); + + /* Release memory */ + pfree(values); + pfree(nulls); + pfree(replaces); + + /* update dependencies to point to the new schema */ + changeDependencyFor(classId, objid, + NamespaceRelationId, oldNspOid, nspOid); + + return oldNspOid; +} + +/* + * Generic function to change the namespace of a given object, for simple + * cases (won't work for tables, nor other cases where we need to do more + * than change the namespace column of a single catalog entry). 
+ * + * The AlterFooNamespace() calls just above will call a function whose job + * is to lookup the arguments for the generic function here. + * + * rel: catalog relation containing object (RowExclusiveLock'd by caller) + * oidCacheId: syscache that indexes this catalog by OID + * nameCacheId: syscache that indexes this catalog by name and namespace + * (pass -1 if there is none) + * objid: OID of object to change the namespace of + * nspOid: OID of new namespace + * Anum_name: column number of catalog's name column + * Anum_namespace: column number of catalog's namespace column + * Anum_owner: column number of catalog's owner column, or -1 if none + * acl_kind: ACL type for object, or -1 if none assigned + * + * If the object does not have an owner or permissions, pass -1 for + * Anum_owner and acl_kind. In this case the calling user must be superuser. + * + * Returns the OID of the object's previous namespace. + */ +Oid AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId, Oid objid, Oid nspOid, int Anum_name, + int Anum_namespace, int Anum_owner, AclObjectKind acl_kind) +{ + Oid classId = RelationGetRelid(rel); + Oid oldNspOid; + Datum name, nmspace; + bool isnull = false; + HeapTuple tup, newtup; + Datum* values = NULL; + bool* nulls = NULL; + bool* replaces = NULL; + + tup = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for object %u of catalog \"%s\"", objid, RelationGetRelationName(rel)))); + + // AM_TODO: system table access, not necessary to use API + name = tableam_tops_tuple_getattr(tup, Anum_name, RelationGetDescr(rel), &isnull); + Assert(!isnull); + nmspace = heap_getattr(tup, Anum_namespace, RelationGetDescr(rel), &isnull); + Assert(!isnull); + oldNspOid = DatumGetObjectId(nmspace); + + /* Check basic namespace related issues */ + CheckSetNamespace(oldNspOid, nspOid, classId, objid); + 
+ /* Permission checks ... superusers can always do it */ + /* Database Security: Support separation of privilege. */ + if (!isRelSuperuser()) { + Datum owner; + Oid ownerId; + AclResult aclresult; + + /* Fail if object does not have an explicit owner */ + if (Anum_owner <= 0) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be system admin to set schema of %s", getObjectDescriptionOids(classId, objid))))); + + /* Otherwise, must be owner of the existing object */ + owner = heap_getattr(tup, Anum_owner, RelationGetDescr(rel), &isnull); + Assert(!isnull); + ownerId = DatumGetObjectId(owner); + if (!has_privs_of_role(GetUserId(), ownerId)) + aclcheck_error(ACLCHECK_NOT_OWNER, acl_kind, NameStr(*(DatumGetName(name)))); + + /* User must have CREATE privilege on new namespace */ + aclresult = pg_namespace_aclcheck(nspOid, GetUserId(), ACL_CREATE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(nspOid)); + } + + /* + * Check for duplicate name (more friendly than unique-index failure). + * Since this is just a friendliness check, we can just skip it in cases + * where there isn't a suitable syscache available. 
+ */ + if (nameCacheId >= 0 && SearchSysCacheExists2(nameCacheId, name, ObjectIdGetDatum(nspOid))) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("%s already exists in schema \"%s\"", + getObjectDescriptionOids(classId, objid), + get_namespace_name(nspOid)))); + + /* Build modified tuple */ + values = (Datum*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(Datum)); + nulls = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + replaces = (bool*)palloc0(RelationGetNumberOfAttributes(rel) * sizeof(bool)); + values[Anum_namespace - 1] = ObjectIdGetDatum(nspOid); + replaces[Anum_namespace - 1] = true; + newtup = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); + + /* Perform actual update */ + simple_heap_update(rel, &tup->t_self, newtup); + CatalogUpdateIndexes(rel, newtup); + + /* Release memory */ + pfree_ext(values); + pfree_ext(nulls); + pfree_ext(replaces); + + /* update dependencies to point to the new schema */ + (void)changeDependencyFor(classId, objid, NamespaceRelationId, oldNspOid, nspOid); + + return oldNspOid; +} + +/* + * Executes an ALTER OBJECT / OWNER TO statement. Based on the object + * type, the function appropriate to that type is executed. + */ +ObjectAddress ExecAlterOwnerStmt(AlterOwnerStmt* stmt) +{ + const char* newOwnerName = stmt->newowner; + Oid newowner; + if (strcmp(newOwnerName, "current_user") == 0) { + /* CURRENT_USER */ + newowner = GetUserId(); + } else if (strcmp(newOwnerName, "session_user") == 0) { + /* SESSION_USER */ + newowner = GetSessionUserId(); + } else { + /* Normal User */ + newowner = get_role_oid(newOwnerName, false); + } + + switch (stmt->objectType) { + case OBJECT_AGGREGATE: + /* Given ordered set aggregate with no direct args, aggr_args variable is modified in gram.y. + So the parse of aggr_args should be changed. See gram.y for detail. 
*/ + stmt->objarg = (List*)linitial(stmt->objarg); + return AlterAggregateOwner(stmt->object, stmt->objarg, newowner); + + case OBJECT_COLLATION: + return AlterCollationOwner(stmt->object, newowner); + + case OBJECT_CONVERSION: + return AlterConversionOwner(stmt->object, newowner); + + case OBJECT_DATABASE: + return AlterDatabaseOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_EVENT_TRIGGER: + return AlterEventTriggerOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_FUNCTION: + return AlterFunctionOwner(stmt->object, stmt->objarg, newowner); + + case OBJECT_PACKAGE: + return AlterPackageOwner(stmt->object, newowner); + break; + case OBJECT_LANGUAGE: + return AlterLanguageOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_LARGEOBJECT: + return LargeObjectAlterOwner(oidparse((Node*)linitial(stmt->object)), newowner); + + case OBJECT_OPERATOR: + Assert(list_length(stmt->objarg) == 2); + return AlterOperatorOwner( + stmt->object, (TypeName*)linitial(stmt->objarg), (TypeName*)lsecond(stmt->objarg), newowner); + + case OBJECT_OPCLASS: + { + List* object_names; + object_names=list_copy(stmt->object); + object_names=list_delete_first(object_names); + ObjectAddress obj_opclass=AlterOpClassOwner(object_names, ((Value*)linitial(stmt->object))->val.str, newowner); + list_free_ext(object_names); + return obj_opclass; + } + + case OBJECT_OPFAMILY: + { + List* object_names; + object_names=list_copy(stmt->object); + object_names=list_delete_first(object_names); + ObjectAddress obj_opfamily=AlterOpFamilyOwner(object_names, ((Value*)linitial(stmt->object))->val.str, newowner); + list_free_ext(object_names); + return obj_opfamily; + } + case OBJECT_SCHEMA: + return AlterSchemaOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_TABLESPACE: + return AlterTableSpaceOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_TYPE: + case OBJECT_DOMAIN: /* same as TYPE */ + return AlterTypeOwner(stmt->object, newowner, 
stmt->objectType, true); + + case OBJECT_TSDICTIONARY: + return AlterTSDictionaryOwner(stmt->object, newowner); + + case OBJECT_TSCONFIGURATION: + return AlterTSConfigurationOwner(stmt->object, newowner); + + case OBJECT_FDW: + return AlterForeignDataWrapperOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_FOREIGN_SERVER: + return AlterForeignServerOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_DATA_SOURCE: + return AlterDataSourceOwner(strVal(linitial(stmt->object)), newowner); + case OBJECT_DIRECTORY: + return AlterDirectoryOwner(strVal(linitial(stmt->object)), newowner); + + case OBJECT_SYNONYM: + return AlterSynonymOwner(stmt->object, newowner); + + case OBJECT_PUBLICATION: + return AlterPublicationOwner(strVal(linitial(stmt->object)), newowner); + break; + + case OBJECT_SUBSCRIPTION: + return AlterSubscriptionOwner(strVal(linitial(stmt->object)), newowner); + break; + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized AlterOwnerStmt type: %d", (int)stmt->objectType))); + return InvalidObjectAddress; + } +} diff --git a/contrib/filelist.txt b/contrib/filelist.txt index 71520b90d..c74440bb0 100644 --- a/contrib/filelist.txt +++ b/contrib/filelist.txt @@ -252,3 +252,4 @@ weightstring.cpp windowfuncs.cpp year.cpp year.h +alter.cpp -- Gitee From 551825a00441d314dbb8e502e2de9714e1b0d186 Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 28 Sep 2023 16:32:53 +0800 Subject: [PATCH 003/434] Support rename same name. 
--- contrib/dolphin/expected/test_alter_table.out | 222 ++++++++++++++++++ contrib/dolphin/plugin_parser/gram.y | 14 +- .../dolphin/plugin_parser/parse_utilcmd.cpp | 11 +- contrib/dolphin/sql/test_alter_table.sql | 41 ++++ contrib/dolphin/tablecmds.cpp | 24 +- 5 files changed, 301 insertions(+), 11 deletions(-) diff --git a/contrib/dolphin/expected/test_alter_table.out b/contrib/dolphin/expected/test_alter_table.out index 81617d79e..61150e07f 100644 --- a/contrib/dolphin/expected/test_alter_table.out +++ b/contrib/dolphin/expected/test_alter_table.out @@ -32,6 +32,107 @@ NOTICE: relation "not_exists_tbl" does not exist, skipping -- test multi-cmd alter table alter_table_tbl1 add column key int, rename index new_alter_table_tbl_b_ind to alter_table_tbl_b_ind; alter table alter_table_tbl1 drop column key, drop key alter_table_tbl_b_ind; +-- alter table - rename column +ALTER TABLE alter_table_tbl1 RENAME COLUMN a TO AB; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + AB | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("AB") TABLESPACE pg_default + +ALTER TABLE alter_table_tbl1 RENAME COLUMN ab TO Ab; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + Ab | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("Ab") TABLESPACE pg_default + +ALTER TABLE alter_table_tbl1 RENAME AB TO AB; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + AB | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("AB") TABLESPACE pg_default + +ALTER TABLE alter_table_tbl1 RENAME ab TO ab; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + ab | integer | not null + b | integer | 
+Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree (ab) TABLESPACE pg_default + +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN AB TO Ab; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + Ab | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("Ab") TABLESPACE pg_default + +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN Ab TO ab; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + ab | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree (ab) TABLESPACE pg_default + +ALTER TABLE if exists alter_table_tbl1 RENAME AB TO ab; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + ab | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree (ab) TABLESPACE pg_default + +ALTER TABLE if exists alter_table_tbl1 RENAME Ab TO AB; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + AB | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("AB") TABLESPACE pg_default + +ALTER TABLE alter_table_tbl1 CHANGE AB ab int; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + ab | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree (ab) TABLESPACE pg_default + +ALTER TABLE alter_table_tbl1 CHANGE COLUMN AB AB int; +\d alter_table_tbl1 +Table "db_alter_table.alter_table_tbl1" + Column | Type | Modifiers +--------+---------+----------- + AB | integer | not null + b | integer | +Indexes: + "alter_table_tbl1_pkey" PRIMARY KEY, btree ("AB") TABLESPACE pg_default + drop table alter_table_tbl1, alter_table_tbl2; set 
dolphin.sql_mode='pipes_as_concat,pad_char_to_full_length'; create table table_ddl_0030_01(col1 int primary key,col2 varchar(20)); @@ -77,6 +178,127 @@ select * from foreign_key_table_002 order by 1,2,3; 3 | dddd | 1 | 2020-07-20 00:00:00-07 | t | dddd | 2.2 (2 rows) +-- alter table - rename column +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COLUMN COL_1 TO col_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + col_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (col_1) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COLUMN COL_1 TO Col_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + Col_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (`Col_1`) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE foreign_key_table_002 RENAME col_1 TO col_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + col_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (col_1) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COL_1 
TO COL_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + COL_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (`COL_1`) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COLUMN cOL_1 TO cOL_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + cOL_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (`cOL_1`) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COLUMN col_1 TO Col_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + Col_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (`Col_1`) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COL_1 TO COL_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + COL_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + 
COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (`COL_1`) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME Col_1 TO col_1; +\d foreign_key_table_002 + Table "db_alter_table.foreign_key_table_002" + Column | Type | Modifiers +--------+--------------------------+----------- + col_1 | smallint | default 3 + COL_2 | character(30) | + COL_3 | integer | + COL_4 | timestamp with time zone | + COL_5 | boolean | + COL_6 | character(30) | + COL_7 | real | +Foreign-key constraints: + "foreign_key_table_002_col_1_fkey" FOREIGN KEY (col_1) REFERENCES foreign_key_table_001(`COL_1`) ON DELETE SET DEFAULT + drop table foreign_key_table_001,foreign_key_table_002; show dolphin.sql_mode; dolphin.sql_mode diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 0c89553f7..a868f0b50 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -5714,7 +5714,7 @@ alter_table_cmd: n->def = (Node *)def; $$ = (Node *)n; } - | CHANGE opt_column ColId DolphinColColId Typename opt_charset ColQualList opt_column_options add_column_first_after + | CHANGE opt_column DolphinColColId DolphinColColId Typename opt_charset ColQualList opt_column_options add_column_first_after { #ifdef ENABLE_MULTIPLE_NODES const char* message = "Un-support feature"; @@ -23236,7 +23236,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = true; $$ = (Node *)n; } - | ALTER TABLE relation_expr RENAME name TO name + | ALTER TABLE relation_expr RENAME DolphinColColId TO DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; @@ -23247,7 +23247,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = false; $$ = (Node *)n; } - | ALTER TABLE relation_expr RENAME COLUMN name TO name + | ALTER TABLE relation_expr RENAME COLUMN DolphinColColId TO 
DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; @@ -23258,7 +23258,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = false; $$ = (Node *)n; } - | ALTER TABLE IF_P EXISTS relation_expr RENAME name TO name + | ALTER TABLE IF_P EXISTS relation_expr RENAME DolphinColColId TO DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; @@ -23269,7 +23269,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = true; $$ = (Node *)n; } - | ALTER TABLE IF_P EXISTS relation_expr RENAME COLUMN name TO name + | ALTER TABLE IF_P EXISTS relation_expr RENAME COLUMN DolphinColColId TO DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; @@ -23359,7 +23359,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name $$ = (Node *)n; } - | ALTER FOREIGN TABLE relation_expr RENAME opt_column name TO name + | ALTER FOREIGN TABLE relation_expr RENAME opt_column DolphinColColId TO DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; @@ -23370,7 +23370,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = false; $$ = (Node *)n; } - | ALTER FOREIGN TABLE IF_P EXISTS relation_expr RENAME opt_column name TO name + | ALTER FOREIGN TABLE IF_P EXISTS relation_expr RENAME opt_column DolphinColColId TO DolphinColColId { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_COLUMN; diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index c14bc0732..e1a07986e 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -9023,7 +9023,16 @@ static void TransformModifyColumndef(CreateStmtContext* cxt, AlterTableCmd* cmd) // drop old auto_increment DropModifyColumnAutoIncrement(cxt, cxt->rel, cmd->name); /* for CHANGE column */ - if (strcmp(cmd->name, 
def->colname) != 0) { +#ifndef DOLPHIN + /* + * we need to do rename even though the name is same, cause we don't care about column's case now. + * it may do some change even though the name is same. + * create table t1(ABC int); + * alter table t1 change abc abc int; -- should lead to rename + */ + if (strcmp(cmd->name, def->colname) != 0) +#endif + { RenameStmt *rename = makeNode(RenameStmt); rename->renameType = OBJECT_COLUMN; rename->relationType = OBJECT_TABLE; diff --git a/contrib/dolphin/sql/test_alter_table.sql b/contrib/dolphin/sql/test_alter_table.sql index 941fb9023..780a2768d 100644 --- a/contrib/dolphin/sql/test_alter_table.sql +++ b/contrib/dolphin/sql/test_alter_table.sql @@ -41,6 +41,28 @@ alter table if exists not_exists_tbl rename new_not_exists_tbl; alter table alter_table_tbl1 add column key int, rename index new_alter_table_tbl_b_ind to alter_table_tbl_b_ind; alter table alter_table_tbl1 drop column key, drop key alter_table_tbl_b_ind; +-- alter table - rename column +ALTER TABLE alter_table_tbl1 RENAME COLUMN a TO AB; +\d alter_table_tbl1 +ALTER TABLE alter_table_tbl1 RENAME COLUMN ab TO Ab; +\d alter_table_tbl1 +ALTER TABLE alter_table_tbl1 RENAME AB TO AB; +\d alter_table_tbl1 +ALTER TABLE alter_table_tbl1 RENAME ab TO ab; +\d alter_table_tbl1 +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN AB TO Ab; +\d alter_table_tbl1 +ALTER TABLE if exists alter_table_tbl1 RENAME COLUMN Ab TO ab; +\d alter_table_tbl1 +ALTER TABLE if exists alter_table_tbl1 RENAME AB TO ab; +\d alter_table_tbl1 +ALTER TABLE if exists alter_table_tbl1 RENAME Ab TO AB; +\d alter_table_tbl1 +ALTER TABLE alter_table_tbl1 CHANGE AB ab int; +\d alter_table_tbl1 +ALTER TABLE alter_table_tbl1 CHANGE COLUMN AB AB int; +\d alter_table_tbl1 + drop table alter_table_tbl1, alter_table_tbl2; set dolphin.sql_mode='pipes_as_concat,pad_char_to_full_length'; @@ -80,6 +102,25 @@ insert into foreign_key_table_002 values(2,'eeee',2,'2020-07-20',true,'eeee',2.2 delete from 
foreign_key_table_001 where COL_1=1; select * from foreign_key_table_002 order by 1,2,3; + +-- alter table - rename column +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COLUMN COL_1 TO col_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COLUMN COL_1 TO Col_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE foreign_key_table_002 RENAME col_1 TO col_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE foreign_key_table_002 RENAME COL_1 TO COL_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COLUMN cOL_1 TO cOL_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COLUMN col_1 TO Col_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME COL_1 TO COL_1; +\d foreign_key_table_002 +ALTER FOREIGN TABLE if exists foreign_key_table_002 RENAME Col_1 TO col_1; +\d foreign_key_table_002 + drop table foreign_key_table_001,foreign_key_table_002; show dolphin.sql_mode; diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 4e6983585..60322d878 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -566,7 +566,11 @@ static void ATPrepAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode); static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, ColumnDef* colDef, bool isOid, bool recurse, bool recursing, bool is_first, char *after_name, LOCKMODE lockmode); -static void check_for_column_name_collision(Relation rel, const char* colname); +static void check_for_column_name_collision(Relation rel, const char* colname, +#ifdef DOLPHIN + int2 old_att_num = InvalidAttrNumber +#endif + ); static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid); static void add_column_collation_dependency(Oid relid, int32 attnum, Oid collid); static void ATPrepAddOids(List** wqueue, Relation rel, bool 
recurse, AlterTableCmd* cmd, LOCKMODE lockmode); @@ -5989,7 +5993,11 @@ static AttrNumber renameatt_internal(Oid myrelid, const char* oldattname, const } /* new name should not already exist */ - check_for_column_name_collision(targetrelation, newattname); + check_for_column_name_collision(targetrelation, newattname, +#ifdef DOLPHIN + attnum +#endif + ); /* new name should not conflict with system columns */ if (CHCHK_PSORT_RESERVE_COLUMN(newattname)) { @@ -12782,7 +12790,11 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat * If a new or renamed column will collide with the name of an existing * column, error out. */ -static void check_for_column_name_collision(Relation rel, const char* colname) +static void check_for_column_name_collision(Relation rel, const char* colname, +#ifdef DOLPHIN + int2 old_att_num +#endif +) { HeapTuple attTuple; int attnum; @@ -12802,6 +12814,12 @@ static void check_for_column_name_collision(Relation rel, const char* colname) attnum = ((Form_pg_attribute)GETSTRUCT(attTuple))->attnum; ReleaseSysCache(attTuple); +#ifdef DOLPHIN + /* allow rename current column name with a same name */ + if (old_att_num != InvalidAttrNumber && old_att_num == attnum) { + return; + } +#endif /* * We throw a different error message for conflicts with system column -- Gitee From 5d49df778ab025961aa07c01ed0479b071feda33 Mon Sep 17 00:00:00 2001 From: zlh21343 Date: Sat, 7 Oct 2023 03:03:25 +0000 Subject: [PATCH 004/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=20contrib/dolphin/expected/case=5Fsensitive?= =?UTF-8?q?=5Ftest/create=5Ftable=5Flike.out.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: zlh21343 --- .../case_sensitive_test/create_table_like.out | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/contrib/dolphin/expected/case_sensitive_test/create_table_like.out 
b/contrib/dolphin/expected/case_sensitive_test/create_table_like.out index 7abc43640..15d27f519 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_table_like.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_table_like.out @@ -119,9 +119,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "Ctlt1_pkey" for CREATE INDEX ctlt1_b_key ON Ctlt1 (b); CREATE INDEX ctlt1_fnidx ON Ctlt1 ((a || b)); COMMENT ON COLUMN Ctlt1.a IS 'A'; -ERROR: relation "ctlt1" does not exist COMMENT ON COLUMN Ctlt1.b IS 'B'; -ERROR: relation "ctlt1" does not exist COMMENT ON CONSTRAINT ctlt1_a_check ON Ctlt1 IS 't1_a_check'; ERROR: constraint "ctlt1_a_check" for table "Ctlt1" does not exist COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; @@ -131,14 +129,11 @@ ALTER TABLE Ctlt1 ALTER COLUMN a SET STORAGE MAIN; CREATE TABLE Ctlt2 (c text); ALTER TABLE Ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; COMMENT ON COLUMN Ctlt2.c IS 'C'; -ERROR: relation "ctlt2" does not exist CREATE TABLE Ctlt3 (a text CHECK (length(a) < 5), c text); ALTER TABLE Ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; ALTER TABLE Ctlt3 ALTER COLUMN a SET STORAGE MAIN; COMMENT ON COLUMN Ctlt3.a IS 'A3'; -ERROR: relation "ctlt3" does not exist COMMENT ON COLUMN Ctlt3.c IS 'C'; -ERROR: relation "ctlt3" does not exist COMMENT ON CONSTRAINT ctlt3_a_check ON Ctlt3 IS 't3_a_check'; ERROR: constraint "ctlt3_a_check" for table "Ctlt3" does not exist CREATE TABLE Ctlt4 (a text, c text); @@ -159,9 +154,9 @@ CREATE TABLE Ctlt12_comments (LIKE Ctlt1 INCLUDING COMMENTS, LIKE Ctlt2 INCLUDIN Table "public.Ctlt12_comments" Column | Type | Modifiers | Storage | Stats target | Description --------+------+-----------+----------+--------------+------------- - a | text | not null | extended | | - b | text | | extended | | - c | text | | extended | | + a | text | not null | extended | | A + b | text | | extended | | B + c | text | | extended | | C Has OIDs: no Options: orientation=row, compression=no @@ -188,8 +183,8 @@ 
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "Ctlt_all_pkey" f Table "public.Ctlt_all" Column | Type | Modifiers | Storage | Stats target | Description --------+------+-----------+----------+--------------+------------- - a | text | not null | main | | - b | text | | extended | | + a | text | not null | main | | A + b | text | | extended | | B Indexes: "Ctlt_all_pkey" PRIMARY KEY, btree (a) TABLESPACE pg_default "Ctlt_all_b_idx" btree (b) TABLESPACE pg_default -- Gitee From 5d7c682cc97f3a8c57a8efb2d16d0e6eb6a11d80 Mon Sep 17 00:00:00 2001 From: zlh21343 Date: Sat, 7 Oct 2023 03:09:15 +0000 Subject: [PATCH 005/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=20contrib/dolphin/expected/case=5Fsensitive?= =?UTF-8?q?=5Ftest=5Fbackquote/create=5Ftable=5Flike.out.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: zlh21343 --- .../create_table_like.out | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out b/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out index 16af22162..756ff2fdd 100644 --- a/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out +++ b/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out @@ -119,9 +119,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "Ctlt1_pkey" for CREATE INDEX `ctlt1_b_key` ON `Ctlt1` (`b`); CREATE INDEX `ctlt1_fnidx` ON `Ctlt1` ((`a` || `b`)); COMMENT ON COLUMN `Ctlt1`.a IS 'A'; -ERROR: relation "ctlt1" does not exist COMMENT ON COLUMN `Ctlt1`.b IS 'B'; -ERROR: relation "ctlt1" does not exist COMMENT ON CONSTRAINT ctlt1_a_check ON `Ctlt1` IS 't1_a_check'; ERROR: constraint "ctlt1_a_check" for table "Ctlt1" does not exist COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; @@ -138,9 +136,7 @@ CREATE TABLE `Ctlt3` (`a` text CHECK (length(`a`) < 
5), `c` text); ALTER TABLE `Ctlt3` ALTER COLUMN `c` SET STORAGE EXTERNAL; ALTER TABLE `Ctlt3` ALTER COLUMN `a` SET STORAGE MAIN; COMMENT ON COLUMN `Ctlt3`.`a` IS 'A3'; -ERROR: relation "ctlt3" does not exist COMMENT ON COLUMN `Ctlt3`.`c` IS 'C'; -ERROR: relation "ctlt3" does not exist COMMENT ON CONSTRAINT `ctlt3_a_check` ON `Ctlt3` IS 't3_a_check'; ERROR: constraint "ctlt3_a_check" for table "Ctlt3" does not exist CREATE TABLE `Ctlt4` (`a` text, `c` text); @@ -161,8 +157,8 @@ CREATE TABLE `Ctlt12_comments` (LIKE `Ctlt1` INCLUDING COMMENTS, LIKE `Ctlt2` IN Table "public.Ctlt12_comments" Column | Type | Modifiers | Storage | Stats target | Description --------+------+-----------+----------+--------------+------------- - a | text | not null | extended | | - b | text | | extended | | + a | text | not null | extended | | A + b | text | | extended | | B c | text | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -190,8 +186,8 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "Ctlt_all_pkey" f Table "public.Ctlt_all" Column | Type | Modifiers | Storage | Stats target | Description --------+------+-----------+----------+--------------+------------- - a | text | not null | main | | - b | text | | extended | | + a | text | not null | main | | A + b | text | | extended | | B Indexes: "Ctlt_all_pkey" PRIMARY KEY, btree (a) TABLESPACE pg_default "Ctlt_all_b_idx" btree (b) TABLESPACE pg_default -- Gitee From e9bff8d27bd72c869b6ed25263a56f293c51a0dd Mon Sep 17 00:00:00 2001 From: totaj Date: Sat, 7 Oct 2023 14:34:43 +0800 Subject: [PATCH 006/434] Fix DDL/DML concurrent in partition table. 
--- contrib/dolphin/plugin_utils/adt/partitionfuncs.cpp | 3 ++- contrib/dolphin/tablecmds.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/partitionfuncs.cpp b/contrib/dolphin/plugin_utils/adt/partitionfuncs.cpp index 2968be3f7..8ed68332c 100644 --- a/contrib/dolphin/plugin_utils/adt/partitionfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/partitionfuncs.cpp @@ -252,7 +252,8 @@ Datum RebuildPartition(PG_FUNCTION_ARGS) reindexPartition(relid, partOid, REINDEX_REL_SUPPRESS_INDEX_USE, REINDEX_ALL_INDEX); } releasePartitionList(rel, &partList, AccessExclusiveLock); - relation_close(rel, AccessShareLock); + /* hold lock until transaction commit, cause we still need lock before doing RelationForgetRelation */ + relation_close(rel, NoLock); RelationForgetRelation(relid); PG_RETURN_TEXT_P(cstring_to_text(tableName)); } diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 4e6983585..bae67e1aa 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -33605,7 +33605,8 @@ void ExecRemovePartition(Oid relid, char* tableName) REINDEX_REL_PROCESS_TOAST | REINDEX_REL_SUPPRESS_INDEX_USE | REINDEX_REL_CHECK_CONSTRAINTS, REINDEX_ALL_INDEX, NULL, NULL)) ereport(NOTICE, (errmsg("The table has no indexes"))); - relation_close(rel, AccessExclusiveLock); + /* hold lock until transaction commit, cause we still need lock before doing RelationForgetRelation */ + relation_close(rel, NoLock); RelationForgetRelation(relid); list_free_ext(tempTableOidList); list_free_ext(indexOidList); -- Gitee From c75996cdcf6e4cda61f344af53ff8080282867d1 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Sat, 7 Oct 2023 15:41:39 +0800 Subject: [PATCH 007/434] dolphin300 --- contrib/dolphin/CMakeLists.txt | 22 ++++++---------- contrib/dolphin/Makefile | 17 +++--------- contrib/dolphin/cmake.sh | 26 +++++++++---------- contrib/dolphin/dolphin.control | 2 +- 
.../rollback_script/dolphin--3.0--2.0.sql | 3 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 3 +++ 6 files changed, 32 insertions(+), 41 deletions(-) create mode 100644 contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql create mode 100644 contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql diff --git a/contrib/dolphin/CMakeLists.txt b/contrib/dolphin/CMakeLists.txt index edec8e6cf..ed9ed0390 100755 --- a/contrib/dolphin/CMakeLists.txt +++ b/contrib/dolphin/CMakeLists.txt @@ -160,34 +160,28 @@ set_target_properties(dolphin PROPERTIES PREFIX "") install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin.control DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--3.0.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--1.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--1.0--1.1.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--1.0--2.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--1.1--1.0.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0--1.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--1.1--2.0.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/openGauss_expr_dolphin.ir - DESTINATION share/postgresql/extension/ -) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/openGauss_expr_dolphin.ir +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0--1.1.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/upgrade_script/dolphin--1.1--2.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0--3.0.sql DESTINATION share/postgresql/extension/ ) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/upgrade_script/dolphin--2.0--2.0.1.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--3.0--2.0.sql DESTINATION share/postgresql/extension/ ) 
-install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/rollback_script/dolphin--2.0--1.1.sql - DESTINATION share/postgresql/extension/ -) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/rollback_script/dolphin--2.0.1--2.0.sql +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/openGauss_expr_dolphin.ir DESTINATION share/postgresql/extension/ ) install(TARGETS dolphin DESTINATION lib/postgresql) diff --git a/contrib/dolphin/Makefile b/contrib/dolphin/Makefile index 50cb20df4..309b93d6b 100644 --- a/contrib/dolphin/Makefile +++ b/contrib/dolphin/Makefile @@ -130,20 +130,11 @@ extra_clean: make clean -C $(catalog) clean: extra_clean -dolphin--1.0.sql: sql_script/* sql_script_post/* +dolphin--3.0.sql: sql_script/* sql_script_post/* upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql upgrade_script/dolphin--2.0--3.0.sql cat $^ > $@ -dolphin--2.0.sql: dolphin--1.0.sql upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql - cat $^ > $@ -dolphin--1.0--2.0.sql: upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql - cat $^ > $@ -dolphin--2.0--1.0.sql: rollback_script/dolphin--2.0--1.1.sql rollback_script/dolphin--1.1--1.0.sql - cat $^ > $@ -dolphin--1.1--2.0.sql: upgrade_script/dolphin--1.1--2.0.sql - cp $^ $@ -dolphin--2.0--1.1.sql: rollback_script/dolphin--2.0--1.1.sql - cp $^ $@ -DATA_built = dolphin--1.0.sql dolphin--2.0.sql dolphin--1.0--2.0.sql dolphin--2.0--1.0.sql dolphin--1.1--2.0.sql dolphin--2.0--1.1.sql openGauss_expr_dolphin.ir -DATA = upgrade_script/dolphin--2.0--2.0.1.sql rollback_script/dolphin--2.0.1--2.0.sql + +DATA_built = dolphin--3.0.sql openGauss_expr_dolphin.ir +DATA = upgrade_script/dolphin--1.0--1.1.sql rollback_script/dolphin--1.1--1.0.sql upgrade_script/dolphin--1.1--2.0.sql rollback_script/dolphin--2.0--1.1.sql upgrade_script/dolphin--2.0--3.0.sql rollback_script/dolphin--3.0--2.0.sql upgrade_script/dolphin--2.0--2.0.1.sql rollback_script/dolphin--2.0.1--2.0.sql OBJS += plugin_postgres.o plugin_postgres.o: 
plugin_postgres.cpp diff --git a/contrib/dolphin/cmake.sh b/contrib/dolphin/cmake.sh index 26852a3c9..3ff82b452 100644 --- a/contrib/dolphin/cmake.sh +++ b/contrib/dolphin/cmake.sh @@ -1,23 +1,23 @@ #!/bin/bash CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON" cpus_num=$(grep -w processor /proc/cpuinfo|wc -l) -rm -f dolphin--1.0.sql -touch dolphin--1.0.sql -for i in `ls sql_script`; do cat sql_script/$i >> dolphin--1.0.sql; done -for i in `ls sql_script_post`; do cat sql_script_post/$i >> dolphin--1.0.sql; done -rm -f dolphin--2.0.sql -touch dolphin--2.0.sql -cat dolphin--1.0.sql upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql >> dolphin--2.0.sql -rm -f dolphin--1.0--2.0.sql -touch dolphin--1.0--2.0.sql -cat upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql >> dolphin--1.0--2.0.sql -rm -f dolphin--2.0--1.0.sql -touch dolphin--2.0--1.0.sql -cat rollback_script/dolphin--2.0--1.1.sql rollback_script/dolphin--1.1--1.0.sql >> dolphin--2.0--1.0.sql +rm -f dolphin--3.0.sql +touch dolphin--3.0.sql +for i in `ls sql_script`; do cat sql_script/$i >> dolphin--3.0.sql; done +for i in `ls sql_script_post`; do cat sql_script_post/$i >> dolphin--3.0.sql; done +cat upgrade_script/dolphin--1.0--1.1.sql upgrade_script/dolphin--1.1--2.0.sql upgrade_script/dolphin--2.0--3.0.sql >> dolphin--3.0.sql +rm -f dolphin--1.0--1.1.sql +cp upgrade_script/dolphin--1.0--1.1.sql dolphin--1.0--1.1.sql +rm -f dolphin--1.1--1.0.sql +cp rollback_script/dolphin--1.1--1.0.sql dolphin--1.1--1.0.sql rm -f dolphin--1.1--2.0.sql cp upgrade_script/dolphin--1.1--2.0.sql dolphin--1.1--2.0.sql rm -f dolphin--2.0--1.1.sql cp rollback_script/dolphin--2.0--1.1.sql dolphin--2.0--1.1.sql +rm -f dolphin--2.0--3.0.sql +cp upgrade_script/dolphin--2.0--3.0.sql dolphin--2.0--3.0.sql +rm -f dolphin--3.0--2.0.sql +cp rollback_script/dolphin--3.0--2.0.sql dolphin--3.0--2.0.sql cp 
llvmir/openGauss_expr_dolphin_${BUILD_TUPLE}.ir openGauss_expr_dolphin.ir DOLPHIN_CMAKE_BUILD_DIR=`pwd`/tmp_build [ -d "${DOLPHIN_CMAKE_BUILD_DIR}" ] && rm -rf ${DOLPHIN_CMAKE_BUILD_DIR} diff --git a/contrib/dolphin/dolphin.control b/contrib/dolphin/dolphin.control index a19a89603..c1b71c00b 100644 --- a/contrib/dolphin/dolphin.control +++ b/contrib/dolphin/dolphin.control @@ -1,5 +1,5 @@ # dolphin extension comment = 'sql engine' -default_version = '2.0' +default_version = '3.0' module_pathname = '$libdir/dolphin' relocatable = false \ No newline at end of file diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql new file mode 100644 index 000000000..12468ba0a --- /dev/null +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -0,0 +1,3 @@ +DROP FUNCTION IF EXISTS pg_catalog.dolphin_invoke(); +CREATE FUNCTION pg_catalog.dolphin_invoke() + RETURNS VOID AS '$libdir/dolphin','dolphin_invoke' LANGUAGE C STRICT; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql new file mode 100644 index 000000000..12468ba0a --- /dev/null +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -0,0 +1,3 @@ +DROP FUNCTION IF EXISTS pg_catalog.dolphin_invoke(); +CREATE FUNCTION pg_catalog.dolphin_invoke() + RETURNS VOID AS '$libdir/dolphin','dolphin_invoke' LANGUAGE C STRICT; -- Gitee From ed1a8966aa11b59550ee1870875e407d86563387 Mon Sep 17 00:00:00 2001 From: totaj Date: Sun, 8 Oct 2023 17:58:54 +0800 Subject: [PATCH 008/434] Fix timediff core. 
--- .../timediff_illegal_participation.out | 52 ++++++++++++++++--- .../dolphin/plugin_utils/adt/timestamp.cpp | 13 +++-- .../sql/timediff_illegal_participation.sql | 8 +++ 3 files changed, 63 insertions(+), 10 deletions(-) diff --git a/contrib/dolphin/expected/timediff_illegal_participation.out b/contrib/dolphin/expected/timediff_illegal_participation.out index af1cbe39b..3cbdbda96 100644 --- a/contrib/dolphin/expected/timediff_illegal_participation.out +++ b/contrib/dolphin/expected/timediff_illegal_participation.out @@ -38,6 +38,12 @@ CONTEXT: referenced column: c insert into test values(timediff(B'1100', B'101')); ERROR: Truncated incorrect time value CONTEXT: referenced column: c +insert into test values(TIMEDIFF(CURRENT_DATE, '2004-12-07')); +ERROR: time field value out of range +CONTEXT: referenced column: c +insert into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); +ERROR: time field value out of range +CONTEXT: referenced column: c insert ignore into test values(timediff(B'101', B'1010')); WARNING: Truncated incorrect time value CONTEXT: referenced column: c @@ -74,9 +80,33 @@ CONTEXT: referenced column: c insert ignore into test values(timediff(B'1100', B'101')); WARNING: Truncated incorrect time value CONTEXT: referenced column: c +insert ignore into test values(TIMEDIFF(CURRENT_DATE, '2004-12-07')); +WARNING: time field value out of range +CONTEXT: referenced column: c +insert ignore into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); +WARNING: time field value out of range +CONTEXT: referenced column: c +SELECT TIMEDIFF(CURRENT_DATE, '2004-12-07'); +WARNING: time field value out of range +CONTEXT: referenced column: timediff + timediff +----------- + 838:59:59 +(1 row) + +SELECT TIMEDIFF('2004-12-07', CURRENT_DATE); +WARNING: time field value out of range +CONTEXT: referenced column: timediff + timediff +------------ + -838:59:59 +(1 row) + select c from test order by c; - c ---- + c +------------ + -838:59:59 + 838:59:59 @@ -89,7 +119,7 @@ 
select c from test order by c; -(12 rows) +(14 rows) set dolphin.sql_mode = ''; select timediff('asdf', '00:00:00'); @@ -253,9 +283,19 @@ CONTEXT: referenced column: c insert into test values(timediff(B'1100', B'101')); WARNING: Truncated incorrect time value CONTEXT: referenced column: c +insert into test values(TIMEDIFF(CURRENT_DATE, '2004-12-07')); +WARNING: time field value out of range +CONTEXT: referenced column: c +insert into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); +WARNING: time field value out of range +CONTEXT: referenced column: c select c from test order by c; - c ---- + c +------------ + -838:59:59 + -838:59:59 + 838:59:59 + 838:59:59 @@ -280,7 +320,7 @@ select c from test order by c; -(24 rows) +(28 rows) drop table test; reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 8b23dcec3..e58fe93a2 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -7033,7 +7033,7 @@ Oid convert_unknown_to_datetime_time(const char* str, Timestamp *datetime, TimeA { *datetime = DatumGetTimestamp( DirectFunctionCall1(date_timestamp, DirectFunctionCall1(date_in, CStringGetDatum(start)))); - return DATEOID; + typid = DATEOID; } PG_CATCH(); { @@ -7041,9 +7041,10 @@ Oid convert_unknown_to_datetime_time(const char* str, Timestamp *datetime, TimeA *time = DatumGetTimeADT( DirectFunctionCall3(time_in, CStringGetDatum(start), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); check_b_format_time_range_with_ereport(*time); - return TIMEOID; + typid = TIMEOID; } PG_END_TRY(); + return typid; } /** @@ -7272,11 +7273,11 @@ Datum timediff(PG_FUNCTION_ARGS) Timestamp datetime1, datetime2; Oid val_type1, val_type2; TimeADT result; + int level = fcinfo->can_ignore || !SQL_MODE_STRICT() ? 
WARNING : ERROR; val_type1 = get_fn_expr_argtype(fcinfo->flinfo, 0); val_type2 = get_fn_expr_argtype(fcinfo->flinfo, 1); if (!get_time(val_type1, PG_GETARG_DATUM(0)) || !get_time(val_type2, PG_GETARG_DATUM(1))) { - int level = fcinfo->can_ignore || !SQL_MODE_STRICT() ? WARNING : ERROR; ereport(level, (errmsg("Truncated incorrect time value"))); PG_RETURN_NULL(); } @@ -7320,7 +7321,11 @@ Datum timediff(PG_FUNCTION_ARGS) errmsg("unsupported input data type: %s", format_type_be(val_type1)))); } } - check_b_format_time_range_with_ereport(result); + if (result < -B_FORMAT_TIME_MAX_VALUE || result > B_FORMAT_TIME_MAX_VALUE) { + ereport(level, (errcode(ERRCODE_DATETIME_FIELD_OVERFLOW), + errmsg("time field value out of range"))); + result = result < -B_FORMAT_TIME_MAX_VALUE ? -B_FORMAT_TIME_MAX_VALUE : B_FORMAT_TIME_MAX_VALUE; + } PG_RETURN_TIMEADT(result); } diff --git a/contrib/dolphin/sql/timediff_illegal_participation.sql b/contrib/dolphin/sql/timediff_illegal_participation.sql index 852b579f6..8303039ab 100644 --- a/contrib/dolphin/sql/timediff_illegal_participation.sql +++ b/contrib/dolphin/sql/timediff_illegal_participation.sql @@ -14,6 +14,8 @@ insert into test values(timediff(B'101', B'1011')); insert into test values(timediff(B'1011', B'101')); insert into test values(timediff(B'101', B'1100')); insert into test values(timediff(B'1100', B'101')); +insert into test values(TIMEDIFF(CURRENT_DATE, '2004-12-07')); +insert into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); insert ignore into test values(timediff(B'101', B'1010')); insert ignore into test values(timediff(B'1010', B'101')); insert ignore into test values(timediff(B'101', B'100000')); @@ -26,6 +28,10 @@ insert ignore into test values(timediff(B'101', B'1011')); insert ignore into test values(timediff(B'1011', B'101')); insert ignore into test values(timediff(B'101', B'1100')); insert ignore into test values(timediff(B'1100', B'101')); +insert ignore into test values(TIMEDIFF(CURRENT_DATE, 
'2004-12-07')); +insert ignore into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); +SELECT TIMEDIFF(CURRENT_DATE, '2004-12-07'); +SELECT TIMEDIFF('2004-12-07', CURRENT_DATE); select c from test order by c; set dolphin.sql_mode = ''; select timediff('asdf', '00:00:00'); @@ -62,6 +68,8 @@ insert into test values(timediff(B'101', B'1011')); insert into test values(timediff(B'1011', B'101')); insert into test values(timediff(B'101', B'1100')); insert into test values(timediff(B'1100', B'101')); +insert into test values(TIMEDIFF(CURRENT_DATE, '2004-12-07')); +insert into test values(TIMEDIFF('2004-12-07', CURRENT_DATE)); select c from test order by c; drop table test; reset current_schema; -- Gitee From 4e7602d3fd37cd9f42e7e885579087a331305f23 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Sun, 8 Oct 2023 19:13:23 +0800 Subject: [PATCH 009/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbit=E8=BD=ACuint?= =?UTF-8?q?=E7=BC=BA=E9=99=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/uint_cast.out | 26 +++++++++++++++++++++ contrib/dolphin/plugin_utils/adt/varbit.cpp | 12 +++++++--- contrib/dolphin/sql/uint_cast.sql | 7 ++++++ 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/uint_cast.out b/contrib/dolphin/expected/uint_cast.out index 03d14796d..15fd9b478 100644 --- a/contrib/dolphin/expected/uint_cast.out +++ b/contrib/dolphin/expected/uint_cast.out @@ -1666,6 +1666,32 @@ select (1)::uint8::bool; t (1 row) +set dolphin.b_compatibility_mode = on; +select '-125'::bit(64)::uint1; +WARNING: tinyint unsigned out of range +CONTEXT: referenced column: uint1 + uint1 +------- + 255 +(1 row) + +select '-125'::bit(64)::uint2; +WARNING: smallint unsigned out of range +CONTEXT: referenced column: uint2 + uint2 +------- + 65535 +(1 row) + +select '-123456'::bit(64)::uint4; +WARNING: int unsigned out of range +CONTEXT: referenced column: uint4 + uint4 +------------ + 4294967295 +(1 row) 
+ +reset dolphin.b_compatibility_mode; -- ä¸¥æ ¼æ¨¡å¼ drop table if exists t_longtext; NOTICE: table "t_longtext" does not exist, skipping diff --git a/contrib/dolphin/plugin_utils/adt/varbit.cpp b/contrib/dolphin/plugin_utils/adt/varbit.cpp index de86a68ca..7e608f6d4 100644 --- a/contrib/dolphin/plugin_utils/adt/varbit.cpp +++ b/contrib/dolphin/plugin_utils/adt/varbit.cpp @@ -47,6 +47,9 @@ static int32 bit_cmp(VarBit* arg1, VarBit* arg2, int leadingZeroLen1 = -1, int l extern "C" Datum ui8toi1(PG_FUNCTION_ARGS); extern "C" Datum ui8toi2(PG_FUNCTION_ARGS); extern "C" Datum ui8toi4(PG_FUNCTION_ARGS); +extern "C" Datum ui8toui1(PG_FUNCTION_ARGS); +extern "C" Datum ui8toui2(PG_FUNCTION_ARGS); +extern "C" Datum ui8toui4(PG_FUNCTION_ARGS); extern "C" Datum date_int8(PG_FUNCTION_ARGS); extern "C" Datum datetime_float(PG_FUNCTION_ARGS); extern "C" Datum timestamptz_int8(PG_FUNCTION_ARGS); @@ -1577,7 +1580,8 @@ Datum bittotinyint(VarBit* arg, bool isUnsigned) int errlevel = SQL_MODE_STRICT() ? ERROR : WARNING; if (GetSessionContext()->enableBCmptMode) { - result = (uint8)DirectFunctionCall1(ui8toi1, Int64GetDatum(bittobigint(arg, true))); + result = isUnsigned ? (uint8)DirectFunctionCall1(ui8toui1, Int64GetDatum(bittobigint(arg, true))) : + (uint8)DirectFunctionCall1(ui8toi1, Int64GetDatum(bittobigint(arg, true))); PG_RETURN_INT8(result); } @@ -1611,7 +1615,8 @@ Datum bittosmallint(VarBit* arg, bool isUnsigned) int errlevel = SQL_MODE_STRICT() ? ERROR : WARNING; if (GetSessionContext()->enableBCmptMode) { - result = (uint16)DirectFunctionCall1(ui8toi2, Int64GetDatum(bittobigint(arg, true))); + result = isUnsigned ? (uint16)DirectFunctionCall1(ui8toui2, Int64GetDatum(bittobigint(arg, true))) : + (uint16)DirectFunctionCall1(ui8toi2, Int64GetDatum(bittobigint(arg, true))); PG_RETURN_INT16(result); } @@ -1645,7 +1650,8 @@ Datum bittoint(VarBit* arg, bool isUnsigned) int errlevel = SQL_MODE_STRICT() ? 
ERROR : WARNING; if (GetSessionContext()->enableBCmptMode) { - result = (uint32)DirectFunctionCall1(ui8toi4, Int64GetDatum(bittobigint(arg, true))); + result = isUnsigned ? (uint32)DirectFunctionCall1(ui8toui4, Int64GetDatum(bittobigint(arg, true))) : + (uint32)DirectFunctionCall1(ui8toi4, Int64GetDatum(bittobigint(arg, true))); PG_RETURN_INT32(result); } diff --git a/contrib/dolphin/sql/uint_cast.sql b/contrib/dolphin/sql/uint_cast.sql index afb22937b..eece79331 100644 --- a/contrib/dolphin/sql/uint_cast.sql +++ b/contrib/dolphin/sql/uint_cast.sql @@ -283,6 +283,13 @@ select (1)::uint4::bool; select (0)::uint8::bool; select (1)::uint8::bool; + +set dolphin.b_compatibility_mode = on; +select '-125'::bit(64)::uint1; +select '-125'::bit(64)::uint2; +select '-123456'::bit(64)::uint4; +reset dolphin.b_compatibility_mode; + -- ä¸¥æ ¼æ¨¡å¼ drop table if exists t_longtext; create table t_longtext(a longtext); -- Gitee From 7cfcd9a8e9a696a44e6d307b837e6b705a6239dc Mon Sep 17 00:00:00 2001 From: laishenghao Date: Mon, 9 Oct 2023 10:16:55 +0800 Subject: [PATCH 010/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5server=20PR=204249?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_parser/analyze.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index 6094f31a9..b39e3b108 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -1691,11 +1691,15 @@ static void SetUpsertAttrnoState(ParseState* pstate, List *targetList) } } -static RightRefState* MakeRightRefState() +static RightRefState* MakeRightRefStateIfSupported(SelectStmt* selectStmt) { - RightRefState* refState = (RightRefState*)palloc0(sizeof(RightRefState)); - refState->isSupported = !IsInitdb && DB_IS_CMPT(B_FORMAT); - return refState; + bool isSupported = 
DB_IS_CMPT(B_FORMAT) && selectStmt && selectStmt->valuesLists && !IsInitdb; + if (isSupported) { + RightRefState* refState = (RightRefState*)palloc0(sizeof(RightRefState)); + refState->isSupported = true; + return refState; + } + return nullptr; } /* @@ -1724,7 +1728,7 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) /* There can't be any outer WITH to worry about */ AssertEreport(pstate->p_ctenamespace == NIL, MOD_OPT, "para should be NIL"); - RightRefState* rightRefState = MakeRightRefState(); + RightRefState* rightRefState = MakeRightRefStateIfSupported((SelectStmt*)stmt->selectStmt); qry->commandType = CMD_INSERT; pstate->p_is_insert = true; @@ -2241,7 +2245,7 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) exprList = transformInsertRow(pstate, exprList, stmt->cols, icolumns, attrnos); } - if (rightRefState->isSupported) { + if (IS_SUPPORT_RIGHT_REF(rightRefState)) { SetInsertAttrnoState(pstate, attrnos, list_length(exprList)); } @@ -2338,7 +2342,7 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) } else { qry->rightRefState = nullptr; pstate->rightRefState = nullptr; - pfree(rightRefState); + pfree_ext(rightRefState); } return qry; -- Gitee From 5e5fc9ef4517bb5c6a54e9fee75fc0ebca4c8a4b Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Mon, 9 Oct 2023 15:28:27 +0800 Subject: [PATCH 011/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=95=B0=E5=80=BC?= =?UTF-8?q?=E8=BD=ACdate=E6=8A=A5=E9=94=99=E7=BC=BA=E9=99=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 28 ++- contrib/dolphin/expected/uint_cast3.out | 178 ++++++++++++++---- contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_utils/adt/date.cpp | 17 +- contrib/dolphin/sql/uint_cast3.sql | 9 + 5 files changed, 193 insertions(+), 41 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out 
b/contrib/dolphin/expected/b_compatibility_time_type.out index 8b454f041..e8b466876 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -307,8 +307,13 @@ SELECT 10228::date; (1 row) SELECT 10229::date; -ERROR: Out of range value for date +WARNING: Out of range value for date CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + SELECT 991231::date; date ------------ @@ -328,14 +333,29 @@ SELECT 21000228::date; (1 row) SELECT 21000229::date; -ERROR: Out of range value for date +WARNING: Out of range value for date CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + SELECT 100000000::date; -ERROR: Out of range value for date +WARNING: Out of range value for date CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + SELECT 100101231::date; -ERROR: Out of range value for date +WARNING: Out of range value for date CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + -- test time -- '[-][D] hh:mm:ss.fsec' SELECT time(7)'12:12:12.123456'; diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index 63e586177..b0eeb7c25 100644 --- a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -1,7 +1,5 @@ -drop database if exists uint_cast3; -NOTICE: database "uint_cast3" does not exist, skipping -create database uint_cast3 dbcompatibility 'b'; -\c uint_cast3 +create schema uint_cast3; +set current_schema to 'uint_cast3'; select 1::uint1::int16; int16 ------- @@ -99,9 +97,9 @@ select 1::clob::uint8; (1 row) select 1::uint4::abstime; - abstime ------------------------------- - Wed Dec 31 16:00:01 1969 PST + abstime +------------------------ + 1969-12-31 16:00:01-08 (1 row) select 1::abstime::uint4; @@ -341,43 +339,115 @@ select 151503::uint4::time; select 19990101::uint4::date; date ------------ - 01-01-1999 + 1999-01-01 (1 row) 
select 19990101::uint4::timestamp; - timestamptz ------------------------------- - Fri Jan 01 00:00:00 1999 PST + timestamptz +------------------------ + 1999-01-01 00:00:00-08 (1 row) select 19990101::uint4::timestamptz; - timestamptz ------------------------------- - Fri Jan 01 00:00:00 1999 PST + timestamptz +------------------------ + 1999-01-01 00:00:00-08 (1 row) select 19990101::uint4::datetime; - timestamp --------------------------- - Fri Jan 01 00:00:00 1999 + timestamp +--------------------- + 1999-01-01 00:00:00 (1 row) select 19990101222222::uint8::timestamp; - timestamptz ------------------------------- - Fri Jan 01 22:22:22 1999 PST + timestamptz +------------------------ + 1999-01-01 22:22:22-08 (1 row) select 19990101222222::uint8::timestamptz; - timestamptz ------------------------------- - Fri Jan 01 22:22:22 1999 PST + timestamptz +------------------------ + 1999-01-01 22:22:22-08 (1 row) select 19990101222222::uint8::datetime; - timestamp --------------------------- - Fri Jan 01 22:22:22 1999 + timestamp +--------------------- + 1999-01-01 22:22:22 +(1 row) + +select '-1'::uint1::date; +WARNING: Cast to tinyint unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: date +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select '256'::uint1::date; +WARNING: tinyint unsigned out of range +CONTEXT: referenced column: date +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select 2004::uint2::date; +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select '65536'::uint2::date; +WARNING: smallint unsigned out of range +CONTEXT: referenced column: date +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select '65535'::uint2::date; +WARNING: Out of 
range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select 2067::uint8::date; +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select '-1'::uint8::date; +WARNING: Cast to bigint unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: date +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 +(1 row) + +select '18446744073709551615'::uint8::date; +WARNING: Out of range value for date +CONTEXT: referenced column: date + date +------------ + 0000-00-00 (1 row) select 1999::uint4::year; @@ -393,24 +463,32 @@ select 1999::year::uint4; (1 row) select (-1)::text::uint1; +WARNING: Cast to tinyint unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: uint1 uint1 ------- 255 (1 row) select (-1)::text::uint2; +WARNING: Cast to smallint unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: uint2 uint2 ------- 65535 (1 row) select (-1)::text::uint4; +WARNING: Cast to int unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: uint4 uint4 ------------ 4294967295 (1 row) select (-1)::text::uint8; +WARNING: Cast to bigint unsigned converted negative integer to it's positive complement +CONTEXT: referenced column: uint8 uint8 ---------------------- 18446744073709551615 @@ -441,16 +519,52 @@ select '18446744073709551615'::text::uint8; (1 row) select '256'::text::uint1; -ERROR: tinyint unsigned out of range +WARNING: tinyint unsigned out of range CONTEXT: referenced column: uint1 + uint1 +------- + 255 +(1 row) + select '65536'::text::uint2; -ERROR: smallint unsigned out of range +WARNING: smallint unsigned out of range CONTEXT: referenced column: uint2 + uint2 +------- + 65535 +(1 row) + select '4294967296'::text::uint4; -ERROR: int unsigned out of range +WARNING: 
int unsigned out of range CONTEXT: referenced column: uint4 + uint4 +------------ + 4294967295 +(1 row) + select '18446744073709551616'::text::uint8; -ERROR: bigint unsigned out of range +WARNING: bigint unsigned out of range CONTEXT: referenced column: uint8 -\c postgres -drop database uint_cast3; + uint8 +---------------------- + 18446744073709551615 +(1 row) + +SET dolphin.sql_mode = ''; +select cast('-0' as unsigned); + uint8 +------- + 0 +(1 row) + +create table t_uint(a uint1, b uint2, c uint4, d uint8); +insert into t_uint values('-0', '-0', '-0', '-0'); +select * from t_uint; + a | b | c | d +---+---+---+--- + 0 | 0 | 0 | 0 +(1 row) + +drop table t_uint; +drop schema uint_cast3 cascade; +reset current_schema; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index c5bbc99c9..fe1410cdc 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -40,7 +40,7 @@ test: builtin_funcs/bin builtin_funcs/char builtin_funcs/char_length builtin_fun test: builtin_funcs/db_b_hex builtin_funcs/db_b_if builtin_funcs/elt builtin_funcs/field builtin_funcs/find_in_set builtin_funcs/soundex builtin_funcs/space builtin_funcs/make_set builtin_funcs/between builtin_funcs/not_between -test: uint_agg uint_and uint_cast uint_cast2 uint_div uint_mi uint_mod uint_mod2 uint_mul uint_numeric uint_operator uint_or uint_partition uint_pl uint_xor +test: uint_agg uint_and uint_cast uint_cast2 uint_cast3 uint_div uint_mi uint_mod uint_mod2 uint_mul uint_numeric uint_operator uint_or uint_partition uint_pl uint_xor test: uint_procedure_col_bypass uint_smp uint_index uint_sql_mode uint_ignore uint_join signed_unsigned_cast uint_in implicit_cast uint_auto_increment signed_cast diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 900247e54..06acd22b2 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ 
b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -673,19 +673,28 @@ Datum int32_b_format_date(PG_FUNCTION_ARGS) int4 date = PG_GETARG_INT32(0); DateADT result; struct pg_tm tt, *tm = &tt; + int errlevel = SQL_MODE_STRICT() ? ERROR : WARNING; if (int32_b_format_date_internal(tm, date, true)) { - ereport(ERROR, + ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("Out of range value for date"))); + tm->tm_year = 0; + tm->tm_mon = 0; + tm->tm_mday = 0; } if (date == 0 && !SQL_MODE_STRICT() && SQL_MODE_NO_ZERO_DATE()) { ereport(WARNING, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("Out of range value for date"))); } - if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday)) - ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("date out of range: \"%d\"", date))); - + if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday)) { + ereport(errlevel, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("date out of range: \"%d\"", date))); + tm->tm_year = 0; + tm->tm_mon = 0; + tm->tm_mday = 0; + } result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; PG_RETURN_DATEADT(result); } diff --git a/contrib/dolphin/sql/uint_cast3.sql b/contrib/dolphin/sql/uint_cast3.sql index 65df9c4be..6eab983be 100644 --- a/contrib/dolphin/sql/uint_cast3.sql +++ b/contrib/dolphin/sql/uint_cast3.sql @@ -76,6 +76,15 @@ select 19990101222222::uint8::timestamp; select 19990101222222::uint8::timestamptz; select 19990101222222::uint8::datetime; +select '-1'::uint1::date; +select '256'::uint1::date; +select 2004::uint2::date; +select '65536'::uint2::date; +select '65535'::uint2::date; +select 2067::uint8::date; +select '-1'::uint8::date; +select '18446744073709551615'::uint8::date; + select 1999::uint4::year; select 1999::year::uint4; -- Gitee From 76c693a9810da16920762b49089d579d9e8b4e7d Mon Sep 17 00:00:00 2001 From: yuchao Date: Wed, 27 Sep 2023 10:52:03 +0800 Subject: [PATCH 012/434] 
=?UTF-8?q?=E4=BF=AE=E5=A4=8Ddate=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E7=94=A8=E4=BA=8Eselect=E8=AF=AD=E5=8F=A5=EF=BC=8C?= =?UTF-8?q?=E9=83=A8=E5=88=86=E7=BB=93=E6=9E=9C=E8=BF=94=E5=9B=9E=E6=9C=89?= =?UTF-8?q?=E8=AF=AF=EF=BC=8C=E5=92=8CM=E4=BE=A7=E4=B8=8D=E4=B8=80?= =?UTF-8?q?=E6=A0=B7=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 72 +++---------------- .../expected/builtin_funcs/db_b_if.out | 7 +- .../string_func_test/db_b_nameconst_test.out | 8 +-- contrib/dolphin/include/plugin_utils/date.h | 1 + .../dolphin/include/plugin_utils/datetime.h | 5 ++ .../dolphin/include/plugin_utils/timestamp.h | 2 + contrib/dolphin/plugin_parser/gram.y | 24 +++++-- contrib/dolphin/plugin_utils/adt/date.cpp | 37 ++++++++-- contrib/dolphin/plugin_utils/adt/datetime.cpp | 13 ++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 18 ++++- .../rollback_script/dolphin--2.0--1.1.sql | 2 +- .../rollback_script/dolphin--3.0--2.0.sql | 3 + .../upgrade_script/dolphin--2.0--3.0.sql | 6 ++ 13 files changed, 110 insertions(+), 88 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out b/contrib/dolphin/expected/b_compatibility_time_type.out index 8b454f041..17caa3444 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -124,25 +124,11 @@ SELECT date'2000-2-29'; (1 row) SELECT date'2001-2-29'; -WARNING: date/time field value out of range: "2001-2-29" -LINE 1: SELECT date'2001-2-29'; - ^ +ERROR: date/time field value out of range: "2001-2-29" CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'2100-2-29'; -WARNING: date/time field value out of range: "2100-2-29" -LINE 1: SELECT date'2100-2-29'; - ^ +ERROR: date/time field value out of range: "2100-2-29" CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT 
date'0000-1-1'; date ------------ @@ -150,15 +136,8 @@ SELECT date'0000-1-1'; (1 row) SELECT date'01-01'; -WARNING: invalid input syntax for type date: "01-01" -LINE 1: SELECT date'01-01'; - ^ +ERROR: invalid input syntax for type date: "01-01" CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'9-01-01'; date ------------ @@ -191,15 +170,8 @@ SELECT date'99991231'; (1 row) SELECT date'100000101'; -WARNING: date/time field value out of range: "100000101" -LINE 1: SELECT date'100000101'; - ^ +ERROR: date/time field value out of range: "100000101" CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'10000101'; date ------------ @@ -207,16 +179,9 @@ SELECT date'10000101'; (1 row) SELECT date'9990101'; -WARNING: date/time field value out of range: "9990101" -LINE 1: SELECT date'9990101'; - ^ +ERROR: date/time field value out of range: "9990101" HINT: Perhaps you need a different "datestyle" setting. CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'701010'; date ------------ @@ -230,26 +195,12 @@ SELECT date'691010'; (1 row) SELECT date'691310'; -WARNING: date/time field value out of range: "691310" -LINE 1: SELECT date'691310'; - ^ +ERROR: date/time field value out of range: "691310" HINT: Perhaps you need a different "datestyle" setting. CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'691131'; -WARNING: date/time field value out of range: "691131" -LINE 1: SELECT date'691131'; - ^ +ERROR: date/time field value out of range: "691131" CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'10101'; date ------------ @@ -257,16 +208,9 @@ SELECT date'10101'; (1 row) SELECT date'10130'; -WARNING: date/time field value out of range: "10130" -LINE 1: SELECT date'10130'; - ^ +ERROR: date/time field value out of range: "10130" HINT: Perhaps you need a different "datestyle" setting. 
CONTEXT: referenced column: date - date ------------- - 0000-00-00 -(1 row) - SELECT date'00120101'; date ------------ diff --git a/contrib/dolphin/expected/builtin_funcs/db_b_if.out b/contrib/dolphin/expected/builtin_funcs/db_b_if.out index 88fa252da..1200d2d5b 100644 --- a/contrib/dolphin/expected/builtin_funcs/db_b_if.out +++ b/contrib/dolphin/expected/builtin_funcs/db_b_if.out @@ -27,7 +27,8 @@ CREATE VIEW test_view as select '2022-01-30' as text_type, date '2022-01-30' as text_type | text | | extended | date_type | date | | plain | View definition: - SELECT '2022-01-30'::text AS text_type, '2022-01-30'::date AS date_type; + SELECT '2022-01-30'::text AS text_type, + date_cast('2022-01-30'::cstring, true) AS date_type; -- string to date select if (true, '2022-01-30', date '2022-01-30') as a, if (false, '2022-01-30', date '2022-01-30') as b; @@ -104,7 +105,7 @@ select if (true, 2.2::float8, 1::int4) as a, if (false, 2.2::float8, 1::int4) as select if (true, 'aaa'::binary(5), date '2022-01-30') as a, if (false, 'aaa'::binary(5), date '2022-01-30') as b; ERROR: CASE/ELSE could not convert type date to "binary" LINE 1: select if (true, 'aaa'::binary(5), date '2022-01-30') as a, ... - ^ + ^ CONTEXT: referenced column: a -- binary to numeric select if (true, 'aaa'::binary(5), 1.1::numeric(10, 2)) as a, if (false, 'aaa'::binary(5), 1.1::numeric(10, 2)) as b; @@ -128,7 +129,7 @@ CONTEXT: referenced column: a select if (true, 'aaa'::varbinary(5), date '2022-01-30') as a, if (false, 'aaa'::varbinary(5), date '2022-01-30') as b; ERROR: CASE/ELSE could not convert type date to "varbinary" LINE 1: select if (true, 'aaa'::varbinary(5), date '2022-01-30') as ... 
- ^ + ^ CONTEXT: referenced column: a -- varbinary to numeric select if (true, 'aaa'::varbinary(5), 1.1::numeric(10, 2)) as a, if (false, 'aaa'::varbinary(5), 1.1::numeric(10, 2)) as b; diff --git a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out index c88d933f5..aefc9cd55 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out @@ -273,11 +273,9 @@ SELECT NAME_CONST('1.23'::money, 123); (1 row) SELECT NAME_CONST(date '2021-12-31', E'\\u4F60\\u597D'); - 2021-12-31 --------------- - \u4F60\u597D -(1 row) - +ERROR: Incorrect arguments to NAME_CONST +HINT: 'NAME_CONST' function does not accept input of type FuncExpr +CONTEXT: referenced column: name_const -- å˜é‡æµ‹è¯• SET enable_set_variable_b_format = 1; SET @var1 = 100; diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index a8fd32b76..330110f76 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -82,6 +82,7 @@ extern void adjust_time_range(pg_tm *tm, fsec_t &fsec, bool &warnings); extern TimeADT time_in_with_flag(char *str, unsigned int date_flag); extern bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag); extern bool date_add_interval(DateADT date, Interval *span, DateADT *result); +extern Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst); extern "C" Datum time_float(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum date_enum(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum timestamp_enum(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/include/plugin_utils/datetime.h b/contrib/dolphin/include/plugin_utils/datetime.h index c928bbc61..009c81e6e 100644 --- a/contrib/dolphin/include/plugin_utils/datetime.h +++ b/contrib/dolphin/include/plugin_utils/datetime.h @@ -120,6 +120,11 @@ static const 
time_flags TIME_INVALID_DATES = 64; /* Allow 2000-02-31 */ extern bool cstring_to_datetime(const char* str, time_flags flags, int &tm_type, pg_tm *tm, fsec_t &fsec, int &nano, bool &warnings, bool *null_func_result, int* tzp = NULL, int* invalid_tz = NULL); +#ifdef DOLPHIN +extern void DateTimeParseErrorWithFlag(int dterr, const char* str, const char* datatype, bool can_ignore = false, + bool is_error = false); +extern void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype, int level); +#endif extern bool datetime_add_nanoseconds_with_round(pg_tm *tm, fsec_t &fsec, int nano); extern bool cstring_to_tm(const char *expr, pg_tm *tm, fsec_t &fsec, int* tzp = NULL, int* invalid_tz = NULL); diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 746697848..f44ffad20 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -135,6 +135,7 @@ extern bool datetime_in_with_sql_mode_internal(char *str, struct pg_tm *tm, fsec extern bool datetime_in_range(Timestamp datetime); extern int128 timestamp_int128(Timestamp timestamp); extern int128 timestamptz_int128(TimestampTz timestampTz); +extern Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); #endif @@ -142,5 +143,6 @@ extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); extern Datum datetime_text(PG_FUNCTION_ARGS); extern Datum time_text(PG_FUNCTION_ARGS); + #endif // !FRONTEND_PARSER #endif /* TIMESTAMP_H */ diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 72288f273..ad83cd793 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -36910,17 +36910,29 @@ AexprConst_without_Sconst: Iconst } | DATE_P SCONST { - TypeName * tmp = NULL; if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + 
TypeName * tmp = NULL; tmp = SystemTypeName("timestamp"); tmp->typmods = list_make1(makeIntConst(0,-1)); + tmp->location = @1; + tmp->end_location = @1 + DATE_LEN; + $$ = makeStringConstCast($2, @2, tmp); + } + else { + FuncCall *n = makeNode(FuncCall); + n->funcname = SystemFuncName("date_cast"); + n->colname = pstrdup("date"); + n->args = list_make2(makeStringConst($2, @2), makeBoolAConst(TRUE, -1)); + n->agg_order = NIL; + n->agg_star = FALSE; + n->agg_distinct = FALSE; + n->func_variadic = FALSE; + n->over = NULL; + n->location = @1; + n->call_func = false; + $$ = (Node *)n; } - else - tmp = SystemTypeName("date"); - tmp->location = @1; - tmp->end_location = @1 + DATE_LEN; - $$ = makeStringConstCast($2, @2, tmp); } | SMALLDATETIME SCONST { diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 900247e54..420c8ed53 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -66,7 +66,7 @@ bool check_pg_tm_time_part(pg_tm *tm, fsec_t fsec); extern const char* extract_numericstr(const char* str); extern "C" DLL_PUBLIC Datum uint8out(PG_FUNCTION_ARGS); static char* adjust_b_format_time(char *str, int *timeSign, int *D, bool *hasD); -int DatetimeDate(char *str, pg_tm *tm); +int DatetimeDate(char *str, pg_tm *tm, bool is_date_sconst = false); PG_FUNCTION_INFO_V1_PUBLIC(int8_b_format_time); extern "C" DLL_PUBLIC Datum int8_b_format_time(PG_FUNCTION_ARGS); @@ -192,6 +192,8 @@ PG_FUNCTION_INFO_V1_PUBLIC(time_float); extern "C" DLL_PUBLIC Datum time_float(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(date_int); extern "C" DLL_PUBLIC Datum date_int(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(date_cast); +extern "C" DLL_PUBLIC Datum date_cast(PG_FUNCTION_ARGS); #endif /* common code for timetypmodin and timetztypmodin */ static int32 anytime_typmodin(bool istz, ArrayType* ta) @@ -440,6 +442,18 @@ Datum b_db_statement_start_time(PG_FUNCTION_ARGS) * Given date text string, 
convert to internal date format. */ Datum date_in(PG_FUNCTION_ARGS) +#ifdef DOLPHIN +{ + return date_internal(fcinfo, false); +} + +Datum date_cast(PG_FUNCTION_ARGS) +{ + return date_internal(fcinfo, true); +} + +Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst) +#endif { char* str = PG_GETARG_CSTRING(0); DateADT date; @@ -481,7 +495,7 @@ Datum date_in(PG_FUNCTION_ARGS) */ dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr != 0) { - DateTimeParseError(dterr, str, "date", fcinfo->can_ignore); + DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, is_date_sconst); /* * if reporting warning in DateTimeParseError, return 1970-01-01 */ @@ -489,14 +503,14 @@ Datum date_in(PG_FUNCTION_ARGS) } if (dterr == 0) { if (ftype[0] == DTK_NUMBER && nf == 1) { - dterr = DatetimeDate(field[0], tm); + dterr = DatetimeDate(field[0], tm, is_date_sconst); dtype = DTK_DATE; } else { dterr = DecodeDateTimeForBDatabase(field, ftype, nf, &dtype, tm, &fsec, &tzp); } } if (dterr != 0) { - DateTimeParseError(dterr, str, "date", fcinfo->can_ignore); + DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, is_date_sconst); PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); } switch (dtype) { @@ -579,11 +593,20 @@ Datum input_date_in(char* str, bool can_ignore) } #ifdef DOLPHIN -int DatetimeDate(char *str, pg_tm *tm) +extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); + +int DatetimeDate(char *str, pg_tm *tm, bool is_date_sconst) { fsec_t fsec; - Datum datetime = DirectFunctionCall3(timestamp_in, CStringGetDatum(str), - ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + Datum datetime; + if (is_date_sconst) { + datetime = DirectFunctionCall3(timestamp_cast, CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + } else { + datetime = DirectFunctionCall3(timestamp_in, CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + } + if (timestamp2tm(datetime, NULL, tm, &fsec, NULL, 
NULL) != 0) { return ERRCODE_DATETIME_VALUE_OUT_OF_RANGE; } diff --git a/contrib/dolphin/plugin_utils/adt/datetime.cpp b/contrib/dolphin/plugin_utils/adt/datetime.cpp index cbaa5d3bb..7fa2d303e 100644 --- a/contrib/dolphin/plugin_utils/adt/datetime.cpp +++ b/contrib/dolphin/plugin_utils/adt/datetime.cpp @@ -3540,6 +3540,19 @@ int DecodeUnits(int field, const char* lowtoken, int* val) void DateTimeParseError(int dterr, const char* str, const char* datatype, bool can_ignore) { int level = can_ignore || !SQL_MODE_STRICT() ? WARNING : ERROR; +#ifdef DOLPHIN + DateTimeParseErrorInternal(dterr, str, datatype, level); +} + +void DateTimeParseErrorWithFlag(int dterr, const char* str, const char* datatype, bool can_ignore, bool is_error) +{ + int level = !is_error && (can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR; + DateTimeParseErrorInternal(dterr, str, datatype, level); +} + +void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype, int level) +{ +#endif switch (dterr) { case DTERR_FIELD_OVERFLOW: ereport(level, diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index e58fe93a2..d1eb03527 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -326,6 +326,8 @@ PG_FUNCTION_INFO_V1_PUBLIC(timestamp_xor_transfn); extern "C" DLL_PUBLIC Datum timestamp_xor_transfn(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(timestamp_agg_finalfn); extern "C" DLL_PUBLIC Datum timestamp_agg_finalfn(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_cast); +extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); #endif /* b format datetime and timestamp type */ @@ -517,6 +519,18 @@ bool TimestampTypeCheck(char* str, bool can_ignore, struct pg_tm* tm, Timestamp * Convert a string to internal form. 
*/ Datum timestamp_in(PG_FUNCTION_ARGS) +#ifdef DOLPHIN +{ + return timestamp_internal(fcinfo, false); +} + +Datum timestamp_cast(PG_FUNCTION_ARGS) +{ + return timestamp_internal(fcinfo, true); +} + +Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) +#endif { char* str = PG_GETARG_CSTRING(0); @@ -569,7 +583,7 @@ Datum timestamp_in(PG_FUNCTION_ARGS) */ dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", fcinfo->can_ignore); + DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, is_date_sconst); /* * if error ignorable, function DateTimeParseError reports warning instead, then return current timestamp. */ @@ -585,7 +599,7 @@ Datum timestamp_in(PG_FUNCTION_ARGS) } } if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", fcinfo->can_ignore); + DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, is_date_sconst); PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } switch (dtype) { diff --git a/contrib/dolphin/rollback_script/dolphin--2.0--1.1.sql b/contrib/dolphin/rollback_script/dolphin--2.0--1.1.sql index 34741db60..46e0e12ed 100644 --- a/contrib/dolphin/rollback_script/dolphin--2.0--1.1.sql +++ b/contrib/dolphin/rollback_script/dolphin--2.0--1.1.sql @@ -1919,4 +1919,4 @@ begin end if; end; $$ -language plpgsql; \ No newline at end of file +language plpgsql; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 12468ba0a..c6572ace7 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -1,3 +1,6 @@ DROP FUNCTION IF EXISTS pg_catalog.dolphin_invoke(); CREATE FUNCTION pg_catalog.dolphin_invoke() RETURNS VOID AS '$libdir/dolphin','dolphin_invoke' LANGUAGE C STRICT; + +DROP FUNCTION IF EXISTS pg_catalog.date_cast(cstring, boolean); +DROP FUNCTION IF EXISTS 
pg_catalog.timestamp_cast(cstring, oid, integer, boolean); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 12468ba0a..d13546fbb 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1,3 +1,9 @@ DROP FUNCTION IF EXISTS pg_catalog.dolphin_invoke(); CREATE FUNCTION pg_catalog.dolphin_invoke() RETURNS VOID AS '$libdir/dolphin','dolphin_invoke' LANGUAGE C STRICT; + +DROP FUNCTION IF EXISTS pg_catalog.date_cast(cstring, boolean); +CREATE OR REPLACE FUNCTION pg_catalog.date_cast(cstring, boolean) RETURNS date LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'date_cast'; + +DROP FUNCTION IF EXISTS pg_catalog.timestamp_cast(cstring, oid, integer, boolean); +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_cast(cstring, oid, integer, boolean) RETURNS timestamp without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'timestamp_cast'; -- Gitee From 65c35c97ef5e2374f1090ae0058e8dd74c4f4008 Mon Sep 17 00:00:00 2001 From: lukeman Date: Mon, 9 Oct 2023 21:50:07 +0800 Subject: [PATCH 013/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dvarlena=5Fcast?= =?UTF-8?q?=E7=B3=BB=E5=88=97=E5=87=BD=E6=95=B0=E4=BC=A0=E5=85=A5=E6=95=B0?= =?UTF-8?q?=E5=80=BC=E7=B1=BB=E5=9E=8B=E5=AF=BC=E8=87=B4=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E5=BA=93core=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/uint_cast2.out | 61 ++++++++++++++++++++ contrib/dolphin/plugin_utils/adt/int8.cpp | 14 ++++- contrib/dolphin/plugin_utils/adt/varlena.cpp | 24 ++++++-- contrib/dolphin/sql/uint_cast2.sql | 12 ++++ 4 files changed, 104 insertions(+), 7 deletions(-) diff --git a/contrib/dolphin/expected/uint_cast2.out b/contrib/dolphin/expected/uint_cast2.out index 222011ba8..e86845c69 100644 --- a/contrib/dolphin/expected/uint_cast2.out +++ b/contrib/dolphin/expected/uint_cast2.out @@ 
-835,5 +835,66 @@ select * from t1; reset dolphin.sql_mode; drop table if exists t1; +--test for db core +select varlena_cast_ui1(1); + varlena_cast_ui1 +------------------ + 1 +(1 row) + +select varlena_cast_ui2(1); + varlena_cast_ui2 +------------------ + 1 +(1 row) + +select varlena_cast_ui4(1); + varlena_cast_ui4 +------------------ + 1 +(1 row) + +select varlena_cast_ui8(1); + varlena_cast_ui8 +------------------ + 1 +(1 row) + +select varlena_cast_int8(1); + varlena_cast_int8 +------------------- + 1 +(1 row) + +select varlena2float8(1); + varlena2float8 +---------------- + 1 +(1 row) + +select varlena2numeric(1); + varlena2numeric +----------------- + 1 +(1 row) + +select Varlena2Bpchar(1); + varlena2bpchar +---------------- + 1 +(1 row) + +select Varlena2Varchar(1); + varlena2varchar +----------------- + 1 +(1 row) + +select Varlena2Text(1); + varlena2text +-------------- + 1 +(1 row) + drop schema uint_cast2 cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/int8.cpp b/contrib/dolphin/plugin_utils/adt/int8.cpp index 903c5fc50..d379d2c9f 100644 --- a/contrib/dolphin/plugin_utils/adt/int8.cpp +++ b/contrib/dolphin/plugin_utils/adt/int8.cpp @@ -1803,8 +1803,18 @@ Datum text_cast_int8(PG_FUNCTION_ARGS) Datum varlena_cast_int8(PG_FUNCTION_ARGS) { - Datum data = DirectFunctionCall1(textout, PG_GETARG_DATUM(0)); - int128 result = DatumGetInt128(DirectFunctionCall1(int16in, data)); + Datum txt = PG_GETARG_DATUM(0); + char* tmp = NULL; + int128 result; + Oid typeOutput = InvalidOid; + bool typIsVarlena = false; + getTypeOutputInfo(fcinfo->argTypes[0], &typeOutput, &typIsVarlena); + if (typIsVarlena) { + tmp = DatumGetCString(DirectFunctionCall1(textout, txt)); + } else { + tmp = DatumGetCString(OidOutputFunctionCall(typeOutput, txt)); + } + result = DatumGetInt128(DirectFunctionCall1(int16in, CStringGetDatum(tmp))); PG_RETURN_INT64(checkSignedRange(result, fcinfo)); } #endif diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp 
b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 04644de29..c31c2fd5c 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10591,10 +10591,24 @@ Datum blob_any_value(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(vlena); } +static char* AnyElementGetCString(Oid anyOid, Datum anyDatum) +{ + char* data = NULL; + Oid typeOutput = InvalidOid; + bool typIsVarlena = false; + getTypeOutputInfo(anyOid, &typeOutput, &typIsVarlena); + if (typIsVarlena) { + data = DatumGetCString(DirectFunctionCall1(textout, anyDatum)); + } else { + data = DatumGetCString(OidOutputFunctionCall(typeOutput, anyDatum)); + } + return data; +} + Datum Varlena2Float8(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); bool hasError = false; char* endptr = NULL; @@ -10611,7 +10625,7 @@ Datum Varlena2Float8(PG_FUNCTION_ARGS) Datum Varlena2Numeric(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); Datum result; result = DirectFunctionCall3(numeric_in, CStringGetDatum(data), ObjectIdGetDatum(0), Int32GetDatum(-1)); @@ -10623,7 +10637,7 @@ Datum Varlena2Numeric(PG_FUNCTION_ARGS) Datum Varlena2Bpchar(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); Datum result; result = DirectFunctionCall3(bpcharin, CStringGetDatum(data), ObjectIdGetDatum(0), Int32GetDatum(-1)); @@ -10635,7 +10649,7 @@ Datum Varlena2Bpchar(PG_FUNCTION_ARGS) Datum Varlena2Varchar(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); Datum result; result 
= DirectFunctionCall3(varcharin, CStringGetDatum(data), ObjectIdGetDatum(0), Int32GetDatum(-1)); @@ -10647,7 +10661,7 @@ Datum Varlena2Varchar(PG_FUNCTION_ARGS) Datum Varlena2Text(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); Datum result; result = DirectFunctionCall3(textin, CStringGetDatum(data), ObjectIdGetDatum(0), Int32GetDatum(-1)); diff --git a/contrib/dolphin/sql/uint_cast2.sql b/contrib/dolphin/sql/uint_cast2.sql index e4f2afb7f..7b267ed39 100644 --- a/contrib/dolphin/sql/uint_cast2.sql +++ b/contrib/dolphin/sql/uint_cast2.sql @@ -435,5 +435,17 @@ reset dolphin.sql_mode; drop table if exists t1; +--test for db core +select varlena_cast_ui1(1); +select varlena_cast_ui2(1); +select varlena_cast_ui4(1); +select varlena_cast_ui8(1); +select varlena_cast_int8(1); +select varlena2float8(1); +select varlena2numeric(1); +select Varlena2Bpchar(1); +select Varlena2Varchar(1); +select Varlena2Text(1); + drop schema uint_cast2 cascade; reset current_schema; \ No newline at end of file -- Gitee From 0942bc39d537b2b809d06d5a7d8034c70c9e4d49 Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Thu, 12 Oct 2023 20:40:26 +0800 Subject: [PATCH 014/434] =?UTF-8?q?=E5=B0=8F=E5=9E=8B=E5=8C=96=E6=94=AF?= =?UTF-8?q?=E6=8C=815.0.1=E5=8D=87=E7=BA=A7=E5=88=B05.1.1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/CMakeLists.txt | 6 ++++++ contrib/dolphin/cmake.sh | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/contrib/dolphin/CMakeLists.txt b/contrib/dolphin/CMakeLists.txt index ed9ed0390..845ec9517 100755 --- a/contrib/dolphin/CMakeLists.txt +++ b/contrib/dolphin/CMakeLists.txt @@ -181,6 +181,12 @@ install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0--3.0.sql install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--3.0--2.0.sql DESTINATION 
share/postgresql/extension/ ) +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0--2.0.1.sql + DESTINATION share/postgresql/extension/ +) +install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dolphin--2.0.1--2.0.sql + DESTINATION share/postgresql/extension/ +) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/openGauss_expr_dolphin.ir DESTINATION share/postgresql/extension/ ) diff --git a/contrib/dolphin/cmake.sh b/contrib/dolphin/cmake.sh index 3ff82b452..a188b1caa 100644 --- a/contrib/dolphin/cmake.sh +++ b/contrib/dolphin/cmake.sh @@ -18,6 +18,10 @@ rm -f dolphin--2.0--3.0.sql cp upgrade_script/dolphin--2.0--3.0.sql dolphin--2.0--3.0.sql rm -f dolphin--3.0--2.0.sql cp rollback_script/dolphin--3.0--2.0.sql dolphin--3.0--2.0.sql +rm -f dolphin--2.0--2.0.1.sql +cp upgrade_script/dolphin--2.0--2.0.1.sql dolphin--2.0--2.0.1.sql +rm -f dolphin--2.0.1--2.0.sql +cp rollback_script/dolphin--2.0.1--2.0.sql dolphin--2.0.1--2.0.sql cp llvmir/openGauss_expr_dolphin_${BUILD_TUPLE}.ir openGauss_expr_dolphin.ir DOLPHIN_CMAKE_BUILD_DIR=`pwd`/tmp_build [ -d "${DOLPHIN_CMAKE_BUILD_DIR}" ] && rm -rf ${DOLPHIN_CMAKE_BUILD_DIR} -- Gitee From f4d381bb12261cd15b3aea5cdff50ce540d2302f Mon Sep 17 00:00:00 2001 From: Mijamind Date: Mon, 16 Oct 2023 09:42:24 +0800 Subject: [PATCH 015/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E5=A4=9A=E6=9C=BA=E5=B9=B6=E8=A1=8Cbugfix=E3=80=91=20?= =?UTF-8?q?1.reset=20direct=20read=202.remote=20static=20func=203.set=20re?= =?UTF-8?q?lkind=20for=20RangeTblEntry=204.code=20cc?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/include/spqplugin.h | 1 + .../spq_plugin/src/executor/spq_seqscan.cpp | 218 +----------------- .../src/spq_optimizer_util/spq_wrappers.cpp | 10 +- .../translate/CTranslatorDXLToPlStmt.cpp | 3 + contrib/spq_plugin/src/spqplugin.cpp | 8 - 5 files changed, 11 insertions(+), 229 deletions(-) diff --git a/contrib/spq_plugin/include/spqplugin.h 
b/contrib/spq_plugin/include/spqplugin.h index 1c16d2ad2..d38553ff1 100644 --- a/contrib/spq_plugin/include/spqplugin.h +++ b/contrib/spq_plugin/include/spqplugin.h @@ -16,5 +16,6 @@ extern "C" void _PG_init(void); extern "C" void _PG_fini(void); extern "C" void spqplugin_invoke(void); +extern THR_LOCAL MemoryContext OptimizerMemoryContext; #endif // SPQPLUGIN_H diff --git a/contrib/spq_plugin/src/executor/spq_seqscan.cpp b/contrib/spq_plugin/src/executor/spq_seqscan.cpp index 37c3fdfd9..7caab8d63 100644 --- a/contrib/spq_plugin/src/executor/spq_seqscan.cpp +++ b/contrib/spq_plugin/src/executor/spq_seqscan.cpp @@ -17,13 +17,12 @@ * ExecEndSeqScan releases any storage allocated. * ExecReScanSeqScan rescans the relation */ +#include "postgres.h" +#include "storage/lock/lock.h" #include "storage/predicate.h" #include "access/valid.h" #include "utils/guc.h" #include "utils/builtins.h" -#include "libpq/pqformat.h" -#include "pgxc/execRemote.h" -#include "libpq/libpq.h" #include "storage/smgr/segment.h" #include "mpmcqueue.h" #include "executor/executor.h" @@ -275,112 +274,6 @@ struct DirectReadBuff { char* buff; }; -class SpqDirectReadBlockManager : public SpqPageManager { -public: - HeapScanDesc scan; - MpmcBoundedQueue pagequeue; - DirectReadBuff *currentPages; -public: - SpqDirectReadBlockManager(HeapScanDesc scan, ScanDirection direction) - : SpqPageManager(direction), scan(scan), pagequeue(PAGE_QUEUE_SIZE), currentPages(nullptr) { - scan->rs_base.rs_cbuf = InvalidBuffer; - scan->rs_base.rs_snapshot = SnapshotAny; - } - - SpqState FetchBlocks(uint32 start, uint32 end) - { - uint32 step = 0; - - do { - start = start + step; - step = seg_direct_read_get_range(start); - if (start + step - 1 >= end) { - step = end - start + 1; - } - - DirectReadBuff *buffer = (DirectReadBuff*)palloc(sizeof(DirectReadBuff) + BLOCKSIZE * step); - if (buffer == nullptr) { - elog(ERROR, "SpqDirectReadBlockManager: try palloc memory failed."); - } - bool enqueued = false; - for (int i = 0; i 
< MAX_ENQUEUE_TIME; ++i) { - if (pagequeue.Enqueue(buffer)) { - enqueued = true; - break; - } - } - if (!enqueued) { - pfree(buffer); - elog(ERROR, "SpqDirectReadBlockManager: try push buffer to page queue failed."); - } - buffer->buff = (char *)(buffer + 1); - buffer->start = start; - buffer->size = step; - buffer->current = InvalidBlockNumber; - buffer->currentPage = buffer->buff; - // sync read - seg_direct_read(scan->rs_base.rs_rd->rd_smgr, MAIN_FORKNUM, start, step, buffer->buff, buffer->locStart); - } while (start + step - 1 < end); - return SpqState::SPQ_SUCCESS; - } - - SpqState GetNewPage() - { - if (pagequeue.Empty() && currentPages == nullptr) { - return SpqState::SPQ_QUEUE_EMPTY; - } - - while (true) { - // if currentPage is empty, try get a new page from pagequeue - if (currentPages == nullptr) { - if (!pagequeue.Dequeue(currentPages)) { - return SpqState::SPQ_QUEUE_EMPTY; - } - } - - if (currentPages->current == InvalidBlockNumber) { - currentPages->current = 0; - } else { - currentPages->current++; - } - while (currentPages->current < currentPages->size) { - currentPages->currentPage = currentPages->buff + BLOCKSIZE * currentPages->current; - if (PageIsVerified(currentPages->currentPage, currentPages->locStart + currentPages->current)) { - if (ScanDirectionIsForward(direction)) { - currentPages->lineOff = FirstOffsetNumber; - } else if (ScanDirectionIsBackward(direction)) { - currentPages->lineOff = PageGetMaxOffsetNumber(currentPages->currentPage); - } else { - return SpqState::SPQ_QUERY_END; - } - return SpqState::SPQ_SUCCESS; - } - currentPages->current++; - } - - pfree(currentPages); - currentPages = nullptr; - } - } - - bool GetTupleFromPage(TupleTableSlot* slot) - { - if (currentPages == nullptr) { - return false; - } - - return GetNextTupleFromPage(scan, currentPages->currentPage, direction, currentPages->lineOff, slot); - } - - void Rescan(TableScanDesc scanDesc) - { - while (pagequeue.Dequeue(currentPages)) { - pfree(currentPages); - } - 
currentPages = nullptr; - } -}; - class SpqLocalBlockManager : public SpqBlockManager { public: uint32 instanceID; @@ -438,104 +331,6 @@ public: } } }; -SpqAdpScanPagesRes adps_get_adps_response(int plan_node_id, ScanDirection direction, uint32 nblocks, int64_t iter_no) -{ - StringInfoData buffer; - SpqAdpScanPagesRes seqRes; - int request_size, response_size, code; - char *encoded_msg; - const char *msg_buffer; - SpqAdpScanPagesReq req = { - .plan_node_id = plan_node_id, - .direction = direction, - .nblocks = nblocks, - .cur_scan_iter_no = iter_no, - }; - - request_size = sizeof(SpqAdpScanPagesReq); - encoded_msg = (char *)palloc(request_size * 2 + 1); - hex_encode((const char *)&req, request_size, encoded_msg); - encoded_msg[request_size * 2] = '\0'; - pq_beginmessage(&buffer, 'a'); - pq_sendstring(&buffer, encoded_msg); - pfree(encoded_msg); - pq_endmessage_reuse(&buffer); - - /* Must flush this message */ - if (EOF == pq_flush()) { - elog(ERROR, "block_iter: can`t send paging response"); - goto finish; - } - response_size = sizeof(SpqAdpScanPagesRes); - - code = pq_getbyte(); - if (code == EOF) { - elog(ERROR, "block_iter: can`t get paging response"); - goto finish; - } - if (code != ADPS_RESPONSE_PAGE) { - elog(ERROR, "block_iter: get an error paging response, code:%d", code); - goto finish; - } - if (EOF == pq_getmessage(&buffer, response_size + sizeof(int))) { - elog(ERROR, "block_iter: can`t get paging response"); - goto finish; - } - msg_buffer = pq_getmsgbytes(&buffer, response_size); - memcpy(&seqRes, msg_buffer, response_size); - -finish: - pfree(buffer.data); - - return seqRes; -} - -class SpqAdaptiveBlockManager : public SpqBlockManager { -public: - uint32 maxBlockNum; - int plan_node_id; - int64_t iter_no; - bool isBlockEnd; - uint32 end; -public: - SpqAdaptiveBlockManager(uint32 maxBlockNum, ScanDirection direction, int plan_node_id, uint32 step) - : SpqBlockManager(direction, step), maxBlockNum(maxBlockNum), plan_node_id(plan_node_id) - { - 
isBlockEnd = false; - iter_no = 0; - end = InvalidBlockNumber; - } - - SpqState GetBlockIDs(uint32 &start, uint32 &end) - { - SpqAdpScanPagesRes response = adps_get_adps_response(plan_node_id, SpqBlockManager::direction, maxBlockNum, iter_no); - if (response.success == false) { - isBlockEnd = true; - return SPQ_SUCCESS; - } - start = response.page_start; - end = response.page_end; - this->end = end; - - return SPQ_SUCCESS; - } - - bool IsBlockEnd() - { - if (ScanDirectionIsNoMovement(direction)) { - // has no direction, means will not get new page for scanning. - return true; - } else { - return isBlockEnd; - } - } - - void Rescan() - { - ++iter_no; - isBlockEnd = false; - } -}; TupleTableSlot* SpqScanNext(ScanState* node) { @@ -645,8 +440,6 @@ SpqSeqScanState* ExecInitSpqSeqScan(SpqSeqScan* node, EState* estate, int eflags ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("error relation type."))); } spqScan->pageManager = New(CurrentMemoryContext) SpqBufmgrPageManager(scanDesc, estate->es_direction); - } else { - spqScan->pageManager = New(CurrentMemoryContext) SpqDirectReadBlockManager(scanDesc, estate->es_direction); } SpqBlockManager* blockManager = nullptr; @@ -657,11 +450,6 @@ SpqSeqScanState* ExecInitSpqSeqScan(SpqSeqScan* node, EState* estate, int eflags seqScan->ss_currentScanDesc->rs_nblocks, estate->es_direction, FETCH_BLOCK_NUM); - } else if (node->isAdaptiveScan) { - blockManager = New(CurrentMemoryContext) SpqAdaptiveBlockManager(seqScan->ss_currentScanDesc->rs_nblocks, - estate->es_direction, - node->scan.plan.plan_node_id, - FETCH_BLOCK_NUM); } else { int sliceNumber; int instanceID; @@ -726,4 +514,4 @@ void restore_spqseqscan_hook() exec_spqscan_hook = backup_exec_spqscan_hook; end_spqscan_hook = backup_end_spqscan_hook; spqscan_rescan_hook = backup_spqscan_rescan_hook; -} \ No newline at end of file +} diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index 
5fbbe758f..30b92728d 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -21,6 +21,7 @@ //--------------------------------------------------------------------------- #include "spq_optimizer_util/spq_wrappers.h" +#include "spqplugin.h" #include "spqos/base.h" #include "spqos/error/CAutoExceptionStack.h" @@ -34,6 +35,7 @@ #include "utils/memutils.h" #include "utils/numeric.h" #include "utils/lsyscache.h" +#include "utils/selfuncs.h" #include "spq/spq_util.h" #include "spq/spq_hash.h" #include "optimizer/clauses.h" @@ -55,10 +57,6 @@ } else { \ SPQOS_RAISE(spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError); \ } - -extern double extern_convert_timevalue_to_scalar(Datum value, Oid typid); -extern double extern_numeric_to_double_no_overflow(Numeric num); - using namespace spqos; bool @@ -1734,7 +1732,7 @@ spqdb::NumericToDoubleNoOverflow(Numeric num) { SPQ_WRAP_START; { - return extern_numeric_to_double_no_overflow(num); + return numeric_to_double_no_overflow(num); } SPQ_WRAP_END; return 0.0; @@ -1756,7 +1754,7 @@ spqdb::ConvertTimeValueToScalar(Datum datum, Oid typid) { SPQ_WRAP_START; { - return extern_convert_timevalue_to_scalar(datum, typid); + return convert_timevalue_to_scalar(datum, typid); } SPQ_WRAP_END; return 0.0; diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index 9fb9d60a6..ced7f3416 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -4845,6 +4845,9 @@ CTranslatorDXLToPlStmt::TranslateDXLTblDescrToRangeTblEntry( base_table_context->SetOID(oid); base_table_context->SetRelIndex(index); + /* spq */ + rte->relkind = get_rel_relkind(rte->relid); + Alias *alias = MakeNode(Alias); alias->colnames = NIL; diff --git 
a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index 6947d9024..306d16f38 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -27,8 +27,6 @@ PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(spqplugin_invoke); -const uint SPQ_VERSION_NUM = 92904; - THR_LOCAL spq_planner_hook_type backup_spq_planner_hook = NULL; THR_LOCAL bool HOOK_INIT = false; THR_LOCAL MemoryContext OptimizerMemoryContext = NULL; @@ -230,12 +228,6 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b void _PG_init(void) { - if (SPQ_VERSION_NUM != GRAND_VERSION_NUM) { - ereport(WARNING, (errmsg("SPQ_VERSION_NUM:%d and GRAND_VERSION_NUM:%d do not match!", - SPQ_VERSION_NUM, GRAND_VERSION_NUM))); - return; - } - if (!HOOK_INIT) { backup_spq_planner_hook = spq_planner_hook; spq_planner_hook = spq_optimize_query; -- Gitee From 3e516ceee27956c89be8e6228d1bb33f710910aa Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 16 Oct 2023 20:31:32 +0800 Subject: [PATCH 016/434] Fix test case bug. 
--- contrib/dolphin/expected/pad_char_to_full_length.out | 2 +- contrib/dolphin/input/sqlmode_strict.source | 2 +- contrib/dolphin/output/sqlmode_strict.source | 2 +- contrib/dolphin/sql/pad_char_to_full_length.sql | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/dolphin/expected/pad_char_to_full_length.out b/contrib/dolphin/expected/pad_char_to_full_length.out index 2d9d44db4..e385cc714 100644 --- a/contrib/dolphin/expected/pad_char_to_full_length.out +++ b/contrib/dolphin/expected/pad_char_to_full_length.out @@ -1,4 +1,4 @@ -create schema pad_char_to_full_length; +create schema pad_char_to_full_length CHARACTER SET ='utf8' COLLATE ='utf8_bin'; set current_schema to 'pad_char_to_full_length'; create table t (name char(100)); insert into t values('test'); diff --git a/contrib/dolphin/input/sqlmode_strict.source b/contrib/dolphin/input/sqlmode_strict.source index d3bff253a..dd305ed33 100755 --- a/contrib/dolphin/input/sqlmode_strict.source +++ b/contrib/dolphin/input/sqlmode_strict.source @@ -1,5 +1,5 @@ -- test strict mode -create schema strict_mode_test; +create schema strict_mode_test CHARACTER SET ='utf8' COLLATE ='utf8_bin'; set current_schema to 'strict_mode_test'; set dolphin.b_compatibility_mode = on; set dolphin.sql_mode = 'sql_mode_strict,ansi_quotes,auto_recompile_function'; diff --git a/contrib/dolphin/output/sqlmode_strict.source b/contrib/dolphin/output/sqlmode_strict.source index 014a6c47d..d9abc71cf 100644 --- a/contrib/dolphin/output/sqlmode_strict.source +++ b/contrib/dolphin/output/sqlmode_strict.source @@ -1,5 +1,5 @@ -- test strict mode -create schema strict_mode_test; +create schema strict_mode_test CHARACTER SET ='utf8' COLLATE ='utf8_bin'; set current_schema to 'strict_mode_test'; set dolphin.b_compatibility_mode = on; set dolphin.sql_mode = 'sql_mode_strict,ansi_quotes,auto_recompile_function'; diff --git a/contrib/dolphin/sql/pad_char_to_full_length.sql b/contrib/dolphin/sql/pad_char_to_full_length.sql index 
4592c44ee..269acb0e2 100644 --- a/contrib/dolphin/sql/pad_char_to_full_length.sql +++ b/contrib/dolphin/sql/pad_char_to_full_length.sql @@ -1,4 +1,4 @@ -create schema pad_char_to_full_length; +create schema pad_char_to_full_length CHARACTER SET ='utf8' COLLATE ='utf8_bin'; set current_schema to 'pad_char_to_full_length'; create table t (name char(100)); -- Gitee From a78c62c17b3596c5d3df67df9aac3d6ded3a2539 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Mon, 16 Oct 2023 11:37:41 +0800 Subject: [PATCH 017/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3spqplugin=E5=92=8Cdol?= =?UTF-8?q?phin=E7=BC=96=E8=AF=91=E5=86=B2=E7=AA=81=20&=20add=20orca=20get?= =?UTF-8?q?=5Fdistribute=5Fkey=20error=20handle?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/src/guc_spq.cpp | 1 + contrib/spq_plugin/src/spq/spq_mutate.cpp | 6 +++++- contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp | 3 ++- .../translate/CTranslatorRelcacheToDXL.cpp | 4 ++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 65da97389..60bda85eb 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -27,6 +27,7 @@ #include "spq/spq_vars.h" #include "spq/spq_util.h" +#include "pgxc/nodemgr.h" const char* sync_guc_names_array[] = { "log_min_messages", diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index 6ba1cc08b..e2e24d91d 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -573,7 +573,11 @@ List* make_distributed_key_by_groupingset(PlannerInfo* root, Plan *subplan, List return nullptr; } double multiple = 0.0; - return get_distributekey_from_tlist(root, subplan->targetlist, groupcls, subplan->plan_rows, &multiple, nullptr); + List* distributed = spq_get_distributekey_from_tlist(root, subplan->targetlist, groupcls, 
subplan->plan_rows, &multiple, nullptr); + if (distributed == nullptr) { + ereport(ERROR, (errmsg("get_distributekey_from_tlist fail"))); + } + return distributed; } Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index 30b92728d..d5b38c398 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -45,6 +45,7 @@ #include "catalog/pg_aggregate.h" #include "parser/parse_agg.h" #include "spqos/error/CAutoExceptionStack.h" +#include "parser/parse_coerce.h" #define SPQ_WRAP_START \ sigjmp_buf local_sigjmp_buf; \ @@ -975,7 +976,7 @@ spqdb::GetComparisonOperator(Oid left_oid, Oid right_oid, unsigned int cmpt) SPQ_WRAP_START; { /* catalog tables: pg_amop */ - return get_comparison_operator(left_oid, right_oid, (CmpType) cmpt); + return get_comparison_operator(left_oid, right_oid, (SPQCmpType) cmpt); } SPQ_WRAP_END; return InvalidOid; diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp index 5e0d89c6f..4faadf2ad 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp @@ -1793,7 +1793,7 @@ CTranslatorRelcacheToDXL::RetrieveScOp(CMemoryPool *mp, IMDId *mdid) } // get comparison type - CmpType cmpt = (CmpType) spqdb::GetComparisonType(op_oid); + SPQCmpType cmpt = (SPQCmpType) spqdb::GetComparisonType(op_oid); IMDType::ECmpType cmp_type = ParseCmpType(cmpt); // get func oid @@ -2818,7 +2818,7 @@ CTranslatorRelcacheToDXL::RetrieveScCmp(CMemoryPool *mp, IMDId *mdid) OID left_oid = CMDIdSPQDB::CastMdid(mdid_left)->Oid(); OID right_oid = CMDIdSPQDB::CastMdid(mdid_right)->Oid(); - CmpType cmpt = (CmpType) 
GetComparisonType(cmp_type); + SPQCmpType cmpt = (SPQCmpType) GetComparisonType(cmp_type); OID scalar_cmp_oid = spqdb::GetComparisonOperator(left_oid, right_oid, cmpt); -- Gitee From 515c7da2cf99d52a3fb92cb89eb7868a16eb512c Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 16 Oct 2023 14:53:37 +0800 Subject: [PATCH 018/434] Sync server code. d13021a1512cc0430447400c85a2fc904b49c638 --- contrib/dolphin/expected/charset_gbk_b_db.out | 24 ++-- .../dolphin/expected/charset_utf8mb4_b_db.out | 24 ++-- contrib/dolphin/include/builtin_funcs.ini | 4 +- .../dolphin/include/plugin_nodes/parsenodes.h | 29 +++- .../include/plugin_nodes/parsenodes_common.h | 15 ++- .../include/plugin_optimizer/clauses.h | 11 +- .../include/plugin_optimizer/plancat.h | 4 + .../include/plugin_optimizer/planmain.h | 5 + .../include/plugin_optimizer/planner.h | 9 ++ .../include/plugin_parser/parse_coerce.h | 3 + .../dolphin/include/plugin_utils/plpgsql.h | 1 + contrib/dolphin/plugin_executor/execQual.cpp | 30 ++++- .../plugin_optimizer/commands/copy.cpp | 40 +++++- .../plugin_optimizer/commands/typecmds.cpp | 14 +- .../dolphin/plugin_optimizer/plan/planner.cpp | 108 ++++++++++++++- .../dolphin/plugin_optimizer/util/clauses.cpp | 107 +++++++++++++++ .../dolphin/plugin_optimizer/util/plancat.cpp | 41 ++++++ contrib/dolphin/plugin_parser/analyze.cpp | 3 + contrib/dolphin/plugin_parser/gram.y | 20 +-- .../dolphin/plugin_parser/parse_target.cpp | 3 + contrib/dolphin/plugin_pl/plpgsql/src/gram.y | 64 ++++++--- contrib/dolphin/plugin_utils/adt/numeric.cpp | 3 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 8 ++ contrib/dolphin/plugin_utils/adt/selfuncs.cpp | 4 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 6 +- contrib/dolphin/tablecmds.cpp | 125 ++++++++++++++++-- 26 files changed, 617 insertions(+), 88 deletions(-) diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index e2df8df89..81307c807 100644 --- 
a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -991,13 +991,13 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); result | pg_collation_for ---------------+------------------ - 高斯DB5.1.0 | utf8_general_ci + 高斯DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); result | pg_collation_for ---------------+------------------ - 5.1.0高斯DB | utf8_general_ci + 5.1.1高斯DB | utf8_general_ci (1 row) SELECT CONCAT('高斯DB', 123) result, collation for(result); @@ -1040,13 +1040,13 @@ SELECT CONCAT(NULL, '高斯DB') result, collation for(result); SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, collation for(result); result | pg_collation_for -------------+------------------ - ¸ß˹DB5.1.0 | utf8_general_ci + ¸ß˹DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, collation for(result); result | pg_collation_for -------------+------------------ - 5.1.0¸ß˹DB | utf8_general_ci + 5.1.1¸ß˹DB | utf8_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', 123) result, collation for(result); @@ -1090,13 +1090,13 @@ SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, collation for(result); SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); result | pg_collation_for ---------------+------------------ - 高斯DB5.1.0 | utf8_general_ci + 高斯DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); result | pg_collation_for ---------------+------------------ - 5.1.0高斯DB | utf8_general_ci + 5.1.1高斯DB | utf8_general_ci (1 row) SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); @@ -1139,13 +1139,13 @@ SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, collation for(result); result | 
pg_collation_for -------------+------------------ - ¸ß˹DB5.1.0 | utf8_general_ci + ¸ß˹DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, collation for(result); result | pg_collation_for -------------+------------------ - 5.1.0¸ß˹DB | utf8_general_ci + 5.1.1¸ß˹DB | utf8_general_ci (1 row) SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, collation for(result); @@ -2014,13 +2014,13 @@ LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- - 高斯db5.1.0 | utf8mb4_unicode_ci + 高斯db5.1.1 | utf8mb4_unicode_ci (1 row) SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- - 5.1.0高斯db | utf8mb4_unicode_ci + 5.1.1高斯db | utf8mb4_unicode_ci (1 row) SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; @@ -2107,13 +2107,13 @@ LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ - 高斯db5.1.0 | gbk_chinese_ci + 高斯db5.1.1 | gbk_chinese_ci (1 row) SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ - 5.1.0高斯DB | gbk_bin + 5.1.1高斯DB | gbk_bin (1 row) SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index e2b990d1d..64b89030c 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -1381,13 +1381,13 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); result | pg_collation_for -------------+------------------ - 高斯DB5.1.0 | utf8_general_ci + 高斯DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); result | pg_collation_for -------------+------------------ - 5.1.0高斯DB | utf8_general_ci + 5.1.1高斯DB | utf8_general_ci (1 row) SELECT CONCAT('高斯DB', 123) result, collation for(result); @@ -1430,13 +1430,13 @@ SELECT CONCAT(NULL, '高斯DB') result, collation for(result); SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, collation for(result); result | pg_collation_for ---------------+------------------ - 楂樻柉DB5.1.0 | utf8_general_ci + 楂樻柉DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, collation for(result); result | pg_collation_for ---------------+------------------ - 5.1.0楂樻柉DB | utf8_general_ci + 5.1.1楂樻柉DB | utf8_general_ci (1 row) SELECT CONCAT(_gbk'高斯DB', 123) result, collation for(result); @@ -1480,13 +1480,13 @@ 
SELECT CONCAT(NULL, _gbk'高斯DB') result, collation for(result); SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); result | pg_collation_for -------------+------------------ - 高斯DB5.1.0 | utf8_general_ci + 高斯DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); result | pg_collation_for -------------+------------------ - 5.1.0高斯DB | utf8_general_ci + 5.1.1高斯DB | utf8_general_ci (1 row) SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); @@ -1529,13 +1529,13 @@ SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, collation for(result); result | pg_collation_for ---------------+------------------ - 楂樻柉DB5.1.0 | utf8_general_ci + 楂樻柉DB5.1.1 | utf8_general_ci (1 row) SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, collation for(result); result | pg_collation_for ---------------+------------------ - 5.1.0楂樻柉DB | utf8_general_ci + 5.1.1楂樻柉DB | utf8_general_ci (1 row) SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, collation for(result); @@ -2962,13 +2962,13 @@ SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, collation for(result) FROM t SELECT CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- - 高斯db5.1.0 | utf8mb4_unicode_ci + 高斯db5.1.1 | utf8mb4_unicode_ci (1 row) SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- - 5.1.0高斯db | utf8mb4_unicode_ci + 5.1.1高斯db | utf8mb4_unicode_ci (1 row) SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; @@ -3075,13 +3075,13 @@ LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ - 高斯db5.1.0 | gbk_chinese_ci + 高斯db5.1.1 | gbk_chinese_ci (1 row) SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ - 5.1.0高斯DB | gbk_bin + 5.1.1高斯DB | gbk_bin (1 row) SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index d4c58715a..06396d5b3 100644 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -11001,8 +11001,8 @@ AddFuncGroup( ), AddFuncGroup( "standby_statement_history", 2, - AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(70, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(70, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(70, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", 
"client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(71, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(71, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(71, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(71, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 
25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(71, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(71, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(3119), _1("standby_statement_history"), 
_2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(72, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(72, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(72, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", 
"srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "statement_detail_decode", 1, diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 3a55fbd35..612cf6f2c 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -207,9 +207,15 @@ typedef enum RTEKind { #ifdef PGXC RTE_REMOTE_DUMMY, /* RTEs created by remote plan reduction */ #endif /* PGXC */ - RTE_RESULT /* RTE represents an empty FROM clause; such + RTE_RESULT, /* RTE represents an empty FROM clause; such * RTEs are added by the planner, they're not * present during parsing or rewriting */ +#ifdef USE_SPQ + RTE_NAMEDTUPLESTORE, + RTE_TABLEFUNC, /* TableFunc(.., column list) */ + RTE_VOID, /* CDB: deleted RTE */ + RTE_TABLEFUNCTION /* CDB: Functions over multiset input */ +#endif } RTEKind; typedef struct RangeTblEntry { @@ -374,6 +380,9 @@ typedef struct RangeTblEntry { * Select * from table_name subpartition (subpartition_name); * or delete from table_name partition (partition_name, ...) 
*/ +#ifdef USE_SPQ + bool forceDistRandom; +#endif } RangeTblEntry; /* @@ -2390,6 +2399,24 @@ typedef struct GetDiagStmt { List *condNum; } GetDiagStmt; +#ifdef USE_SPQ +typedef struct RangeTblFunction { + NodeTag type; + Node *funcexpr; /* expression tree for func call */ + int funccolcount; /* number of columns it contributes to RTE */ + /* These fields record the contents of a column definition list, if any: */ + List *funccolnames; /* column names (list of String) */ + List *funccoltypes; /* OID list of column type OIDS */ + List *funccoltypmods; /* integer list of column typmods */ + List *funccolcollations; /* OID list of column collation OIDS */ + + bytea *funcuserdata; /* describe function user data. assume bytea */ + + /* This is set during planning for use by the executor: */ + Bitmapset *funcparams; /* PARAM_EXEC Param IDs affecting this func */ +} RangeTblFunction; +#endif + extern inline NodeTag transform_node_tag(Node* raw_parse_tree) { if (!raw_parse_tree) { diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 08c380e08..83d29a974 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -1998,7 +1998,13 @@ typedef struct RightRefState { /* **************************************************************************** * Query Tree * *************************************************************************** */ - +#ifdef USE_SPQ +typedef uint8 ParentStmtType; +#define PARENTSTMTTYPE_NONE 0 +#define PARENTSTMTTYPE_CTAS 1 +#define PARENTSTMTTYPE_COPY 2 +#define PARENTSTMTTYPE_REFRESH_MATVIEW 3 +#endif /* * Query - * Parse analysis turns all statements into a Query tree @@ -2131,6 +2137,12 @@ typedef struct Query { RightRefState* rightRefState; List* withCheckOptions; /* a list of WithCheckOption's */ List* indexhintList; /* a list of b mode index hint members */ + +#ifdef USE_SPQ + void* 
intoPolicy; + ParentStmtType parentStmtType; + bool is_support_spq; +#endif } Query; /* ---------------------- @@ -2412,6 +2424,7 @@ typedef struct RenameStmt { char* subname; /* name of contained object (column, rule, * trigger, etc) */ char* newname; /* the new name */ + char* newschema; /* the new schema name */ DropBehavior behavior; /* RESTRICT or CASCADE behavior */ bool missing_ok; /* skip error if missing? */ List* renameTargetList = NULL; diff --git a/contrib/dolphin/include/plugin_optimizer/clauses.h b/contrib/dolphin/include/plugin_optimizer/clauses.h index f6a3c40a0..97486390a 100644 --- a/contrib/dolphin/include/plugin_optimizer/clauses.h +++ b/contrib/dolphin/include/plugin_optimizer/clauses.h @@ -50,6 +50,11 @@ typedef struct { List* active_fns; Node* case_val; bool estimate; +#ifdef USE_SPQ + bool recurse_queries; /* recurse into query structures */ + bool recurse_sublink_testexpr; /* recurse into sublink test expressions */ + Size max_size; /* max constant binary size in bytes, 0: no restrictions */ +#endif } eval_const_expressions_context; typedef enum { UNIQUE_CONSTRAINT, NOT_NULL_CONSTRAINT } constraintType; @@ -157,7 +162,11 @@ extern List* get_quals_lists(Node *jtnode); extern bool isTableofType(Oid typeOid, Oid* base_oid, Oid* indexbyType); extern Expr* simplify_function(Oid funcid, Oid result_type, int32 result_typmod, Oid result_collid, Oid input_collid, List** args_p, bool process_args, bool allow_non_const, eval_const_expressions_context* context); - +#ifdef USE_SPQ +extern Query *fold_constants(PlannerInfo *root, Query *q, ParamListInfo boundParams, Size max_size); +extern Query *flatten_join_alias_var_optimizer(Query *query, int queryLevel); +extern Expr *transform_array_Const_to_ArrayExpr(Const *c); +#endif #ifdef DOLPHIN extern List* dolphin_add_function_defaults(List* args, HeapTuple func_tuple); #endif diff --git a/contrib/dolphin/include/plugin_optimizer/plancat.h b/contrib/dolphin/include/plugin_optimizer/plancat.h index 
9e74112a4..d36ffcfe0 100644 --- a/contrib/dolphin/include/plugin_optimizer/plancat.h +++ b/contrib/dolphin/include/plugin_optimizer/plancat.h @@ -55,4 +55,8 @@ extern bool HasStoredGeneratedColumns(const PlannerInfo *root, Index rti); extern PlannerInfo *get_cte_root(PlannerInfo *root, int levelsup, char *ctename); +#ifdef USE_SPQ +extern double spq_estimate_partitioned_numtuples(Relation rel); +#endif + #endif /* PLANCAT_H */ diff --git a/contrib/dolphin/include/plugin_optimizer/planmain.h b/contrib/dolphin/include/plugin_optimizer/planmain.h index 93ca09fde..817203195 100644 --- a/contrib/dolphin/include/plugin_optimizer/planmain.h +++ b/contrib/dolphin/include/plugin_optimizer/planmain.h @@ -210,4 +210,9 @@ extern List* find_all_internal_tableOids(Oid parentOid); extern bool check_agg_optimizable(Aggref* aggref, int16* strategy); extern void check_hashjoinable(RestrictInfo* restrictinfo); +#ifdef USE_SPQ +extern void spq_extract_plan_dependencies(PlannerInfo *root, Plan *plan); +extern List* spq_make_null_eq_clause(List* joinqual, List** otherqual, List* nullinfo); +#endif + #endif /* PLANMAIN_H */ diff --git a/contrib/dolphin/include/plugin_optimizer/planner.h b/contrib/dolphin/include/plugin_optimizer/planner.h index e5c7a8f4d..1c7800c81 100644 --- a/contrib/dolphin/include/plugin_optimizer/planner.h +++ b/contrib/dolphin/include/plugin_optimizer/planner.h @@ -56,6 +56,10 @@ extern PlannedStmt* standard_planner(Query* parse, int cursorOptions, ParamListI typedef void (*planner_hook_type) (Query* parse, int cursorOptions, ParamListInfo boundParams); typedef void (*ndp_pushdown_hook_type) (Query* querytree, PlannedStmt *stmt); extern THR_LOCAL PGDLLIMPORT ndp_pushdown_hook_type ndp_pushdown_hook; +#ifdef USE_SPQ +typedef PlannedStmt *(*spq_planner_hook_type) (Query* parse, int cursorOptions, ParamListInfo boundParams); +extern THR_LOCAL PGDLLIMPORT spq_planner_hook_type spq_planner_hook; +#endif extern Plan* subquery_planner(PlannerGlobal* glob, Query* parse, 
PlannerInfo* parent_root, bool hasRecursion, double tuple_fraction, PlannerInfo** subroot, int options = SUBQUERY_NORMAL, ItstDisKey* diskeys = NULL, @@ -213,4 +217,9 @@ extern bool queryIsReadOnly(Query* query); typedef PlannedStmt* (*plannerFunc)(Query* parse, int cursorOptions, ParamListInfo boundParams); +#ifdef USE_SPQ +extern List* spq_get_distributekey_from_tlist( + PlannerInfo* root, List* tlist, List* groupcls, double rows, double* result_multiple, void* skew_info = NULL); +#endif + #endif /* PLANNER_H */ diff --git a/contrib/dolphin/include/plugin_parser/parse_coerce.h b/contrib/dolphin/include/plugin_parser/parse_coerce.h index 2726d83f6..a50758873 100644 --- a/contrib/dolphin/include/plugin_parser/parse_coerce.h +++ b/contrib/dolphin/include/plugin_parser/parse_coerce.h @@ -85,4 +85,7 @@ extern char *getEnumLableByOrder(Oid enumOid, int order); extern Node *transferConstToAconst(Node *node); extern Const* setValueToConstExpr(SetVariableExpr* set); +#ifdef USE_SPQ +extern bool get_cast_func(Oid oidSrc, Oid oidDest, bool *is_binary_coercible, Oid *oidCastFunc, CoercionPathType *pathtype); +#endif #endif /* PARSE_COERCE_H */ diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index e72f7c8c0..6a5b74115 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -378,6 +378,7 @@ typedef struct PLpgSQL_expr { /* SQpL Query to plan and execute */ bool is_have_tableof_index_func; /* dno maybe is 0, so need an extra variable */ int tableof_func_dno; + uint64 unique_sql_id; } PLpgSQL_expr; typedef struct { /* openGauss data type */ diff --git a/contrib/dolphin/plugin_executor/execQual.cpp b/contrib/dolphin/plugin_executor/execQual.cpp index 906e42ad3..d75c721ec 100644 --- a/contrib/dolphin/plugin_executor/execQual.cpp +++ b/contrib/dolphin/plugin_executor/execQual.cpp @@ -1988,7 +1988,7 @@ static TupleDesc get_cached_rowtype(Oid type_id, int32 typmod, 
TupleDesc* cache_ /* * Callback function to release a tupdesc refcount at expression tree shutdown */ -static void ShutdownTupleDescRef(Datum arg) +void ShutdownTupleDescRef(Datum arg) { TupleDesc* cache_field = (TupleDesc*)DatumGetPointer(arg); @@ -7374,3 +7374,31 @@ void ExecCopyDataToDatum(PLpgSQL_datum** datums, int dno, Cursor_Data* source_cu cursor_var->value = Int32GetDatum(source_cursor->row_count); cursor_var->isnull = source_cursor->null_open; } + +#ifdef USE_SPQ +bool IsJoinExprNull(List *joinExpr, ExprContext *econtext) +{ + ListCell *lc; + bool joinkeys_null = true; + + Assert(joinExpr != nullptr); + + foreach(lc, joinExpr) { + ExprState *keyexpr = (ExprState *) lfirst(lc); + bool isNull = false; + + /* + * Evaluate the current join attribute value of the tuple + */ + ExecEvalExpr(keyexpr, econtext, &isNull, NULL); + + if (!isNull) { + /* Found at least one non-null join expression, we're done */ + joinkeys_null = false; + break; + } + } + + return joinkeys_null; +} +#endif diff --git a/contrib/dolphin/plugin_optimizer/commands/copy.cpp b/contrib/dolphin/plugin_optimizer/commands/copy.cpp index d2d4f89d6..23b23a928 100644 --- a/contrib/dolphin/plugin_optimizer/commands/copy.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/copy.cpp @@ -4387,9 +4387,9 @@ uint64 CopyFrom(CopyState cstate) if ((resultRelInfo->ri_TrigDesc != NULL && (resultRelInfo->ri_TrigDesc->trig_insert_before_row || resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || #ifdef DOLPHIN - cstate->volatile_defexprs || is_single_insert) { + cstate->volatile_defexprs || isForeignTbl || is_single_insert) { #else - cstate->volatile_defexprs) { + cstate->volatile_defexprs || isForeignTbl) { #endif useHeapMultiInsert = false; } else { @@ -4927,6 +4927,8 @@ uint64 CopyFrom(CopyState cstate) if (!skip_tuple && isForeignTbl) { resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, resultRelInfo, slot, NULL); + Assert(!useHeapMultiInsert); + resetPerTupCxt = true; processed++; } else if 
(!skip_tuple) { /* @@ -5180,6 +5182,14 @@ uint64 CopyFrom(CopyState cstate) * tuples inserted by an INSERT command. */ processed++; + } else {/*skip_tupe == true*/ + /* + * only the before row insert trigget would make skip_tupe==true + * which useHeapMultiInsert must be false + * so we can safely reset the per-tuple memory context in next iteration + */ + Assert(useHeapMultiInsert == false); + resetPerTupCxt = true; } #ifdef PGXC } @@ -7243,6 +7253,7 @@ static bool CopyReadLineTextTemplate(CopyState cstate) for (;;) { int prev_raw_ptr; char c; + char sec = '\0'; /* * Load more data if needed. Ideally we would just force four bytes @@ -7280,6 +7291,27 @@ static bool CopyReadLineTextTemplate(CopyState cstate) /* OK to fetch a character */ prev_raw_ptr = raw_buf_ptr; c = copy_raw_buf[raw_buf_ptr++]; + if (raw_buf_ptr < copy_buf_len) { + sec = copy_raw_buf[raw_buf_ptr]; + } + if (IS_TEXT(cstate) && (cstate->copy_dest == COPY_NEW_FE) && !cstate->is_load_copy) { + if (c == '\\') { + char c2; + IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(0); + + /* get next character */ + c2 = copy_raw_buf[raw_buf_ptr]; + + /* + * If the following character is a newline or CRLF, + * skip the '\\'. + */ + if (c2 == '\n' || c2 == '\r' || + (c2 == '\r' && (raw_buf_ptr + 1) < copy_buf_len && copy_raw_buf[raw_buf_ptr + 1] == '\n')) { + continue; + } + } + } if (csv_mode) { /* @@ -7640,10 +7672,12 @@ static bool CopyReadLineTextTemplate(CopyState cstate) * high-bit set, so as an optimization we can avoid this block * entirely if it is not set. 
*/ - if (cstate->encoding_embeds_ascii && IS_HIGHBIT_SET(c)) { + if ((cstate->encoding_embeds_ascii || cstate->file_encoding == PG_GBK || cstate->file_encoding == PG_GB18030) + && IS_HIGHBIT_SET(c)) { int mblen; mblen_str[0] = c; + mblen_str[1] = sec; /* All our encodings only read the first byte to get the length */ mblen = pg_encoding_mblen(cstate->file_encoding, mblen_str); IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(mblen - 1); diff --git a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp index 0a9533145..c52086b39 100644 --- a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp @@ -3653,7 +3653,7 @@ Oid AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses* objsMoved) * Returns the type's old namespace OID. */ Oid AlterTypeNamespaceInternal( - Oid typeOid, Oid nspOid, bool isImplicitArray, bool errorOnTableType, ObjectAddresses* objsMoved) + Oid typeOid, Oid nspOid, bool isImplicitArray, bool errorOnTableType, ObjectAddresses* objsMoved, char* newTypeName) { Relation rel; HeapTuple tup; @@ -3687,7 +3687,8 @@ Oid AlterTypeNamespaceInternal( CheckSetNamespace(oldNspOid, nspOid, TypeRelationId, typeOid); /* check for duplicate name (more friendly than unique-index failure) */ - if (SearchSysCacheExists2(TYPENAMENSP, CStringGetDatum(NameStr(typform->typname)), ObjectIdGetDatum(nspOid))) + char* checkTypeName = (newTypeName == NULL) ? 
NameStr(typform->typname) : newTypeName; + if (SearchSysCacheExists2(TYPENAMENSP, CStringGetDatum(checkTypeName), ObjectIdGetDatum(nspOid))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("type \"%s\" already exists in schema \"%s\"", @@ -3709,6 +3710,9 @@ Oid AlterTypeNamespaceInternal( * tup is a copy, so we can scribble directly on it */ typform->typnamespace = nspOid; + if (newTypeName != NULL) { + (void)namestrcpy(&(typform->typname), newTypeName); + } simple_heap_update(rel, &tup->t_self, tup); CatalogUpdateIndexes(rel, tup); @@ -3756,8 +3760,10 @@ Oid AlterTypeNamespaceInternal( add_exact_object_address(&thisobj, objsMoved); /* Recursively alter the associated array type, if any */ - if (OidIsValid(arrayOid)) - AlterTypeNamespaceInternal(arrayOid, nspOid, true, true, objsMoved); + if (OidIsValid(arrayOid)) { + AlterTypeNamespaceInternal(arrayOid, nspOid, true, true, objsMoved, + (newTypeName == NULL) ? NULL : makeArrayTypeName(newTypeName, nspOid)); + } return oldNspOid; } diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index 3b4f5b2d4..d8df1a761 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -96,6 +96,9 @@ /* Hook for plugins to get control in planner() */ THR_LOCAL ndp_pushdown_hook_type ndp_pushdown_hook = NULL; +#ifdef USE_SPQ +THR_LOCAL spq_planner_hook_type spq_planner_hook = NULL; +#endif #ifndef MIN #define MIN(A, B) ((B) < (A) ? 
(B) : (A)) @@ -381,6 +384,12 @@ PlannedStmt* planner(Query* parse, int cursorOptions, ParamListInfo boundParams) instr_time starttime; double totaltime = 0; +#ifdef USE_SPQ + if (spq_planner_hook) { + return (*spq_planner_hook) (parse, cursorOptions, boundParams); + } +#endif + INSTR_TIME_SET_CURRENT(starttime); #ifdef PGXC @@ -3536,12 +3545,14 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction) wflists, &needSecondLevelAgg, collectiveGroupExpr); -#ifdef ENABLE_MULTIPLE_NODES - /* - * grouping_tlist was modified by build_groupingsets_plan, - * we have to change tlist at the same time. - */ - tlist = grouping_tlist; +#if defined(ENABLE_MULTIPLE_NODES) || defined(USE_SPQ) + if (IS_SPQ_RUNNING) { + /* + * grouping_tlist was modified by build_groupingsets_plan, + * we have to change tlist at the same time. + */ + tlist = grouping_tlist; + } #endif /* Delete eq class expr after grouping */ delete_eq_member(root, tlist, collectiveGroupExpr); @@ -16000,3 +16011,88 @@ adjust_plan_for_srfs(PlannerInfo *root, Plan *plan, List *targets, List *targets } return newplan; } + +#ifdef USE_SPQ +static Node* get_spq_multiple_from_expr( + PlannerInfo* root, Node* expr, double rows, double* skew_multiple, double* bias_multiple) +{ + List* groupExprs = NIL; + Oid datatype = exprType((Node*)(expr)); + bool use_skew_multiple = true; + + if (!OidIsValid(datatype) || !IsSpqTypeDistributable(datatype)) + return NULL; + + groupExprs = list_make1(expr); + get_multiple_from_exprlist(root, groupExprs, rows, &use_skew_multiple, true, skew_multiple, bias_multiple); + list_free_ext(groupExprs); + + return expr; +} + + +List* spq_get_distributekey_from_tlist( + PlannerInfo* root, List* tlist, List* groupcls, double rows, double* result_multiple, void* skew_info) +{ + ListCell* lcell = NULL; + List* distkey = NIL; + double multiple = 0.0; + double bias_multiple = 0.0; + double skew_multiple = 0.0; + List* exprMultipleList = NIL; + + foreach (lcell, groupcls) { + Node* expr = 
(Node*)lfirst(lcell); + + if (IsA(expr, SortGroupClause)) + expr = get_sortgroupclause_expr((SortGroupClause*)expr, tlist); + + expr = get_spq_multiple_from_expr(root, expr, rows, &skew_multiple, &bias_multiple); + if (expr != NULL) { + /* + * we can't estimate skew of grouping sets because there's + * null added, so just add all columns and set mutiple to 1 + */ + if (root->parse->groupingSets) { + distkey = lappend(distkey, expr); + *result_multiple = 1; + continue; + } + if ((skew_multiple == 1.0) && (bias_multiple <= 1.0)) { + *result_multiple = 1; + list_free_ext(exprMultipleList); + return list_make1(expr); + } else if ((u_sess->pgxc_cxt.NumDataNodes == skew_multiple) && + (u_sess->pgxc_cxt.NumDataNodes == + bias_multiple)) { /* All the expr are const, return the first expr. */ + if (distkey == NULL) + distkey = lappend(distkey, expr); + *result_multiple = u_sess->pgxc_cxt.NumDataNodes; + + continue; + } else { + if (skew_multiple == 1.0) { + /* + * If distinct num of multiple has no skew, we should use bias multiple to + * compute mix multiple. 
+ */ + multiple = bias_multiple; + } + else if (bias_multiple <= 1.0) /* mcf has no skew, handle skew_multiple */ + multiple = skew_multiple; + else + multiple = Max(bias_multiple, skew_multiple); + + exprMultipleList = add_multiple_to_list(expr, multiple, exprMultipleList); + } + } + } + + if (exprMultipleList != NULL) { + distkey = get_mix_diskey_by_exprlist(root, exprMultipleList, rows, result_multiple, (AggSkewInfo*)skew_info); + list_free_ext(exprMultipleList); + } + + return distkey; +} +#endif diff --git a/contrib/dolphin/plugin_optimizer/util/clauses.cpp b/contrib/dolphin/plugin_optimizer/util/clauses.cpp index a3cf485ca..5cae44e73 100644 --- a/contrib/dolphin/plugin_optimizer/util/clauses.cpp +++ b/contrib/dolphin/plugin_optimizer/util/clauses.cpp @@ -5468,6 +5468,113 @@ List *get_quals_lists(Node *jtnode) return quallist; } +#ifdef USE_SPQ + +/* + * fold_constants + * + * Recurses into query tree and folds all constant expressions. + */ +Query *fold_constants(PlannerInfo *root, Query *q, ParamListInfo boundParams, Size max_size) +{ + eval_const_expressions_context context; + + context.root = root; + context.boundParams = boundParams; + context.active_fns = NIL; /* nothing being recursively simplified */ + context.case_val = NULL; /* no CASE being examined */ + context.estimate = false; /* safe transformations only */ + context.recurse_queries = true; /* recurse into query structures */ + context.recurse_sublink_testexpr = false; /* do not recurse into sublink test expressions */ + + context.max_size = max_size; + + return (Query *) query_or_expression_tree_mutator( + (Node *) q, + (Node* (*)(Node*, void*)) eval_const_expressions_mutator, + &context,0); +} + +/* + * flatten_join_alias_var_optimizer + * Replace Vars that reference JOIN outputs with references to the original + * relation variables instead. 
+ */ +Query * flatten_join_alias_var_optimizer(Query *query, int queryLevel) +{ + Query *queryNew = (Query *) copyObject(query); + + /* + * Flatten join alias for expression in + * 1. targetlist + * 2. returningList + * 3. having qual + * 4. scatterClause + * 5. limit offset + * 6. limit count + * + * We flatten the above expressions since these entries may be moved during the query + * normalization step before algebrization. In contrast, the planner flattens alias + * inside quals to allow predicates involving such vars to be pushed down. + * + * Here we ignore the flattening of quals due to the following reasons: + * 1. we assume that the function will be called before Query->DXL translation: + * 2. the quals never gets moved from old query to the new top-level query in the + * query normalization phase before algebrization. In other words, the quals hang of + * the same query structure that is now the new derived table. + * 3. the algebrizer can resolve the abiquity of join aliases in quals since we maintain + * all combinations of to DXL-ColId during Query->DXL translation. + * + */ + + return queryNew; +} + +Expr *transform_array_Const_to_ArrayExpr(Const *c) +{ + Oid elemtype; + int16 elemlen; + bool elembyval; + char elemalign; + int nelems; + Datum *elems; + bool *nulls; + ArrayType *ac; + ArrayExpr *aexpr; + int i; + + Assert(IsA(c, Const)); + + /* Does it look like the right kind of an array Const? */ + if (c->constisnull) + return (Expr *)c; /* NULL const */ + + elemtype = get_element_type(c->consttype); + if (elemtype == InvalidOid) + return (Expr *)c; /* not an array */ + + ac = DatumGetArrayTypeP(c->constvalue); + nelems = ArrayGetNItems(ARR_NDIM(ac), ARR_DIMS(ac)); + + /* All set, extract the elements, and an ArrayExpr to hold them. 
*/ + get_typlenbyvalalign(elemtype, &elemlen, &elembyval, &elemalign); + deconstruct_array(ac, elemtype, elemlen, elembyval, elemalign, &elems, &nulls, &nelems); + + aexpr = makeNode(ArrayExpr); + aexpr->array_typeid = c->consttype; + aexpr->element_typeid = elemtype; + aexpr->multidims = false; + aexpr->location = c->location; + + for (i = 0; i < nelems; i++) { + aexpr->elements = + lappend(aexpr->elements, makeConst(elemtype, -1, c->constcollid, elemlen, elems[i], nulls[i], elembyval)); + } + + return (Expr *)aexpr; +} +#endif + #ifdef DOLPHIN List* dolphin_add_function_defaults(List* args, HeapTuple func_tuple) { diff --git a/contrib/dolphin/plugin_optimizer/util/plancat.cpp b/contrib/dolphin/plugin_optimizer/util/plancat.cpp index 0d9f144c4..8c3ce4c1f 100644 --- a/contrib/dolphin/plugin_optimizer/util/plancat.cpp +++ b/contrib/dolphin/plugin_optimizer/util/plancat.cpp @@ -61,6 +61,9 @@ #ifdef PGXC #include "pgxc/pgxc.h" #endif +#ifdef USE_SPQ +#include "catalog/pg_inherits_fn.h" +#endif #define ESTIMATE_PARTITION_NUMBER 10 #define ESTIMATE_PARTITION_NUMBER_THRESHOLD 5 @@ -2084,3 +2087,41 @@ PlannerInfo *get_cte_root(PlannerInfo *root, int levelsup, char *ctename) } return cteroot; } + +#ifdef USE_SPQ +double spq_estimate_partitioned_numtuples(Relation rel) +{ + List *inheritors; + ListCell *lc; + double totaltuples; + + if (rel->rd_rel->reltuples > 0) + return rel->rd_rel->reltuples; + + inheritors = find_all_inheritors(RelationGetRelid(rel), AccessShareLock, NULL); + totaltuples = 0; + foreach (lc, inheritors) { + Oid childid = lfirst_oid(lc); + Relation childrel; + double childtuples; + + if (childid != RelationGetRelid(rel)) + childrel = try_table_open(childid, NoLock); + else + childrel = rel; + + childtuples = childrel->rd_rel->reltuples; + + if (childtuples == 0 && rel_is_external_table(RelationGetRelid(childrel))) { +#define DEFAULT_EXTERNAL_TABLE_TUPLES 1000000 + childtuples = DEFAULT_EXTERNAL_TABLE_TUPLES; + } + totaltuples += childtuples; + + if 
(childrel != rel) + heap_close(childrel, NoLock); + } + return totaltuples; +} + +#endif diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index b39e3b108..bb8b8aff9 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -3175,6 +3175,9 @@ static Query* transformSelectStmt(ParseState* pstate, SelectStmt* stmt, bool isF ListCell* l = NULL; qry->commandType = CMD_SELECT; +#ifdef USE_SPQ + qry->is_support_spq = true; +#endif if (stmt->startWithClause != NULL) { pstate->p_addStartInfo = true; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index ad83cd793..a647d7ea9 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -22904,43 +22904,47 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = true; $$ = (Node *)n; } - | ALTER TABLE relation_expr RENAME DolphinColId + | ALTER TABLE relation_expr RENAME dolphin_qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $3; n->subname = NULL; - n->newname = GetDolphinObjName($5->str, $5->is_quoted); + n->newname = $5->relname; + n->newschema = $5->schemaname; n->missing_ok = false; $$ = (Node *)n; } - | ALTER TABLE relation_expr RENAME to_or_as DolphinColId + | ALTER TABLE relation_expr RENAME to_or_as dolphin_qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $3; n->subname = NULL; - n->newname = GetDolphinObjName($6->str, $6->is_quoted); + n->newname = $6->relname; + n->newschema = $6->schemaname; n->missing_ok = false; $$ = (Node *)n; } - | ALTER TABLE IF_P EXISTS relation_expr RENAME DolphinColId + | ALTER TABLE IF_P EXISTS relation_expr RENAME dolphin_qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $5; n->subname = NULL; - n->newname = GetDolphinObjName($7->str, 
$7->is_quoted); + n->newname = $7->relname; + n->newschema = $7->schemaname; n->missing_ok = true; $$ = (Node *)n; } - | ALTER TABLE IF_P EXISTS relation_expr RENAME to_or_as DolphinColId + | ALTER TABLE IF_P EXISTS relation_expr RENAME to_or_as dolphin_qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $5; n->subname = NULL; - n->newname = GetDolphinObjName($8->str, $8->is_quoted); + n->newname = $8->relname; + n->newschema = $8->schemaname; n->missing_ok = true; $$ = (Node *)n; } diff --git a/contrib/dolphin/plugin_parser/parse_target.cpp b/contrib/dolphin/plugin_parser/parse_target.cpp index 6291223a2..fe2f6dcb0 100644 --- a/contrib/dolphin/plugin_parser/parse_target.cpp +++ b/contrib/dolphin/plugin_parser/parse_target.cpp @@ -1714,6 +1714,9 @@ static int FigureColnameInternal(Node* node, char** name) case ANY_SUBLINK: case ROWCOMPARE_SUBLINK: case CTE_SUBLINK: +#ifdef USE_SPQ + case NOT_EXISTS_SUBLINK: +#endif break; } break; diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y index 5734fc060..eb8ee42fb 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y +++ b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y @@ -9872,6 +9872,7 @@ make_execsql_stmt(int firsttoken, int location) bool insert_array_record = false; int values_end_loc = -1; int before_semi_loc = -1; + const char* err_msg = "The label name can only contain letters, digits and underscores"; PLpgSQL_row* row_data = NULL; PLpgSQL_rec* rec_data = NULL; PLpgSQL_var* array_data = NULL; @@ -9983,22 +9984,50 @@ make_execsql_stmt(int firsttoken, int location) { char* name = NULL; errno_t rc = 0; + int num = -1; int len = Min(NAMEDATALEN, lb.len - count + 1); name = (char*)palloc(len); rc = strncpy_s(name, len, lb.data, len - 1); securec_check_c(rc, "\0", "\0"); + num = strspn(pg_strtolower(name), "abcdefghijklmnopqrstuvwxyz0123456789_"); + + if(num != len - 1 || (name[0] >= '0' && name[0] <= '9')) { + 
pfree(name); + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg(err_msg), + parser_errposition(location + num))); + } plpgsql_ns_additem(PLPGSQL_NSTYPE_LABEL, 0, pg_strtolower(name)); pfree(name); } - else - { + else { + int valid_len = lb.len; if(lb.len >= NAMEDATALEN) { lb.data[NAMEDATALEN - 1] = '\0'; + valid_len = NAMEDATALEN - 1; } + int len = -1; + len = strspn(pg_strtolower(lb.data), "abcdefghijklmnopqrstuvwxyz0123456789_"); + if(len != valid_len) { + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg(err_msg), + parser_errposition(location + len))); + } + if(lb.data[0] >= '0' && lb.data[0] <= '9') { + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg(err_msg), + parser_errposition(location))); + } plpgsql_ns_additem(PLPGSQL_NSTYPE_LABEL, 0, pg_strtolower(lb.data)); } pfree_ext(lb.data); @@ -10042,22 +10071,14 @@ make_execsql_stmt(int firsttoken, int location) securec_check_c(rc, "\0", "\0"); len = strspn(pg_strtolower(name), "abcdefghijklmnopqrstuvwxyz0123456789_"); - if(len != lb.len - count) { + if(len != lb.len - count || (name[0] >= '0' && name[0] <= '9')) { pfree(name); pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), + errmsg(err_msg), parser_errposition(location + len))); } - if(name[0] >= '0' && name[0] <= '9') { - pfree(name); - pfree_ext(lb.data); - ereport(errstate, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), - parser_errposition(location))); - } if(lb.len-count >= NAMEDATALEN) { @@ -10082,14 +10103,14 @@ make_execsql_stmt(int firsttoken, int location) pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), + errmsg(err_msg), parser_errposition(location + len))); } if(lb.data[0] >= '0' && lb.data[0] <= '9') { pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is 
invalid"), + errmsg(err_msg), parser_errposition(location))); } if(lb.len >= NAMEDATALEN) @@ -13193,11 +13214,20 @@ static void read_signal_sqlstate(PLpgSQL_stmt_signal *newp, int tok) yyerror("unexpected end of function definition"); } - if (strcmp(yylval.str, "value") == 0) { - if (yylex() != SCONST) { - yyerror("syntax error, the expected value is a string."); + if (tok != SCONST && tok != T_WORD) { + yyerror("syntax error, the expected value is a string."); + } + + if (tok == T_WORD) { + if (strcmp(yylval.str, "value") == 0) { + if (yylex() != SCONST) { + yyerror("syntax error, the expected value is a string."); + } + } else { + yyerror("syntax error, the expected word is value."); } } + sqlstate_value = yylval.str; if (strlen(sqlstate_value) != 5 || diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index e5696661c..f08bb0341 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -277,7 +277,6 @@ static char* get_str_from_var_sci(NumericVar* var, int rscale); static void apply_typmod(NumericVar* var, int32 typmod); static int32 numericvar_to_int32(const NumericVar* var, bool can_ignore = false); -static double numeric_to_double_no_overflow(Numeric num); static double numericvar_to_double_no_overflow(NumericVar* var); static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup); @@ -5732,7 +5731,7 @@ void int64_to_numericvar(int64 val, NumericVar* var) /* * Convert numeric to float8; if out of range, return +/- HUGE_VAL */ -static double numeric_to_double_no_overflow(Numeric num) +double numeric_to_double_no_overflow(Numeric num) { char* tmp = NULL; double val; diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index f4651f824..a8865fae6 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -5353,6 +5353,10 @@ static 
void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) */ if (IsA(ps, AppendState)) dpns->outer_planstate = ((AppendState*)ps)->appendplans[0]; +#ifdef USE_SPQ + else if (IsA(ps, SequenceState)) + dpns->outer_planstate = ((SequenceState *) ps)->subplans[1]; +#endif else if (IsA(ps, VecAppendState)) dpns->outer_planstate = ((VecAppendState*)ps)->appendplans[0]; else if (IsA(ps, MergeAppendState)) @@ -5380,6 +5384,10 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) */ if (IsA(ps, SubqueryScanState)) dpns->inner_planstate = ((SubqueryScanState*)ps)->subplan; +#ifdef USE_SPQ + else if (IsA(ps, SequenceState)) + dpns->inner_planstate = ((SequenceState *) ps)->subplans[0]; +#endif else if (IsA(ps, VecSubqueryScanState)) dpns->inner_planstate = ((VecSubqueryScanState*)ps)->subplan; else if (IsA(ps, CteScanState)) diff --git a/contrib/dolphin/plugin_utils/adt/selfuncs.cpp b/contrib/dolphin/plugin_utils/adt/selfuncs.cpp index b4d375f46..b38946e18 100644 --- a/contrib/dolphin/plugin_utils/adt/selfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/selfuncs.cpp @@ -183,7 +183,6 @@ static void convert_bytea_to_scalar( static double convert_one_string_to_scalar(const char* value, int rangelo, int rangehi); static double convert_one_bytea_to_scalar(unsigned char* value, int valuelen, int rangelo, int rangehi); static char* convert_string_datum(Datum value, Oid typid); -static double convert_timevalue_to_scalar(Datum value, Oid typid); static void examine_simple_variable(PlannerInfo* root, Var* var, VariableStatData* vardata); static bool get_variable_range(PlannerInfo* root, VariableStatData* vardata, Oid sortop, Datum* min, Datum* max); static bool get_actual_variable_range(PlannerInfo* root, VariableStatData* vardata, Oid sortop, Datum* min, Datum* max); @@ -4375,7 +4374,7 @@ static double convert_one_bytea_to_scalar(unsigned char* value, int valuelen, in /* * Do convert_to_scalar()'s work for any timevalue data type. 
*/ -static double convert_timevalue_to_scalar(Datum value, Oid typid) +double convert_timevalue_to_scalar(Datum value, Oid typid) { switch (typid) { case TIMESTAMPOID: @@ -5744,7 +5743,6 @@ static Pattern_Prefix_Status like_fixed_prefix( *prefix_const = string_to_const(match, typeId); else *prefix_const = string_to_bytea_const(match, match_pos); - (*prefix_const)->constcollid = patt_const->constcollid; if (rest_selec != NULL) *rest_selec = like_selectivity(&patt[pos], pattlen - pos, case_insensitive); diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index c31c2fd5c..06cc8727d 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -8839,7 +8839,7 @@ bool isNumeric(const char* str) return true; } -static double numeric_to_double_no_overflow(Numeric num) +static double numeric_to_double_no_overflow_dolphin(Numeric num) { char* endptr = NULL; char* tmp = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(num))); @@ -8965,11 +8965,11 @@ static text* _m_char(FunctionCallInfo fcinfo) str = char_deal(str, quotient, remainder, remainders, times); break; case NUMERICOID: - ret_round = (int128)round(numeric_to_double_no_overflow((Numeric)PG_GETARG_DATUM(i))); + ret_round = (int128)round(numeric_to_double_no_overflow_dolphin((Numeric)PG_GETARG_DATUM(i))); if ((*((int128 *)DatumGetPointer((Datum)(&ret_round)))) >= PG_UINT64_MAX) { appendStringInfoString(&str, MAX_UINT32_STR); } else { - quotient = (uint32)(round(numeric_to_double_no_overflow((Numeric)value))); + quotient = (uint32)(round(numeric_to_double_no_overflow_dolphin((Numeric)value))); str = char_deal(str, quotient, remainder, remainders, times); } break; diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 2f1e83809..e864bb227 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -827,6 +827,8 @@ static int128 
EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber att static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc); static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod); static void check_unsupported_charset_for_column(Oid collation, const char* col_name); +static void AlterTableNamespaceDependentProcess(Relation classRel ,Relation rel, Oid oldNspOid, + Oid nspOid, ObjectAddresses* objsMoved, char* newrelname); #ifdef DOLPHIN static List* ATGetNonUniqueKeyList(Relation rel); @@ -6723,7 +6725,7 @@ ObjectAddress RenameRelation(RenameStmt* stmt) #endif /* Do the work */ - RenameRelationInternal(relid, stmt->newname); + RenameRelationInternal(relid, stmt->newname, stmt->newschema); /* * Record the changecsn of the table that defines the index */ @@ -6748,13 +6750,22 @@ ObjectAddress RenameRelation(RenameStmt* stmt) * the sequence name should probably be removed from the * sequence, AFAIK there's no need for it to be there. 
*/ -void RenameRelationInternal(Oid myrelid, const char* newrelname) +void RenameRelationInternal(Oid myrelid, const char* newrelname, char* newschema) { Relation targetrelation; Relation relrelation; /* for RELATION relation */ HeapTuple reltup; Form_pg_class relform; Oid namespaceId; + Oid oldNspOid = InvalidOid; + bool needChangeNsp = false; + ObjectAddresses* objsMoved = NULL; + ObjectAddress thisobj; + bool is_present = false; + + thisobj.classId = RelationRelationId; + thisobj.objectId = myrelid; + thisobj.objectSubId = 0; /* * Grab an exclusive lock on the target table, index, sequence or view, @@ -6762,6 +6773,26 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) */ targetrelation = relation_open(myrelid, AccessExclusiveLock); + if (newschema != NULL) { + if (targetrelation->rd_mlogoid != InvalidOid) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Un-support feature"), + errdetail("table owning matview doesn't support this ALTER yet.")))); + } + + if (targetrelation->rd_rel->relkind == RELKIND_MATVIEW) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ALTER MATERIALIZED VIEW is not yet supported."))); + } + + /* Permission check */ + if (!pg_class_ownercheck(RelationGetRelid(targetrelation), GetUserId())) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(targetrelation)); + } + } + if (RelationIsSubPartitioned(targetrelation)) { ereport( ERROR, @@ -6793,6 +6824,24 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) relform = (Form_pg_class)GETSTRUCT(reltup); + + oldNspOid = namespaceId; + if (newschema != NULL) { + /* Get and lock schema OID and check its permissions. 
*/ + RangeVar* newrv = makeRangeVar(newschema, (char*)newrelname, -1); + Oid newNspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL, '\0'); + + needChangeNsp = (newNspOid != namespaceId); + if (needChangeNsp) { + /* common checks on switching namespaces */ + CheckSetNamespace(namespaceId, newNspOid, RelationRelationId, myrelid); + ledger_check_switch_schema(namespaceId, newNspOid); + objsMoved = new_object_addresses(); + namespaceId = newNspOid; + is_present = object_address_present(&thisobj, objsMoved); + } + } + /* * Check relation name to ensure that it doesn't conflict with existing synonym. */ @@ -6802,8 +6851,17 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) get_namespace_name(namespaceId)))); } - if (get_relname_relid(newrelname, namespaceId) != InvalidOid) - ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname))); + if (get_relname_relid(newrelname, namespaceId) != InvalidOid) { + if (newschema != NULL) { + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_TABLE), + errmsg("relation \"%s\" already exists in schema \"%s\"", + newrelname, + newschema))); + } else { + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname))); + } + } #ifdef ENABLE_MULTIPLE_NODES if (RelationIsTsStore(targetrelation)) { @@ -6830,6 +6888,9 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) */ (void)namestrcpy(&(relform->relname), newrelname); + /* Update pg_class tuple with new nsp. 
*/ + relform->relnamespace = namespaceId; + simple_heap_update(relrelation, &reltup->t_self, reltup); /* keep the system catalog indexes current */ @@ -6846,14 +6907,33 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) renamePartitionedTable(myrelid, newrelname); } + if (needChangeNsp && !is_present) { + if (changeDependencyFor(RelationRelationId, myrelid, NamespaceRelationId, oldNspOid, namespaceId) != 1) { + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("failed to change schema dependency for relation \"%s\"", NameStr(relform->relname)))); + } + + add_exact_object_address(&thisobj, objsMoved); + } + tableam_tops_free_tuple(reltup); heap_close(relrelation, RowExclusiveLock); - /* - * Also rename the associated type, if any. - */ - if (OidIsValid(targetrelation->rd_rel->reltype)) - RenameTypeInternal(targetrelation->rd_rel->reltype, newrelname, namespaceId); + if (needChangeNsp && !is_present) { + AlterTableNamespaceDependentProcess(relrelation, targetrelation, oldNspOid, namespaceId, objsMoved, + (char*)newrelname); + if (targetrelation->rd_isblockchain) { + rename_hist_by_newnsp(myrelid, newschema); + } + free_object_addresses(objsMoved); + } else { + /* + * Also rename the associated type, if any. + */ + if (OidIsValid(targetrelation->rd_rel->reltype)) + RenameTypeInternal(targetrelation->rd_rel->reltype, newrelname, oldNspOid); + } /* * Also rename the associated constraint, if any. 
@@ -21884,8 +21964,16 @@ void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object AlterRelationNamespaceInternal(classRel, RelationGetRelid(rel), oldNspOid, nspOid, true, objsMoved); + AlterTableNamespaceDependentProcess(classRel, rel, oldNspOid, nspOid, objsMoved, NULL); + + heap_close(classRel, RowExclusiveLock); +} + +static void AlterTableNamespaceDependentProcess(Relation classRel ,Relation rel, Oid oldNspOid, + Oid nspOid, ObjectAddresses* objsMoved, char* newrelname) +{ /* Fix the table's row type too */ - (void)AlterTypeNamespaceInternal(rel->rd_rel->reltype, nspOid, false, false, objsMoved); + (void)AlterTypeNamespaceInternal(rel->rd_rel->reltype, nspOid, false, false, objsMoved, newrelname); /* Change the table's set type too */ TupleDesc tupDesc = rel->rd_att; @@ -21902,8 +21990,6 @@ void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object AlterSeqNamespaces(classRel, rel, oldNspOid, nspOid, objsMoved, AccessExclusiveLock); AlterConstraintNamespaces(RelationGetRelid(rel), oldNspOid, nspOid, false, objsMoved); } - - heap_close(classRel, RowExclusiveLock); } /* @@ -28974,6 +29060,21 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) ATUnusableGlobalIndex(partTableRel); } } +#ifdef USE_SPQ +void spq_btbuild_update_pg_class(Relation heap, Relation index) +{ + List *options = NIL; + DefElem *opt; + opt = makeNode(DefElem); + opt->type = T_DefElem; + opt->defnamespace = NULL; + opt->defname = "spq_build"; + opt->defaction = DEFELEM_SET; + opt->arg = (Node *)makeString("finish"); + options = lappend(options, opt); + ATExecSetRelOptions(index, options, AT_SetRelOptions, ShareUpdateExclusiveLock); +} +#endif void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid) { -- Gitee From 6376768ab525dfe200126ede5569e501a5bfa2f3 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Wed, 18 Oct 2023 11:03:18 +0800 Subject: [PATCH 019/434] 
=?UTF-8?q?=E5=90=8C=E6=AD=A5=E5=86=85=E6=A0=B8?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E5=88=B0whale=E4=B8=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/Makefile | 6 +- contrib/whale/plugin_optimizer/Makefile | 2 +- .../plugin_optimizer/commands/CMakeLists.txt | 32 - .../whale/plugin_optimizer/commands/Makefile | 23 - .../plugin_optimizer/commands/tablecmds.cpp | 32581 ---------------- contrib/whale/plugin_parser/gram.y | 58 +- contrib/whale/plugin_utils/adt/numeric.cpp | 44 +- contrib/whale/plugin_utils/adt/selfuncs.cpp | 5 +- contrib/whale/tablecmds.cpp | 186 +- 9 files changed, 262 insertions(+), 32675 deletions(-) delete mode 100644 contrib/whale/plugin_optimizer/commands/CMakeLists.txt delete mode 100644 contrib/whale/plugin_optimizer/commands/Makefile delete mode 100644 contrib/whale/plugin_optimizer/commands/tablecmds.cpp diff --git a/contrib/whale/Makefile b/contrib/whale/Makefile index e53a9b3f1..be9fecc5b 100644 --- a/contrib/whale/Makefile +++ b/contrib/whale/Makefile @@ -2,7 +2,6 @@ parser = plugin_parser utils = plugin_utils executor = plugin_executor -commands = plugin_optimizer/commands plan = plugin_optimizer/plan pl = plugin_pl/plpgsql/src adt = $(utils)/adt @@ -57,7 +56,7 @@ $(orafce)/utility.o $(orafce)/random.o $(orafce)/datefce.o $(orafce)/convert.o $ $(orafce)/aggregate.o $(orafce)/charlen.o $(orafce)/charpad.o $(orafce)/plvstr.o $(orafce)/replace_empty_string.o \ $(orafce)/varchar2.o $(orafce)/file.o $(orafce)/plvdate.o $(orafce)/plvsubst.o $(orafce)/plunit.o -all $(OBJS): write_git_commit parser utils executor plan commands storage pl orafce; +all $(OBJS): write_git_commit parser utils executor plan storage pl orafce; parser: make -C $(parser) @@ -74,9 +73,6 @@ storage: plan: make -C $(plan) -commands: - make -C $(commands) - pl: make -C $(pl) diff --git a/contrib/whale/plugin_optimizer/Makefile b/contrib/whale/plugin_optimizer/Makefile index 591134cd6..09a2bad1a 100644 --- 
a/contrib/whale/plugin_optimizer/Makefile +++ b/contrib/whale/plugin_optimizer/Makefile @@ -11,6 +11,6 @@ base_dir = contrib/whale include $(top_builddir)/src/Makefile.global include $(top_builddir)/$(base_dir)/configure.mk -SUBDIRS = commands plan +SUBDIRS = plan include $(top_srcdir)/src/gausskernel/common.mk diff --git a/contrib/whale/plugin_optimizer/commands/CMakeLists.txt b/contrib/whale/plugin_optimizer/commands/CMakeLists.txt deleted file mode 100644 index 4f9360c7f..000000000 --- a/contrib/whale/plugin_optimizer/commands/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -#This is the main CMAKE for build all components. -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_commands_SRC) - -if("${ENABLE_LITE_MODE}" STREQUAL "ON") - list(REMOVE_ITEM TGT_commands_SRC ${CMAKE_CURRENT_SOURCE_DIR}/obs_stream.cpp) -endif() - -set(CMAKE_MODULE_PATH - ${CMAKE_CURRENT_SOURCE_DIR}/sequence -) - -set(TGT_commands_INC - ${PROJECT_OPENGS_DIR}/contrib/log_fdw - ${PROJECT_SRC_DIR}/include/libcomm - ${PROJECT_SRC_DIR}/include - ${PROJECT_SRC_DIR}/lib/gstrace - ${LZ4_INCLUDE_PATH} - ${LIBCGROUP_INCLUDE_PATH} - ${LIBORC_INCLUDE_PATH} - ${EVENT_INCLUDE_PATH} - ${PROTOBUF_INCLUDE_PATH} - ${ZLIB_INCLUDE_PATH} - ${LIBOPENSSL_INCLUDE_PATH} - ${LIBCURL_INCLUDE_PATH} -) - -add_subdirectory(sequence) - -set(commands_DEF_OPTIONS ${MACRO_OPTIONS} -DOBS_SERVER) -set(commands_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) -set(commands_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -add_static_objtarget(gausskernel_optimizer_commands TGT_commands_SRC TGT_commands_INC "${commands_DEF_OPTIONS}" "${commands_COMPILE_OPTIONS}" "${commands_LINK_OPTIONS}") diff --git a/contrib/whale/plugin_optimizer/commands/Makefile b/contrib/whale/plugin_optimizer/commands/Makefile deleted file mode 100644 index ea1ba3fae..000000000 --- a/contrib/whale/plugin_optimizer/commands/Makefile +++ /dev/null @@ -1,23 +0,0 @@ 
-#--------------------------------------------------------------------------------------- -# -# IDENTIFICATION -# src/gausskernel/optimizer/commands/Makefile -# -# --------------------------------------------------------------------------------------- - -subdir = contrib/whale/plugin_optimizer/commands -top_builddir = ../../../.. -base_dir = contrib/whale -include $(top_builddir)/src/Makefile.global -include $(top_builddir)/$(base_dir)/configure.mk -ifneq "$(MAKECMDGOALS)" "clean" - ifneq "$(MAKECMDGOALS)" "distclean" - ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" - -include $(DEPEND) - endif - endif -endif - -OBJS = tablecmds.o - -include $(top_srcdir)/src/gausskernel/common.mk diff --git a/contrib/whale/plugin_optimizer/commands/tablecmds.cpp b/contrib/whale/plugin_optimizer/commands/tablecmds.cpp deleted file mode 100644 index 0be71e9f8..000000000 --- a/contrib/whale/plugin_optimizer/commands/tablecmds.cpp +++ /dev/null @@ -1,32581 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * tablecmds.cpp - * Commands for creating and altering table structures and settings - * - * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * - * - * IDENTIFICATION - * src/gausskernel/optimizer/commands/tablecmds.cpp - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" -#include "knl/knl_variable.h" - -#include "access/cstore_delete.h" -#include "access/cstore_delta.h" -#include "access/cstore_insert.h" -#include "access/cstore_rewrite.h" -#include "access/genam.h" -#include "access/heapam.h" -#include "access/reloptions.h" -#include "access/relscan.h" -#include "access/sysattr.h" -#include "access/transam.h" -#include "access/tuptoaster.h" -#include "access/visibilitymap.h" -#include "access/xact.h" -#include "access/xlog.h" -#include "access/tableam.h" -#include "access/ustore/knl_uheap.h" -#include "access/ustore/knl_uscan.h" -#include "access/multixact.h" -#include "catalog/catalog.h" -#include "catalog/dependency.h" -#include "catalog/heap.h" -#include "catalog/index.h" -#include "catalog/indexing.h" -#include "catalog/namespace.h" -#include "catalog/objectaccess.h" -#include "catalog/pg_attrdef.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_description.h" -#include "catalog/pg_foreign_table.h" -#include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" -#include "catalog/pg_job.h" -#include "catalog/pg_job_proc.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_object.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_partition.h" -#include "catalog/pg_partition_fn.h" -#include "catalog/pg_hashbucket.h" -#include "catalog/pg_hashbucket_fn.h" -#include "catalog/pg_rewrite.h" -#include "catalog/pg_synonym.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_type.h" -#include 
"catalog/pg_type_fn.h" -#include "catalog/pg_uid_fn.h" -#include "catalog/pg_rlspolicy.h" -#include "catalog/storage.h" -#include "catalog/storage_xlog.h" -#include "catalog/toasting.h" -#include "catalog/cstore_ctlg.h" -#include "catalog/storage_gtt.h" -#include "catalog/gs_encrypted_columns.h" -#include "catalog/gs_global_config.h" -#include "catalog/gs_matview.h" -#include "catalog/gs_db_privilege.h" -#include "catalog/namespace.h" -#include "commands/cluster.h" -#include "commands/comment.h" -#include "commands/dbcommands.h" -#include "commands/event_trigger.h" -#include "commands/defrem.h" -#include "commands/dbcommands.h" -#include "commands/sec_rls_cmds.h" -#include "commands/sequence.h" -#include "commands/tablecmds.h" -#include "commands/tablespace.h" -#include "commands/trigger.h" -#include "commands/typecmds.h" -#include "commands/vacuum.h" -#include "commands/verify.h" -#include "commands/matview.h" -#include "commands/view.h" -#include "commands/view.h" -#include "executor/executor.h" -#include "executor/node/nodeModifyTable.h" -#include "foreign/fdwapi.h" -#include "foreign/foreign.h" -#include "gssignal/gs_signal.h" -#include "gs_policy/gs_policy_masking.h" -#include "gtm/gtm_client.h" -#include "miscadmin.h" -#include "tde_key_management/tde_key_manager.h" -#include "tde_key_management/tde_key_storage.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "optimizer/clauses.h" -#include "optimizer/planner.h" -#include "optimizer/var.h" -#include "plugin_parser/parse_clause.h" -#include "plugin_parser/parse_coerce.h" -#include "plugin_parser/parse_collate.h" -#include "plugin_parser/parse_expr.h" -#include "plugin_parser/parse_oper.h" -#include "plugin_parser/parse_relation.h" -#include "plugin_parser/parse_type.h" -#include "plugin_parser/parse_utilcmd.h" -#include "plugin_parser/parser.h" -#include "pgxc/route.h" -#include "rewrite/rewriteDefine.h" -#include "rewrite/rewriteHandler.h" -#include 
"rewrite/rewriteManip.h" -#include "rewrite/rewriteRlsPolicy.h" -#include "rewrite/rewriteSupport.h" -#include "replication/slot.h" -#include "storage/buf/bufmgr.h" -#include "storage/freespace.h" -#include "storage/lmgr.h" -#include "storage/lock/lock.h" -#include "storage/page_compression.h" -#include "storage/predicate.h" -#include "storage/remote_read.h" -#include "storage/smgr/segment.h" -#include "storage/smgr/smgr.h" -#include "storage/tcap.h" -#include "streaming/streaming_catalog.h" -#include "tcop/utility.h" -#include "utils/acl.h" -#include "utils/aiomem.h" -#include "utils/builtins.h" -#include "utils/extended_statistics.h" -#include "utils/fmgroids.h" -#include "utils/int8.h" -#include "utils/int16.h" -#include "utils/inval.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/partcache.h" -#include "utils/partitionmap.h" -#include "utils/partitionlocate.h" -#include "utils/partitionmap_gs.h" -#include "utils/partitionkey.h" -#include "utils/relcache.h" -#include "utils/sec_rls_utils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "access/heapam.h" -#include "utils/typcache.h" -#include "utils/numeric.h" -#include "plugin_utils/timestamp.h" -#include "catalog/pg_database.h" -#include "catalog/pg_authid.h" -#include "catalog/pg_auth_members.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_job.h" -#include "catalog/pg_user_status.h" -#include "gaussdb_version.h" -#include "workload/workload.h" -#include "utils/builtins.h" -#include "fmgr.h" -#include "pgstat.h" -#include "postmaster/rbcleaner.h" -#include "catalog/gs_collation.h" -#ifdef ENABLE_MULTIPLE_NODES -#include "tsdb/utils/ts_relcache.h" -#include "tsdb/common/ts_tablecmds.h" -#include "tsdb/common/delta_ctlg.h" -#include "tsdb/utils/ctlg_utils.h" -#include "tsdb/utils/delta_utils.h" -#include "tsdb/utils/ctlg_utils.h" -#include "tsdb/cache/tags_cachemgr.h" -#include "tsdb/storage/delta_merge.h" -#include 
"tsdb/optimizer/policy.h" -#endif /* ENABLE_MULTIPLE_NODES */ - -#ifdef PGXC -#include "pgxc/pgxc.h" -#include "pgxc/locator.h" -#include "access/gtm.h" -#include "catalog/pgxc_class.h" -#include "catalog/pgxc_slice.h" -#include "catalog/pgxc_node.h" -#include "commands/sequence.h" -#include "optimizer/pgxcship.h" -#include "pgxc/execRemote.h" -#include "pgxc/redistrib.h" -#include "pgxc/groupmgr.h" -#endif -#include "c.h" -#include "instruments/generate_report.h" -#include "gs_ledger/ledger_utils.h" -#include "gs_ledger/userchain.h" - -#include "client_logic/client_logic.h" -#include "client_logic/cache.h" -#include "pgxc/redistrib.h" - -#ifndef ENABLE_MULTIPLE_NODES -#include "utils/pl_package.h" -#endif - - -extern void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_table_age, TransactionId* oldestXmin, - TransactionId* freezeLimit, TransactionId* freezeTableLimit, MultiXactId* multiXactFrzLimit); - -/* - * ON COMMIT action list - */ -typedef struct OnCommitItem { - Oid relid; /* relid of relation */ - OnCommitAction oncommit; /* what to do at end of xact */ - - /* - * If this entry was created during the current transaction, - * creating_subid is the ID of the creating subxact; if created in a prior - * transaction, creating_subid is zero. If deleted during the current - * transaction, deleting_subid is the ID of the deleting subxact; if no - * deletion request is pending, deleting_subid is zero. 
- */ - SubTransactionId creating_subid; - SubTransactionId deleting_subid; -} OnCommitItem; - -static const char* ORCSupportOption[] = {"orientation", "compression", "version", "partial_cluster_rows"}; - -typedef struct ViewInfoForAdd { - Oid ev_class; - char *query_string; -} ViewInfoForAdd; - - - -/* Struct describing one new constraint to check in Phase 3 scan */ -/* Note: new NOT NULL constraints are handled elsewhere */ -typedef struct NewConstraint { - char* name; /* Constraint name, or NULL if none */ - ConstrType contype; /* CHECK or FOREIGN */ - Oid refrelid; /* PK rel, if FOREIGN */ - Oid refindid; /* OID of PK's index, if FOREIGN */ - Oid conid; /* OID of pg_constraint entry, if FOREIGN */ - Node* qual; /* Check expr or CONSTR_FOREIGN Constraint */ - List* qualstate; /* Execution state for CHECK */ -} NewConstraint; - -/* - * Struct describing one new column value that needs to be computed during - * Phase 3 copy (this could be either a new column with a non-null default, or - * a column that we're changing the type of). Columns without such an entry - * are just copied from the old table during ATRewriteTable. Note that the - * expr is an expression over *old* table values. - */ -typedef struct NewColumnValue { - AttrNumber attnum; /* which column */ - Expr* expr; /* expression to compute */ - ExprState* exprstate; /* execution state */ - bool is_generated; /* is it a GENERATED expression? 
*/ - bool is_autoinc; - bool is_addloc; /* is add column first or after */ - AttrNumber newattnum; /* is modify column first or after - -1 denote add; - 0 denote modify without first|after; - > 0 denote modify with first|after */ - char *col_name; - AttrNumber generate_attnum; -} NewColumnValue; - -/* - * Error-reporting support for RemoveRelations - */ -struct dropmsgstrings { - char kind; - int nonexistent_code; - const char* nonexistent_msg; - const char* skipping_msg; - const char* nota_msg; - const char* drophint_msg; -}; - -static const struct dropmsgstrings dropmsgstringarray[] = {{RELKIND_RELATION, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("table \"%s\" does not exist"), - gettext_noop("table \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a table"), - gettext_noop("Use DROP TABLE to remove a table.")}, - {RELKIND_SEQUENCE, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("sequence \"%s\" does not exist"), - gettext_noop("sequence \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a sequence"), - gettext_noop("Use DROP SEQUENCE to remove a sequence.")}, - {RELKIND_LARGE_SEQUENCE, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("large sequence \"%s\" does not exist"), - gettext_noop("large sequence \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a large sequence"), - gettext_noop("Use DROP LARGE SEQUENCE to remove a large sequence.")}, - {RELKIND_VIEW, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("view \"%s\" does not exist"), - gettext_noop("view \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a view"), - gettext_noop("Use DROP VIEW to remove a view.")}, - {RELKIND_CONTQUERY, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("contview \"%s\" does not exist"), - gettext_noop("contview \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a view"), - gettext_noop("Use DROP CONTVIEW to remove a contview.")}, - {RELKIND_MATVIEW, - ERRCODE_UNDEFINED_TABLE, - gettext_noop("materialized view \"%s\" does not 
exist"), - gettext_noop("materialized view \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a materialized view"), - gettext_noop("Use DROP MATERIALIZED VIEW to remove a materialized view.")}, - {RELKIND_INDEX, - ERRCODE_UNDEFINED_OBJECT, - gettext_noop("index \"%s\" does not exist"), - gettext_noop("index \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not an index"), - gettext_noop("Use DROP INDEX to remove an index.")}, - {RELKIND_GLOBAL_INDEX, - ERRCODE_UNDEFINED_OBJECT, - gettext_noop("global partition index \"%s\" does not exist"), - gettext_noop("global partition index \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not an global partition index"), - gettext_noop("Use DROP INDEX to remove an global partition index.")}, - {RELKIND_COMPOSITE_TYPE, - ERRCODE_UNDEFINED_OBJECT, - gettext_noop("type \"%s\" does not exist"), - gettext_noop("type \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a type"), - gettext_noop("Use DROP TYPE to remove a type.")}, - {RELKIND_FOREIGN_TABLE, - ERRCODE_UNDEFINED_OBJECT, - gettext_noop("foreign table \"%s\" does not exist"), - gettext_noop("foreign table \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a foreign table"), - gettext_noop("Use DROP FOREIGN TABLE to remove a foreign table.")}, - {RELKIND_STREAM, - ERRCODE_UNDEFINED_OBJECT, - gettext_noop("stream \"%s\" does not exist"), - gettext_noop("stream \"%s\" does not exist, skipping"), - gettext_noop("\"%s\" is not a stream"), - gettext_noop("Use DROP STREAM to remove a stream.")}, - {'\0', 0, NULL, NULL, NULL, NULL}}; - -struct DropRelationCallbackState { - char relkind; - Oid heapOid; - bool concurrent; -}; - -// When merge partitions, if toast tables have repeat chunkid, replace it. 
-// -struct ChunkIdHashKey { - Oid toastTableOid; - Oid oldChunkId; -}; - -// Entry structures for the hash tables -// -struct OldToNewChunkIdMappingData { - ChunkIdHashKey key; - Oid newChunkId; -}; - -struct RenameTableNameData { - char* schemaname; - char* relname; -}; - -typedef OldToNewChunkIdMappingData* OldToNewChunkIdMapping; - -/* Alter table target-type flags for ATSimplePermissions */ -#define ATT_NULL 0x0000 -#define ATT_TABLE 0x0001 -#define ATT_VIEW 0x0002 -#define ATT_INDEX 0x0004 -#define ATT_COMPOSITE_TYPE 0x0008 -#define ATT_FOREIGN_TABLE 0x0010 -#define ATT_SEQUENCE 0x0020 -#define ATT_MATVIEW 0x0040 - -#define DFS_SUPPORT_AT_CMD(cmd) \ - ((cmd) == AT_AddNodeList || (cmd) == AT_SubCluster || (cmd) == AT_AddColumn || (cmd) == AT_DropColumn || \ - (cmd) == AT_AddStatistics || (cmd) == AT_DeleteStatistics || (cmd) == AT_AddConstraint || \ - (cmd) == AT_DropConstraint || (cmd) == AT_ColumnDefault || (cmd) == AT_ChangeOwner) - -#define BASETABLE_SUPPORT_AT_CMD(cmd) \ - ((cmd) == AT_SubCluster || (cmd) == AT_ChangeOwner || (cmd) == AT_SetRelOptions || \ - (cmd) == AT_ResetRelOptions || (cmd) == AT_ReplaceRelOptions || \ - (cmd) == AT_EnableRls || (cmd) == AT_DisableRls || (cmd) == AT_ForceRls || (cmd) == AT_NoForceRls || \ - (cmd) == AT_SetNotNull || (cmd) == AT_AddIndex || (cmd) == AT_AddIndexConstraint || \ - (cmd) == AT_ValidateConstraint || (cmd) == AT_AddConstraint || (cmd) == AT_DropConstraint || \ - (cmd) == AT_EncryptionKeyRotation) - -#define MATVIEW_SUPPORT_AT_CMD(cmd) ((cmd) == AT_SubCluster || (cmd) == AT_ChangeOwner) - -#define MLOG_MAP_SUPPORT_AT_CMD(cmd) ((cmd) == AT_SubCluster) - -#define HDFS_TBLSPC_SUPPORT_CREATE_LOGIC_OBJECT(kind) \ - ((kind) == RELKIND_VIEW || (kind) == RELKIND_FOREIGN_TABLE || (kind) == RELKIND_SEQUENCE || \ - (kind) == RELKIND_COMPOSITE_TYPE || (kind) == RELKIND_STREAM || (kind) == RELKIND_CONTQUERY) - -#define PARTITION_DDL_CMD(cmd) \ - ((cmd) == AT_AddPartition || (cmd) == AT_AddSubPartition || \ - (cmd) == 
AT_DropPartition || (cmd) == AT_DropSubPartition || \ - (cmd) == AT_ExchangePartition || \ - (cmd) == AT_TruncatePartition || (cmd) == AT_TruncateSubPartition || \ - (cmd) == AT_SetPartitionTableSpace || \ - (cmd) == AT_SplitPartition || (cmd) == AT_SplitSubPartition || \ - (cmd) == AT_MergePartition) - -static bool CStoreSupportATCmd(AlterTableType cmdtype); -static bool CStoreSupportConstraint(Constraint* cons); -static List* MergeAttributes( - List* schema, List* supers, char relpersistence, List** supOids, List** supconstr, int* supOidCount); -static bool MergeCheckConstraint(List* constraints, char* name, Node* expr); -static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel); -static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel); -static void StoreCatalogInheritance(Oid relationId, List* supers); -static void StoreCatalogInheritance1(Oid relationId, Oid parentOid, int16 seqNumber, Relation inhRelation); -static int findAttrByName(const char* attributeName, List* schema); -static void AlterIndexNamespaces( - Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses* objsMoved); -static void AlterSeqNamespaces( - Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses* objsMoved, LOCKMODE lockmode); -static ObjectAddress ATExecValidateConstraint(Relation rel, char* constrName, bool recurse, bool recursing, LOCKMODE lockmode); -static int transformColumnNameList(Oid relId, List* colList, int16* attnums, Oid* atttypids); -static int transformFkeyGetPrimaryKey( - Relation pkrel, Oid* indexOid, List** attnamelist, int16* attnums, Oid* atttypids, Oid* opclasses); -static Oid transformFkeyCheckAttrs(Relation pkrel, int numattrs, int16* attnums, Oid* opclasses); -static void checkFkeyPermissions(Relation rel, int16* attnums, int natts); -static CoercionPathType findFkeyCast(Oid targetTypeId, Oid sourceTypeId, Oid* funcid); -static void validateCheckConstraint(Relation rel, 
HeapTuple constrtup); -static void validateCheckConstraintForBucket(Relation rel, Partition part, HeapTuple constrtup); -static void validateForeignKeyConstraint(char* conname, Relation rel, Relation pkrel, Oid pkindOid, Oid constraintOid); -static void createForeignKeyTriggers( - Relation rel, Oid refRelOid, Constraint* fkconstraint, Oid constraintOid, Oid indexOid); -static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode); -static bool ATCheckLedgerTableCmd(Relation rel, AlterTableCmd* cmd); -static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, bool recursing, LOCKMODE lockmode, - bool isDeltaTable = false); -static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode); -static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode); -static void ATRewriteTables(AlterTableStmt *parsetree, List** wqueue, LOCKMODE lockmode); -static void ATRewriteTable(AlteredTableInfo* tab, Relation oldrel, Relation newrel); -static void ATCStoreRewriteTable(AlteredTableInfo* tab, Relation heapRel, LOCKMODE lockMode, Oid targetTblspc); -static void ATCStoreRewritePartition(AlteredTableInfo* tab, LOCKMODE lockMode); -static void at_timeseries_check(Relation rel, AlterTableCmd* cmd); - -#ifndef ENABLE_MULTIPLE_NODES -static void ATOnlyCheckCStoreTable(const AlteredTableInfo* tab, Relation rel); -#endif - -static void PSortChangeTableSpace(Oid psortOid, Oid newTableSpace, LOCKMODE lockmode); -static void ForbidToRewriteOrTestCstoreIndex(AlteredTableInfo* tab); -static inline void ChangeTableSpaceForDeltaRelation(Oid deltaOid, Oid targetTableSpace, LOCKMODE lockmode); -static inline void ChangeTableSpaceForCudescRelation( - Oid cudescIdxOid, Oid cudescOid, Oid targetTableSpace, LOCKMODE lockmode); - -static void ExecRewriteRowTable(AlteredTableInfo*, Oid, LOCKMODE); -static void ExecRewriteRowPartitionedTable(AlteredTableInfo*, Oid, LOCKMODE); 
-static void ExecRewriteCStoreTable(AlteredTableInfo*, Oid, LOCKMODE); -static void ExecRewriteCStorePartitionedTable(AlteredTableInfo*, Oid, LOCKMODE); -static void ExecOnlyTestRowTable(AlteredTableInfo*); -static void ExecOnlyTestRowPartitionedTable(AlteredTableInfo*); -/** - * @Description: Only check the validity of existing data, because of some altering operators. - * For example, the query "alter table ... add column col data type not null" contains - * "NOT NULL" constraint, if the relation has no data, the query will be executed successfully, - * otherwise get a fail result. - * @in tab, The AlteredTableInfo struct. - * @return None. - */ -static void ExecOnlyTestCStoreTable(AlteredTableInfo*); -static void ExecOnlyTestCStorePartitionedTable(AlteredTableInfo*); -static void ExecChangeTableSpaceForRowTable(AlteredTableInfo*, LOCKMODE); -static void ExecChangeTableSpaceForRowPartition(AlteredTableInfo*, LOCKMODE); -static void ExecChangeTableSpaceForCStoreTable(AlteredTableInfo*, LOCKMODE); -static void ExecChangeTableSpaceForCStorePartition(AlteredTableInfo*, LOCKMODE); - -static int GetAfterColumnAttnum(Oid attrelid, const char *after_name); -static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endattnum, bool is_increase); -static void UpdatePgDescriptionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgAttributeFirstAfter(Relation attr_rel, Oid attrelid, int startattnum, int endattnum, - bool is_increase); -static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgConstraintFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgConstraintConfkeyFirstAfter(Relation rel, int startattnum, int endattnum, - bool is_increase); -static void UpdateGenerateColFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdateIndexFirstAfter(Relation rel); -static 
void UpdatePgAttrdefFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgDependFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase, - bool is_modified, bool *hasPartition); -static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); -static ViewInfoForAdd *GetViewInfoFirstAfter(const char *rel_name, Oid objid, bool keep_star = false); -static List *CheckPgRewriteFirstAfter(Relation rel); -static void ReplaceViewQueryFirstAfter(List *query_str); -static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, - bool *has_depend); -static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum); -static void UpdateAttrdefAdnumFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, - bool *has_default); -static void UpdatePartitionPartkeyFirstAfter(Oid myrelid, int curattnum, int newattnum); -static void AlterColumnToFirstAfter(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, - int curattnum); -static bool CheckIndexIsConstraint(Relation dep_rel, Oid objid, Oid *refobjid); - -static AlteredTableInfo* ATGetQueueEntry(List** wqueue, Relation rel, bool isDeltaTable = false); -static void ATSimplePermissions(Relation rel, int allowed_targets); -static void ATWrongRelkindError(Relation rel, int allowed_targets); -static void ATSimpleRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, LOCKMODE lockmode); -static void ATTypedTableRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode); -static List* find_typed_table_dependencies(Oid typeOid, const char* typname, DropBehavior behavior); -static void 
ATPrepAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, - bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode); -static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, ColumnDef* colDef, bool isOid, - bool recurse, bool recursing, bool is_first, char *after_name, LOCKMODE lockmode); -static void check_for_column_name_collision(Relation rel, const char* colname); -static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid); -static void add_column_collation_dependency(Oid relid, int32 attnum, Oid collid); -static void ATPrepAddOids(List** wqueue, Relation rel, bool recurse, AlterTableCmd* cmd, LOCKMODE lockmode); -static ObjectAddress ATExecDropNotNull(Relation rel, const char* colName, LOCKMODE lockmode); -static ObjectAddress ATExecSetNotNull(AlteredTableInfo* tab, Relation rel, const char* colName, LOCKMODE lockmode); -static ObjectAddress ATExecColumnDefault(Relation rel, const char* colName, Node* newDefault, LOCKMODE lockmode); -static void ATPrepSetStatistics(Relation rel); -static ObjectAddress ATExecSetStatistics( - Relation rel, const char* colName, Node* newValue, AlterTableStatProperty additional_property, LOCKMODE lockmode); -static void ATExecAddStatistics(Relation rel, Node* def, LOCKMODE lockmode); -static void ATExecDeleteStatistics(Relation rel, Node* def, LOCKMODE lockmode); -static ObjectAddress ATExecSetOptions(Relation rel, const char* colName, Node* options, bool isReset, LOCKMODE lockmode); -static ObjectAddress ATExecSetStorage(Relation rel, const char* colName, Node* newValue, LOCKMODE lockmode); -static void ATPrepCheckDefault(Node* node); -static bool CheckLastColumn(Relation rel, AttrNumber attrnum); -static void ATPrepDropColumn( - List** wqueue, Relation rel, bool recurse, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode); -static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* colName, DropBehavior behavior, bool recurse, - 
bool recursing, bool missing_ok, LOCKMODE lockmode); -static ObjectAddress ATExecAddIndex(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt, bool is_rebuild, LOCKMODE lockmode); -static ObjectAddress ATExecAddConstraint(List** wqueue, AlteredTableInfo* tab, Relation rel, Constraint* newConstraint, - bool recurse, bool is_readd, LOCKMODE lockmode); -static ObjectAddress ATExecAddIndexConstraint(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt, LOCKMODE lockmode); -static ObjectAddress ATAddCheckConstraint(List** wqueue, AlteredTableInfo* tab, Relation rel, Constraint* constr, bool recurse, - bool recursing, bool is_readd, LOCKMODE lockmode); -static ObjectAddress ATAddForeignKeyConstraint(AlteredTableInfo* tab, Relation rel, Constraint* fkconstraint, LOCKMODE lockmode); -static void ATExecDropConstraint(Relation rel, const char* constrName, DropBehavior behavior, bool recurse, - bool recursing, bool missing_ok, LOCKMODE lockmode); -static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, bool recursing, - AlterTableCmd* cmd, LOCKMODE lockmode); -static void ATPrepAlterModifyColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, - bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode); -static bool ATColumnChangeRequiresRewrite(Node* expr, AttrNumber varattno); -static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode); -static void ATExecAlterModifyColumn(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd); -static ObjectAddress ATExecAlterColumnGenericOptions(Relation rel, const char* colName, List* options, LOCKMODE lockmode); -static void ATPostAlterTypeCleanup(List** wqueue, AlteredTableInfo* tab, LOCKMODE lockmode); -static void ATPostAlterTypeParse( - Oid oldId, Oid oldRelId, Oid refRelId, const char* cmd, List** wqueue, LOCKMODE lockmode, bool rewrite); -void TryReuseIndex(Oid oldId, IndexStmt* stmt); - -void 
tryReusePartedIndex(Oid oldId, IndexStmt* stmt, Relation rel); - -static void TryReuseForeignKey(Oid oldId, Constraint* con); -static void change_owner_fix_column_acls(Oid relationOid, Oid oldOwnerId, Oid newOwnerId); -static void change_owner_recurse_to_sequences(Oid relationOid, Oid newOwnerId, LOCKMODE lockmode); -static ObjectAddress ATExecClusterOn(Relation rel, const char* indexName, LOCKMODE lockmode); -static void ATExecDropCluster(Relation rel, LOCKMODE lockmode); -static void ATPrepSetTableSpace(AlteredTableInfo* tab, Relation rel, const char* tablespacename, LOCKMODE lockmode); -static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode); -static void ATExecSetRelOptionsToast(Oid toastid, List* defList, AlterTableType operation, LOCKMODE lockmode); -static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation rel, Node* partition); -static void ATExecSetTableSpaceForPartitionP3(Oid tableOid, Oid partOid, Oid newTableSpace, LOCKMODE lockmode); -static void atexecset_table_space(Relation rel, Oid newTableSpace, Oid newrelfilenode); -static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType operation, - LOCKMODE lockmode, bool innerset = false, AlteredTableInfo* tab = NULL); -static void ATExecEnableDisableTrigger( - Relation rel, const char* trigname, char fires_when, bool skip_system, LOCKMODE lockmode); -static void ATExecEnableDisableRule(Relation rel, const char* rulename, char fires_when, LOCKMODE lockmode); -static void ATPrepAddInherit(Relation child_rel); -static ObjectAddress ATExecAddInherit(Relation child_rel, RangeVar* parent, LOCKMODE lockmode); -static ObjectAddress ATExecDropInherit(Relation rel, RangeVar* parent, LOCKMODE lockmode); -static void drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid); -static ObjectAddress ATExecAddOf(Relation rel, const TypeName* ofTypename, LOCKMODE lockmode); -static void ATExecDropOf(Relation rel, LOCKMODE lockmode); -static void 
ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt* stmt, LOCKMODE lockmode); -static void ATExecGenericOptions(Relation rel, List* options); -static void ATExecSetCompress(Relation rel, const char* cmprsId); -#ifdef PGXC -static void AtExecDistributeBy(Relation rel, DistributeBy* options); -static void AtExecSubCluster(Relation rel, PGXCSubCluster* options); -static void AtExecAddNode(Relation rel, List* options); -static void AtExecDeleteNode(Relation rel, List* options); -static void AtExecCopySlice(CatCList* sliceList, Oid tabOid, Relation pgxcSliceRel); -static void AtExecUpdateSliceLike(Relation rel, const RangeVar* refTableName); -static void ATCheckCmd(Relation rel, AlterTableCmd* cmd); -static void sqlcmd_alter_exec_set_charsetcollate(Relation rel, CharsetCollateOptions* cc, LOCKMODE lockmode); -static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation rel, - AlterTableCmd* cmd, LOCKMODE lockmode); -static void sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation rel, - CharsetCollateOptions* cc, LOCKMODE lockmode); -static DFSFileType getSetFormatNewFormat(AlterTableCmd* cmd); -static bool checkColumnTypeIsBytea(Relation rel); -static DFSFileType getFormatByDefElem(DefElem* opt); -static DFSFileType getFormatByName(char* format); -static RedistribState* BuildRedistribCommands(Oid relid, List* subCmds); -static Oid* delete_node_list(Oid* old_oids, int old_num, Oid* del_oids, int del_num, int* new_num); -static Oid* add_node_list(Oid* old_oids, int old_num, Oid* add_oids, int add_num, int* new_num); -#endif - -static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber forkNum, char relpersistence); -static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char relpersistence, BlockNumber srcBlocks, - BlockNumber destBlocks, TupleDesc srcTupleDesc, Oid srcToastOid, Oid destToastOid, HTAB* chunkIdHashTable, - bool destHasFSM); -static void mergeVMBlock(Relation src, Relation dest, 
BlockNumber srcHeapBlocks, BlockNumber destHeapBloks); -static const char* storage_name(char c); - -static void RangeVarCallbackForDropRelation( - const RangeVar* rel, Oid relOid, Oid oldRelOid, bool target_is_partition, void* arg); -static void RangeVarCallbackForAlterRelation( - const RangeVar* rv, Oid relid, Oid oldrelid, bool target_is_partition, void* arg); - -static bool isQueryUsingTempRelation_walker(Node *node, void *context); -static bool CheckRangePartitionKeyType(Oid typoid); -static void CheckRangePartitionKeyType(FormData_pg_attribute* attrs, List* pos); - -static bool CheckListPartitionKeyType(Oid typoid); -static void CheckListPartitionKeyType(FormData_pg_attribute* attrs, List* pos); - -static bool CheckHashPartitionKeyType(Oid typoid); -static void CheckHashPartitionKeyType(FormData_pg_attribute* attrs, List* pos); - -static void CheckIntervalPartitionKeyType(FormData_pg_attribute* attrs, List* pos); -static void CheckIntervalValue( - const FormData_pg_attribute* attrs, const List* pos, const IntervalPartitionDefState* intervalPartDef); -static void CheckPartitionTablespace(const char* spcname, Oid owner); -static bool ConfirmTypeInfo(Oid* target_oid, int* target_mod, Const* src, Form_pg_attribute attrs, bool isinterval); - -static void ATPrepAddPartition(Relation rel); -static void ATPrepAddSubPartition(Relation rel); -static void ATPrepDropPartition(Relation rel); -static void ATPrepDropSubPartition(Relation rel); -static void ATPrepUnusableIndexPartition(Relation rel); -static void ATPrepUnusableAllIndexOnPartition(Relation rel); -static void ATExecAddPartition(Relation rel, AddPartitionState *partState); -static void ATExecAddPartitionInternal(Relation rel, AddPartitionState *partState); -static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartState); -static void CheckForAddPartition(Relation rel, List *partDefStateList); -static void CheckForAddSubPartition(Relation rel, Relation partrel, List *subpartDefStateList); 
-static void CheckTablespaceForAddPartition(Relation rel, List *partDefStateList); -static void CheckPartitionNameConflictForAddPartition(List *newPartitionNameList, List *existingPartitionNameList); -static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partDefState, bool partkeyIsFunc = false); -static void CheckSubpartitionForAddPartition(Relation rel, Node *partDefState); -static void ATExecDropPartition(Relation rel, AlterTableCmd *cmd); -static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd); -static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *command); -static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, const char *command); - -static void ATExecUnusableIndexPartition(Relation rel, const char* partition_name); -static void ATExecUnusableIndex(Relation rel); -static void ATUnusableGlobalIndex(Relation rel); -static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partition_name); -static void ATExecVisibleIndex(Relation rel, char* index_name, bool visible); -static void ATExecModifyRowMovement(Relation rel, bool rowMovement); -static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd); -static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd); -static void checkStorageTypeForExchange(Relation partTableRel, Relation ordTableRel); -static void checkColStoreForExchange(Relation partTableRel, Relation ordTableRel); -static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd); -static void UpdatePrevIntervalPartToRange( - Relation srcRel, Relation pgPartition, int srcPartIndex, const char* briefCmd); -static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd); -static void ATExecAddTblIntoCBI(Relation idxRel, const AddTableIntoCBIState* state); -static void checkCompressForExchange(Relation partTableRel, Relation ordTableRel); -static void checkColumnForExchange(Relation partTableRel, Relation 
ordTableRel); -static void checkConstraintForExchange(Relation partTableRel, Relation ordTableRel); -/** - * @Description: Get the all constraint for specified table. - * @in relOid, the specified table oid. - * @in conType, the constraint type, default value is invalid. - * @return return constraint list. - */ -static List* getConstraintList(Oid relOid, char conType = CONSTRAINT_INVALID); -static void freeConstraintList(List* list); - -/** - * @Description: Whether or not the column has partial cluster key. - * @in rel, One relation. - * @in attNum, Represnet the attribute number. - * @return If exits partial cluster key in the column, return true, - * otherwise return false. - */ -static bool colHasPartialClusterKey(Relation rel, AttrNumber attNum); - -static void checkDistributeForExchange(Relation partTableRel, Relation ordTableRel); -static void checkIndexForExchange( - Relation partTableRel, Oid partOid, Relation ordTableRel, List** partIndexList, List** ordIndexList); -static void checkValidationForExchange(Relation partTableRel, Relation ordTableRel, Oid partOid, bool exchangeVerbose); - -static void finishIndexSwap(List* partIndexList, List* ordIndexList); -static Oid getPartitionOid(Relation partTableRel, const char* partName, Node* rangePartDef); -static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd); -static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd); -static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation partTableRel, int srcPartIndex); -static void checkDestPartitionNameForSplit(Oid partTableOid, List* partDefList); -static List* getDestPartBoundaryList(Relation partTableRel, List* destPartDefList, List** listForFree); -static void freeDestPartBoundaryList(List* list1, List* list2); -static char* GenTemporaryPartitionName(Relation partTableRel, int sequence = 0); -static Oid AddTemporaryPartition(Relation partTableRel, Node* partDef); -static void 
AlterPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel, Oid partOid); -static void AlterSubPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel, Oid partOid, Oid subPartOid); -static Oid AddTemporaryRangePartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel, - int sequence, bool* renameTargetPart); -static Oid AddTemporaryPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation rel, - Oid srcPartOid, bool* renameTargetPart); -static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTableRel, Oid srcPartOid, - TransactionId frozenXid, MultiXactId multiXid); -static void fastAddPartition(Relation partTableRel, List* destPartDefList, List** newPartOidList); -static void FastAddListSubPartition(Relation rel, List* destPartDefList, Oid partOid, List** newPartOidList); -static void FastAddRangeSubPartition(Relation rel, List* destPartDefList, Oid partOid, List** newPartOidList); -static void readTuplesAndInsert(Relation tempTableRel, Relation partTableRel); -static Oid createTempTableForPartition(Relation partTableRel, Partition part); -static void ATPrepEnableRowMovement(Relation rel); -static void ATPrepDisableRowMovement(Relation rel); -static void ATPrepTruncatePartition(Relation rel); -static void ATPrepTruncateSubPartition(Relation rel); -static void ATPrepExchangePartition(Relation rel); -static void ATPrepMergePartition(Relation rel); -static void ATPrepSplitPartition(Relation rel); -static void ATPrepSplitSubPartition(Relation rel); -static void ATPrepResetPartitionno(Relation rel); -static void ATExecResetPartitionno(Relation rel); -static void replaceRepeatChunkId(HTAB* chunkIdHashTable, List* srcPartToastRels); -static bool checkChunkIdRepeat(List* srcPartToastRels, int index, Oid chunkId); -static void addCudescTableForNewPartition(Relation relation, Oid newPartId); -static void addDeltaTableForNewPartition(Relation relation, Oid newPartId); -static bool 
OptionSupportedByORCRelation(const char* option); -static void checkObjectCreatedinHDFSTblspc(CreateStmt* stmt, char relkind); -/** - * @Description: Previous check whether the object may be created. - * @in stmt, the object struct. - * @in dfsTablespace, whether is a HDFS tablespace. - */ -static void PreCheckCreatedObj(CreateStmt* stmt, bool dfsTablespace, char relKind); -static List* InitDfsOptions(List* options); -static void validateDfsTableDef(CreateStmt* stmt, bool isDfsTbl); -static void simple_delete_redis_tuples(Relation rel, Oid partOid); -static void ResetPartsRedisCtidRelOptions(Relation rel); -static void ResetOnePartRedisCtidRelOptions(Relation rel, Oid part_oid); -static void ResetRelRedisCtidInfo(Relation rel, Oid part_oid, HeapTuple tuple, Oid pgcat_oid, Datum* repl_val, - const bool* repl_null, const bool* repl_repl); -static void ResetRelRedisCtidRelOptions( - Relation rel, Oid part_oid, int cat_id, int att_num, int att_inx, Oid pgcat_oid); -static bool WLMRelationCanTruncate(Relation rel); -static OnCommitAction GttOncommitOption(const List *options); -static void ATCheckDuplicateColumn(const AlterTableCmd* cmd, const List* tabCmds); -static void ATCheckNotNullConstr(const AlterTableCmd* cmd, const AlteredTableInfo* tab); -static void DelDependencONDataType(Relation rel, Relation depRel, const Form_pg_attribute attTup); -static void ATExecEncryptionKeyRotation(Relation rel, LOCKMODE lockmode); -static void CopyTempAutoIncrement(Relation oldrel, Relation newrel); -static void ATAlterCheckModifiyColumnRepeatedly(const AlterTableCmd* cmd, const List* tab_cmds); -static int128 EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber attnum, Datum* value, bool* is_null); -static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc); -static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod); -static void check_unsupported_charset_for_column(Oid collation, const char* col_name); - -inline 
static bool CStoreSupportATCmd(AlterTableType cmdtype) -{ - bool ret = false; - switch (cmdtype) { - case AT_AddPartition: - case AT_ExchangePartition: - case AT_TruncatePartition: - case AT_DropPartition: - case AT_ResetPartitionno: - case AT_AddConstraint: - case AT_DropConstraint: - case AT_AddNodeList: - case AT_DeleteNodeList: - case AT_SubCluster: - case AT_UpdateSliceLike: - case AT_AddColumn: - case AT_DropColumn: - case AT_AlterColumnType: - case AT_ColumnDefault: - case AT_SetStatistics: - case AT_AddStatistics: - case AT_SET_COMPRESS: - case AT_DeleteStatistics: - case AT_SetTableSpace: - case AT_SetPartitionTableSpace: - case AT_SetOptions: - case AT_ResetOptions: - case AT_SetStorage: - case AT_SetRelOptions: - case AT_ResetRelOptions: - case AT_MergePartition: - case AT_ChangeOwner: - case AT_EnableRls: - case AT_DisableRls: - case AT_ForceRls: - case AT_NoForceRls: - case AT_EncryptionKeyRotation: -#ifndef ENABLE_MULTIPLE_NODES - case AT_AddIndex: - case AT_AddIndexConstraint: -#endif - case AT_VisibleIndex: - case AT_InvisibleIndex: - ret = true; - break; - default: - ret = false; - break; - } - return ret; -} - -inline static bool CStoreSupportConstraint(Constraint* cons) -{ - bool ret = false; - switch (cons->contype) { -#ifndef ENABLE_MULTIPLE_NODES - case CONSTR_PRIMARY: - case CONSTR_UNIQUE: - /* Only check deferrable attribute in primary and unique constraint */ - if (cons->deferrable || cons->initdeferred) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column/timeseries store unsupport DEFERRABLE/INITIALLY DEFERRED on constraint \"%s\"", - GetConstraintType(cons->contype)))); - } - case CONSTR_ATTR_NOT_DEFERRABLE: /* CONSTR_ATTR_XXX is a constraint attribute instead of actual constraint */ - case CONSTR_ATTR_IMMEDIATE: -#endif - case CONSTR_NULL: - case CONSTR_NOTNULL: - case CONSTR_DEFAULT: - case CONSTR_CLUSTER: - ret = true; - break; - default: - ret = false; - break; - } - return ret; -} - -/* get all partitions 
oid */ -static List* get_all_part_oid(Oid relid) -{ - List* oid_list = NIL; - Relation pgpartition; - TableScanDesc scan; - HeapTuple tuple; - ScanKeyData keys[2]; - - /* Process all partitions of this partitiond table */ - ScanKeyInit(&keys[0], - Anum_pg_partition_parttype, - BTEqualStrategyNumber, - F_CHAREQ, - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION)); - - ScanKeyInit(&keys[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - - pgpartition = heap_open(PartitionRelationId, AccessShareLock); - scan = tableam_scan_begin(pgpartition, SnapshotNow, 2, keys); - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - oid_list = lappend_oid(oid_list, HeapTupleGetOid(tuple)); - } - tableam_scan_end(scan); - heap_close(pgpartition, AccessShareLock); - - return oid_list; -} -static List* find_cstore_delta(Relation rel, LOCKMODE lockmode) -{ - List* children = NIL; - if (!RELATION_IS_PARTITIONED(rel)) { - /* - * Add column for delta table. - */ - children = lappend_oid(children, RelationGetDeltaRelId(rel)); - LockRelationOid(RelationGetDeltaRelId(rel), lockmode); - } else { - List* part_oid_list = get_all_part_oid(RelationGetRelid(rel)); - ListCell* cell = NULL; - foreach (cell, part_oid_list) { - Oid part_oid = lfirst_oid(cell); - Partition partrel = partitionOpen(rel, part_oid, lockmode); - /* - * Add column for delta table. 
- */ - children = lappend_oid(children, partrel->pd_part->reldeltarelid); - LockRelationOid(partrel->pd_part->reldeltarelid, lockmode); - partitionClose(rel, partrel, NoLock); - } - list_free(part_oid_list); - } - return children; -} - -static void CheckPartitionUnsupported(CreateStmt* stmt) -{ - if (stmt->partTableState && stmt->partTableState->intervalPartDef) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Unsupport feature"), - errdetail( - "cstore/timeseries don't support interval partition type."))); - } - - if (stmt->partTableState != NULL && stmt->partTableState->subPartitionState != NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Unsupport feature"), errdetail("cstore/timeseries don't support subpartition table."), - errcause("The function is not implemented."), - erraction("Create a new row-store table to replace it.")))); - } - - if (stmt->partTableState != NULL && - (stmt->partTableState->partitionStrategy == PART_STRATEGY_LIST || - stmt->partTableState->partitionStrategy == PART_STRATEGY_HASH)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Unsupport feature"), - errdetail( - "cstore/timeseries don't support LIST/HASH partition type."))); - } -} - -// all unsupported features are checked and error reported here for cstore table -static void CheckCStoreUnsupportedFeature(CreateStmt* stmt) -{ - Assert(stmt); - - if (stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("global temporary table can only support heap table"))); - } - - if (stmt->ofTypename) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Unsupport feature"), - errdetail("cstore/timeseries don't support relation defination " - "with composite type using CREATE TABLE OF TYPENAME."))); - - if (stmt->inhRelations) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Unsupport feature"), - 
errdetail("cstore/timeseries don't support relation defination with inheritance."))); - } - - if (stmt->relation->schemaname != NULL && - IsSystemNamespace(get_namespace_oid(stmt->relation->schemaname, false))) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Unsupport feature"), - errdetail("cstore/timeseries don't support relation defination with System namespace."))); - } - CheckPartitionUnsupported(stmt); - // Check constraints - ListCell* lc = NULL; - foreach (lc, stmt->tableEltsDup) { - Node* element = (Node*)lfirst(lc); - /* check table-level constraints */ - if (IsA(element, Constraint) && !CStoreSupportConstraint((Constraint*)element)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column/timeseries store unsupport constraint \"%s\"", - GetConstraintType(((Constraint*)element)->contype)))); - } else if (IsA(element, ColumnDef)) { - List* colConsList = ((ColumnDef*)element)->constraints; - ListCell* lc2 = NULL; - /* check column-level constraints */ - foreach (lc2, colConsList) { - Constraint* colCons = (Constraint*)lfirst(lc2); - if (!CStoreSupportConstraint(colCons)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column/timeseries store unsupport constraint \"%s\"", - GetConstraintType(colCons->contype)))); - } - } - } - } -} - -void CheckCStoreRelOption(StdRdOptions* std_opt) -{ - Assert(std_opt); - if (std_opt->partial_cluster_rows < std_opt->max_batch_rows && std_opt->partial_cluster_rows >= 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("PARTIAL_CLUSTER_ROWS cannot be less than MAX_BATCHROW."), - errdetail("PARTIAL_CLUSTER_ROWS must be greater than or equal to MAX_BATCHROW."), - errhint("PARTIAL_CLUSTER_ROWS is MAX_BATCHROW multiplied by an integer."))); - } -} - -static void partition_policy_interval_check(StdRdOptions* std_opt, bool is_ts_table) -{ - int32 typmod = -1; - Interval* ttl_interval = NULL; - Interval* period_interval = NULL; - if 
(pg_strcasecmp(TIME_UNDEFINED, StdRdOptionsGetStringData(std_opt, ttl, TIME_UNDEFINED)) != 0) { - ttl_interval = char_to_interval((char*)StdRdOptionsGetStringData(std_opt, ttl, TIME_UNDEFINED), typmod); - } - if (pg_strcasecmp(TIME_UNDEFINED, StdRdOptionsGetStringData(std_opt, period, TIME_UNDEFINED)) != 0) { - period_interval = char_to_interval((char*)StdRdOptionsGetStringData(std_opt, period, TIME_UNDEFINED), typmod); - } - - if (period_interval == NULL && is_ts_table) { - period_interval = char_to_interval(TIME_ONE_DAY, typmod); - ereport(WARNING, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg(" using %s as default period.", TIME_ONE_DAY))); - } - - if (ttl_interval != NULL && period_interval != NULL && interval_cmp_internal(period_interval, ttl_interval) > 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg(" period must smaller than ttl."))); - } -} - -static void check_partion_policy_rel_option(List* options, StdRdOptions* std_opt) -{ - Assert(std_opt); - ListCell* opt = NULL; - bool has_opt = false; - - if (options == NULL) { - return; /* nothing to do */ - } - bool is_ts_table = false; - foreach (opt, options) { - DefElem* def = (DefElem*)lfirst(opt); - if (pg_strcasecmp(def->defname, ORIENTATION_TIMESERIES) == 0) { - is_ts_table = true; - } - if (pg_strcasecmp(def->defname, "ttl") == 0 || pg_strcasecmp(def->defname, "period") == 0) { - has_opt = true; - } - } - - if (!has_opt) { - return; /* nothing to do */ - } - - partition_policy_interval_check(std_opt, is_ts_table); -} - -/* - * Brief : Whether or not set orientation option and check the validity - * Input : options, the options list. - * isDfsTbl, whether or not is a dfs table. - * Output : isCUFormat, whether ot not the table is CU format. - * Return Value : Retutn the true if has been setted otherwise return false. - * Notes : None. 
- */ -static bool isOrientationSet(List* options, bool* isCUFormat, bool isDfsTbl) -{ - bool isSetFormat = false; - ListCell* cell = NULL; - foreach (cell, options) { - DefElem* def = (DefElem*)lfirst(cell); - if (pg_strcasecmp(def->defname, "orientation") == 0) { - if (isDfsTbl) { - /* The orientation option values must be "ORIENTATION_COLUMN" or "ORIENTATION_ORC". */ - if (pg_strcasecmp(defGetString(def), ORIENTATION_ORC) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"ORIENTATION\" option"), - errdetail("Valid string is \"orc\"."))); - } - } else { -#ifdef ENABLE_MULTIPLE_NODES - if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) != 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_TIMESERIES) != 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_ROW) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"ORIENTATION\" option"), - errdetail("Valid string are \"column\", \"row\", \"timeseries\"."))); - } -#else /* ENABLE_MULTIPLE_NODES */ - if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) != 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_ROW) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"ORIENTATION\" option"), - errdetail("Valid string are \"column\", \"row\"."))); - } -#endif /* ENABLE_MULTIPLE_NODES */ - } - if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0 && isCUFormat != NULL) { - *isCUFormat = true; - } - - isSetFormat = true; - break; - } - } - - return isSetFormat; -} - -/* - * @Description: add default reloption of both row and column table. - * @Param [IN] options: the table's option which user defined. - * @Param [IN] relkind: table's kind(ordinary table or other database object). - * @return: option with defalut options. 
- */ -static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateStmt* stmt, Oid relnamespace) -{ - List* res = options; - int8 relcmprs = stmt->row_compress; - ListCell* cell = NULL; - bool isCStore = false; - bool isTsStore = false; - bool hasCompression = false; - bool createWithOrientationRow = false; /* To mark whether table have been create with(orientation = row) */ - bool isUstore = false; - bool assignedStorageType = false; - bool segment = false; - TableCreateSupport tableCreateSupport{(int)COMPRESS_TYPE_NONE, false, false, false, false, false, true, false}; - (void)isOrientationSet(options, NULL, false); - foreach (cell, options) { - DefElem* def = (DefElem*)lfirst(cell); - if (pg_strcasecmp(def->defname, "orientation") == 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0) { - isCStore = true; - tableCreateSupport.is_orientation_row = false; - } else if (pg_strcasecmp(def->defname, "orientation") == 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_ROW) == 0) { - createWithOrientationRow = true; - tableCreateSupport.is_orientation_row = true; - } else if (pg_strcasecmp(def->defname, "orientation") == 0 && - pg_strcasecmp(defGetString(def), ORIENTATION_TIMESERIES) == 0) { - isTsStore = true; - tableCreateSupport.is_orientation_row = false; - } else if (pg_strcasecmp(def->defname, "compression") == 0) { - if (pg_strcasecmp(defGetString(def), COMPRESSION_NO) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_YES) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_LOW) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_MIDDLE) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_HIGH) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid string for \"COMPRESSION\" option"), - errdetail( - "Valid string are \"no\", \"yes\", \"low\", \"middle\", \"high\" for non-dfs table."))); - } - hasCompression = true; - } else if (pg_strcasecmp(def->defname, "version") == 0) { - 
ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("It is not allowed to assign version option for non-dfs table."))); - } else if (pg_strcasecmp(def->defname, "segment") == 0) { - segment = ReadBoolFromDefElem(def); - } else { - SetOneOfCompressOption(def, &tableCreateSupport); - } - - if (pg_strcasecmp(def->defname, "orientation") == 0 && pg_strcasecmp(defGetString(def), ORIENTATION_ORC) == 0) { -#ifdef ENABLE_MULTIPLE_NODES - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"ORIENTATION\" option"), - errdetail("Valid string are \"column\", \"row\", \"timeseries\"."))); -#else /* ENABLE_MULTIPLE_NODES */ - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"ORIENTATION\" option"), - errdetail("Valid string are \"column\", \"row\"."))); -#endif /* ENABLE_MULTIPLE_NODES */ - } - if (pg_strcasecmp(def->defname, "storage_type") == 0) { - if (pg_strcasecmp(defGetString(def), TABLE_ACCESS_METHOD_USTORE) == 0) { - isUstore = true; - tableCreateSupport.is_storage_type_ustore = true; - } - assignedStorageType = true; - } - } - - if (isUstore && !createWithOrientationRow && !isCStore && !isTsStore) { - DefElem* def = makeDefElem("orientation", (Node*)makeString(ORIENTATION_ROW)); - res = lcons(def, options); - } - if (isCStore && !hasCompression) { - DefElem* def = makeDefElem("compression", (Node*)makeString(COMPRESSION_LOW)); - res = lappend(options, def); - } - - bool noSupportTable = segment || isCStore || isTsStore || relkind != RELKIND_RELATION || - stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED || - stmt->relation->relpersistence == RELPERSISTENCE_TEMP || - stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP; - if (noSupportTable && tableCreateSupport.compressType) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compresstype can not be used in segment table, " - "column table, view, unlogged table or temp table."))); - } - 
CheckCompressOption(&tableCreateSupport); - - if (isUstore && !isCStore && !hasCompression && !tableCreateSupport.compressType) { - DefElem* def = makeDefElem("compression", (Node *)makeString(COMPRESSION_NO)); - res = lappend(options, def); - } - - /* If it's a row table, give the defalut reloption with orientation and compression. */ - if (!isUstore && !isCStore && !isTsStore && (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)) { - Value* rowCmprOpt = NULL; - if (IsCompressedByCmprsInPgclass((RelCompressType)relcmprs)) { - rowCmprOpt = makeString(COMPRESSION_YES); - } else { - rowCmprOpt = makeString(COMPRESSION_NO); - } - /* - * As column table default reloptions is {orientation=column,compression=low}, we - * set the row table default reloptions {orientation=row,compression=no} to keep the - * display format on "\d(+)" consistently. - */ - if (options == NULL) { - DefElem* def1 = makeDefElem("orientation", (Node*)makeString(ORIENTATION_ROW)); - DefElem* def2 = makeDefElem("compression", (Node*)rowCmprOpt); - res = list_make2(def1, def2); - if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && - !IsSystemNamespace(relnamespace) && !assignedStorageType) { - DefElem* def3 = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE)); - res = lappend(res, def3); - } - } else { - /* - * To show orientation at the head of reloption when createWithOrientationRow - * is false, we use lcons instead of lappend here. 
- */ - if (!createWithOrientationRow) { - DefElem *def1 = makeDefElem("orientation", (Node *)makeString(ORIENTATION_ROW)); - res = lcons(def1, options); - } - if (!hasCompression && !tableCreateSupport.compressType) { - DefElem *def2 = makeDefElem("compression", (Node *)rowCmprOpt); - res = lappend(options, def2); - } - if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && - !IsSystemNamespace(relnamespace) && !assignedStorageType) { - DefElem *def2 = makeDefElem("storage_type", (Node *)makeString(TABLE_ACCESS_METHOD_USTORE)); - res = lappend(options, def2); - } - } - } - return res; -} - -/* - * @Description: Previous check whether the object may be created. - * @in stmt, the object struct. - * @in dfsTablespace, whether is a HDFS tablespace. - */ -static void PreCheckCreatedObj(CreateStmt* stmt, bool dfsTablespace, char relKind) -{ -#ifndef ENABLE_MULTIPLE_NODES - if (stmt->subcluster != NULL) { - if (stmt->subcluster->clustertype == SUBCLUSTER_GROUP || stmt->subcluster->clustertype == SUBCLUSTER_NODE) - DISTRIBUTED_FEATURE_NOT_SUPPORTED(); - } -#endif - - bool ignore_enable_hadoop_env = false; - ListCell* cell = NULL; - foreach (cell, stmt->options) { - DefElem* def = (DefElem*)lfirst(cell); - if (0 == pg_strcasecmp(def->defname, OptIgnoreEnableHadoopEnv)) { - if (1 == defGetInt64(def)) { - ignore_enable_hadoop_env = true; - break; - } - } - } - - if (!ignore_enable_hadoop_env && u_sess->attr.attr_sql.enable_hadoop_env && !dfsTablespace) { - if (relKind == RELKIND_RELATION && stmt->relation->relpersistence != RELPERSISTENCE_TEMP && - stmt->relation->relpersistence != RELPERSISTENCE_UNLOGGED) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("It is unsupported to create row/cstore non-temporary/non-unlogged table in hadoop " - "enviroment."))); - } - } - - if (dfsTablespace && (stmt->relation->relpersistence == RELPERSISTENCE_TEMP || - stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED)) 
{ - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("It is unsupported to create unlogged table and temporary table on DFS tablespace."))); - } - - if (dfsTablespace && stmt->subcluster && !in_logic_cluster() && IS_PGXC_COORDINATOR && - stmt->subcluster->clustertype == SUBCLUSTER_GROUP) { - /* For dfs table we are going to block TO-GROUP create table request */ - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("It is unsupported to create table with to group option on DFS tablespace."))); - } - - if ((RELKIND_FOREIGN_TABLE == relKind || RELKIND_STREAM == relKind) - && stmt->subcluster && !in_logic_cluster() && IS_PGXC_COORDINATOR && - stmt->subcluster->clustertype == SUBCLUSTER_GROUP) { - /* For foreign table we are going to block TO-GROUP create table request */ - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("It is unsupported to create foreign table with to group option."))); - } - - if (!isRestoreMode && stmt->subcluster && stmt->subcluster->clustertype == SUBCLUSTER_NODE) { - /* If not in restore mode, we are going to block TO-NODE create table request */ - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("CREATE TABLE ... TO NODE is not yet supported."))); - } -} - -/* - * brief: Check Object to be created. initialize DFS table options. - * check DFS table definition. - * input param @stmt: a CreateStmt struct. - * input param @stmt: the object kind. - * Return : None. - */ -static void checkObjectCreatedinHDFSTblspc(CreateStmt* stmt, char relkind) -{ - /* - * If the object is a logic object, do not initialize and check. - */ - if (HDFS_TBLSPC_SUPPORT_CREATE_LOGIC_OBJECT(relkind)) { - return; - } - - /* - * Check options and add default option for Dfs relation if need. - */ - stmt->options = InitDfsOptions(stmt->options); - - /* - * Validate Dfs table definition. - */ - validateDfsTableDef(stmt, true); -} - -/* - * brief: Whether ORC format relation support the option or not. 
- * input param @option: the option to be checked. - * Return true if the option is supported by ORC format relation, - * otherwise return false. - */ -static bool OptionSupportedByORCRelation(const char* option) -{ - for (uint32 i = 0; i < sizeof(ORCSupportOption) / sizeof(char*); ++i) { - if (0 == pg_strcasecmp(ORCSupportOption[i], option)) { - return true; - } - } - return false; -} - -/* - * Brief : Initialize and validity check for options. If the user do - * not set option, we assign default value to the option. - * Input : options, the setted table options by user. - * Output : None. - * Return Value : Retrun new options list. - * Notes : 1. The default orientation value is "ORIENTATION_ORC". - * 2. The defualt compression value is "COMPRESSION_NO"; - * 3. The default version value is "ORC_VERSION_012". - */ -static List* InitDfsOptions(List* options) -{ - List* res = options; - ListCell* cell = NULL; - bool isSetFormat = false; - bool hasSetCompression = false; - bool hasSetVersion = false; - - isSetFormat = isOrientationSet(options, NULL, true); - foreach (cell, options) { - DefElem* def = (DefElem*)lfirst(cell); - - if (!OptionSupportedByORCRelation(def->defname)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Unsupport \"%s\" option", def->defname), - errdetail("Valid options are \"%s\", \"%s\", \"%s\".", - ORCSupportOption[0], - ORCSupportOption[1], - ORCSupportOption[2]))); - } - if (pg_strcasecmp(def->defname, "compression") == 0) { - /* - * YES=LOW=SNAPPY+LOW_COMPRESS; MIDDLE=SNAPPY+HIGH_COMPRESS; SNAPPY and LZ4 - * with low compress and ZLIB with HIGH compress; HIGH=ZLIB+HIGH_COMPRESS. 
- */ - if (pg_strcasecmp(defGetString(def), COMPRESSION_NO) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_YES) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_ZLIB) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_SNAPPY) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_LZ4) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_LOW) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_MIDDLE) != 0 && - pg_strcasecmp(defGetString(def), COMPRESSION_HIGH) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid string for \"COMPRESSION\" option"), - errdetail("Valid string are \"no\", \"yes\", \"low\", \"middle\", \"high\", \"snappy\", " - "\"zlib\", \"lz4\" for dfs table."))); - } - hasSetCompression = true; - } - if (pg_strcasecmp(def->defname, "version") == 0) { - if (pg_strcasecmp(defGetString(def), ORC_VERSION_012) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPTION), - errmsg("Invalid string for \"VERSION\" option"), - errdetail("Valid string is \"0.12\"."))); - } - - hasSetVersion = true; - } - } - - if (!isSetFormat) { - DefElem* def = makeDefElem("orientation", (Node*)makeString(ORIENTATION_ORC)); - res = lappend(res, def); - } - if (!hasSetCompression) { - DefElem* def = NULL; - - def = makeDefElem("compression", (Node*)makeString(COMPRESSION_SNAPPY)); - res = lappend(res, def); - } - if (!hasSetVersion) { - DefElem* def = makeDefElem("version", (Node*)makeString(ORC_VERSION_012)); - res = lappend(res, def); - } - - return res; -} - -/* - * Brief : Validate Dfs table definition. - * Input : stmt, a CreateStmt struct. - * dfsTbl, whethre or not the table is dfs table. - * The dfsTbl is true, the table is dfs table. - * Output : None. - * Return Value : None. - * Notes : None. 
- */ -static void validateDfsTableDef(CreateStmt* stmt, bool isDfsTbl) -{ - ListCell* optionCell = NULL; - char* optionValue = NULL; - bool cuFormat = false; - if (!isDfsTbl) { - return; - } - - foreach (optionCell, stmt->options) { - DefElem* optionDef = (DefElem*)lfirst(optionCell); - char* optionDefName = optionDef->defname; - - if (pg_strcasecmp(optionDefName, "orientation") == 0) { - optionValue = defGetString(optionDef); - break; - } - } - - /* determine whether it is a CU */ - if ((NULL != optionValue) && (0 == pg_strcasecmp(optionValue, ORIENTATION_COLUMN))) { - cuFormat = true; - } - - /* - * Currently, we only support "Value-Based" partitioning scheme for partitioned - * HDFS table - */ - if (stmt->partTableState) { - /* - * For value partitioned HDFS table we should force RowMovement ON, as we - * will enable it anyway for a table created as columanr(PAX-ORC) and also - * partition case. - */ - stmt->partTableState->rowMovement = ROWMOVEMENT_ENABLE; - - /* Number of partition key check */ - if (list_length(stmt->partTableState->partitionKey) == 0) { - ereport(ERROR, - (errcode(ERRCODE_PARTITION_ERROR), - errmsg("Num of partition keys in value-partitioned table should not be zeror"))); - } else if (list_length(stmt->partTableState->partitionKey) > VALUE_PARTKEYMAXNUM) { - ereport(ERROR, - (errcode(ERRCODE_PARTITION_ERROR), - errmsg("Num of partition keys in value-partitioned table exceeds max allowed num:%d", - RANGE_PARTKEYMAXNUM))); - } - - /* Partition stragegy check */ - if (stmt->partTableState->partitionStrategy != PART_STRATEGY_VALUE) { - ereport(ERROR, - (errcode(ERRCODE_PARTITION_ERROR), - errmsg("Unsupport partition strategy '%s' feature for dfs table.", - GetPartitionStrategyNameByType(stmt->partTableState->partitionStrategy)))); - } - } - - /* - * Currently, support hash/replication distribution for dfs table(not cu format). - * when support other distribution, this code will be deleted. 
- */ - if (!cuFormat && stmt->distributeby != NULL && stmt->distributeby->disttype != DISTTYPE_HASH && - stmt->distributeby->disttype != DISTTYPE_REPLICATION) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("Only support hash/replication distribution for dfs table."))); - } -} - -static void check_sub_part_tbl_space(Oid ownerId, char* tablespacename, List* subPartitionDefState) -{ - ListCell* subspccell = NULL; - foreach(subspccell, subPartitionDefState) { - RangePartitionDefState* subpartitiondef = (RangePartitionDefState*)lfirst(subspccell); - char* subtablespacename = subpartitiondef->tablespacename; - CheckPartitionTablespace(subtablespacename, ownerId); - } -} - -/* Check tablespace's permissions for partition */ -static void check_part_tbl_space(CreateStmt* stmt, Oid ownerId, bool dfsTablespace) -{ - ListCell* spccell = NULL; - /* check value partition table is created at DFS table space */ - if (stmt->partTableState->partitionStrategy == PART_STRATEGY_VALUE && !dfsTablespace) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Value partitioned table can only be created on DFS tablespace."))); - - foreach (spccell, stmt->partTableState->partitionList) { - if (nodeTag(lfirst(spccell)) == T_RangePartitionDefState) { - RangePartitionDefState* partitiondef = (RangePartitionDefState*)lfirst(spccell); - char* tablespacename = partitiondef->tablespacename; - List* subPartitionDefState = partitiondef->subPartitionDefState; - CheckPartitionTablespace(tablespacename, ownerId); - check_sub_part_tbl_space(ownerId, tablespacename, subPartitionDefState); - } else if (nodeTag(lfirst(spccell)) == T_HashPartitionDefState) { - HashPartitionDefState* partitiondef = (HashPartitionDefState*)lfirst(spccell); - char* tablespacename = partitiondef->tablespacename; - List* subPartitionDefState = partitiondef->subPartitionDefState; - CheckPartitionTablespace(tablespacename, ownerId); - check_sub_part_tbl_space(ownerId, tablespacename, 
subPartitionDefState); - } else if (nodeTag(lfirst(spccell)) == T_ListPartitionDefState) { - ListPartitionDefState* partitiondef = (ListPartitionDefState*)lfirst(spccell); - char* tablespacename = partitiondef->tablespacename; - List* subPartitionDefState = partitiondef->subPartitionDefState; - CheckPartitionTablespace(tablespacename, ownerId); - check_sub_part_tbl_space(ownerId, tablespacename, subPartitionDefState); - } else { - ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Unknown PartitionDefState"), - errdetail("N/A"), errcause("The partition type is incorrect."), - erraction("Use the correct partition type."))); - break; - } - } -} - -static void alter_orientation(CreateStmt** stmt, bool all_field, bool all_tag, - Datum* reloptions, Node** orientedFrom, char** storeChar) -{ - static const char* const validnsps[] = HEAP_RELOPT_NAMESPACES; - ListCell* cell = NULL; - bool has_compression = false; - - if (all_field) { - foreach (cell, (*stmt)->options) { - DefElem* def = (DefElem*)lfirst(cell); - if (pg_strcasecmp("orientation", def->defname) == 0) { - def->arg = (Node*)makeString(ORIENTATION_COLUMN); - break; - } - } - ereport(NOTICE, - (errcode(ERRCODE_SUCCESSFUL_COMPLETION), - errmsg("'TSTAG' not found. 
Using '%s' as the orientation.", ORIENTATION_COLUMN), - errhint("Please use both 'TSTAG' and 'TSFIELD' if orientation is '%s'.", ORIENTATION_TIMESERIES))); - *reloptions = transformRelOptions((Datum)0, (*stmt)->options, NULL, validnsps, true, false); - *orientedFrom = (Node*)makeString(ORIENTATION_COLUMN); - *storeChar = ORIENTATION_COLUMN; - } else if (all_tag) { - foreach (cell, (*stmt)->options) { - DefElem* def = (DefElem*)lfirst(cell); - if (pg_strcasecmp("orientation", def->defname) == 0) { - def->arg = (Node*)makeString(ORIENTATION_ROW); - } - if (pg_strcasecmp("compression", def->defname) == 0) { - has_compression = true; - def->arg = (Node*)makeString(COMPRESSION_NO); - } - } - if (!has_compression) { - Node* value = (Node*)makeString(COMPRESSION_NO); - (*stmt)->options = lappend((*stmt)->options, makeDefElem("compression", value)); - } - ereport(NOTICE, - (errcode(ERRCODE_SUCCESSFUL_COMPLETION), - errmsg("'TSFIELD' not found. Using '%s' as the orientation.", ORIENTATION_ROW), - errhint("Please use both 'TSTAG' and 'TSFIELD' if orientation is '%s'.", ORIENTATION_TIMESERIES))); - *reloptions = transformRelOptions((Datum)0, (*stmt)->options, NULL, validnsps, true, false); - *orientedFrom = (Node*)makeString(ORIENTATION_ROW); - *storeChar = ORIENTATION_ROW; - } -} - -/* - * KVType only used in timeseries table. - * Set kvtype to ATT_KV_UNDEFINED when using it in row-oriented or column-oriented table. 
- */ -static void clear_kvtype_row_column(CreateStmt* stmt) -{ - ListCell* cell = NULL; - foreach (cell, stmt->tableElts) { - ColumnDef* colDef = (ColumnDef*)lfirst(cell); - colDef->kvtype = ATT_KV_UNDEFINED; - } -} - -static bool validate_timeseries(CreateStmt** stmt, Datum* reloptions, char** storeChar, Node** orientedFrom) -{ - bool kvtype_all_tag = true; - bool kvtype_all_field = true; - bool is_timeseries = true; - int kvtype_time_count = 0; - - List *schema = (*stmt)->tableElts; - ListCell *cell = NULL; - - /* check relpersistence, only permanent is supported */ - if ((*stmt)->relation->relpersistence != RELPERSISTENCE_PERMANENT) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported persistency for timeseries table."))); - - /* Currently, only support hash or hidetag distribution for timeseries table.*/ - if ((*stmt)->distributeby != NULL && - ((*stmt)->distributeby->disttype != DISTTYPE_HASH && (*stmt)->distributeby->disttype != DISTTYPE_HIDETAG)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("Only support hash distribution for timeseries table."))); - } - - foreach (cell, schema) { - ColumnDef* colDef = (ColumnDef*)lfirst(cell); - if (colDef->kvtype == ATT_KV_TAG) { - kvtype_all_field = false; - } else if (colDef->kvtype == ATT_KV_FIELD) { - kvtype_all_tag = false; - } else if (colDef->kvtype == ATT_KV_TIMETAG) { - kvtype_time_count++; - } else { - if (colDef->kvtype != ATT_KV_HIDE) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("kvtype of '%s' must be defined when using timeseries.", colDef->colname))); - } - } - } - /* TIMESERIES only allowed one time column */ - if (kvtype_time_count != 1) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("TIMESERIES must have one and only one time column."))); - } - - if (kvtype_all_field || kvtype_all_tag) { - alter_orientation(stmt, kvtype_all_field, kvtype_all_tag, reloptions, orientedFrom, storeChar); - is_timeseries 
= false; - } - - if (is_timeseries) { -#ifdef ENABLE_MULTIPLE_NODES - Tsdb::CheckTsRelname((*stmt)->relation->relname); -#endif /* ENABLE_MULTIPLE_NODES */ - foreach (cell, schema) { - ColumnDef* colDef = (ColumnDef*)lfirst(cell); - int32 typmod = 0; - HeapTuple typeTuple = typenameType(NULL, colDef->typname, &typmod); - /* set it here to check the type distribution in DefineRelation */ - colDef->typname->typeOid = HeapTupleGetOid(typeTuple); - ReleaseSysCache(typeTuple); - - // check the supported data type and error report if needed. - if (!IsTypeSupportedByTsStore(colDef->kvtype, colDef->typname->typeOid)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in timeseries store", - format_type_with_typemod(colDef->typname->typeOid, typmod)))); - } - } - } - if (is_timeseries && !g_instance.attr.attr_common.enable_tsdb) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Cannot use orientation is timeseries when enable_tsdb is off."))); - } - return is_timeseries; -} - -static void add_partiton(CreateStmt* stmt, StdRdOptions* std_opt) -{ - List* schema = stmt->tableElts; - ListCell* cell = NULL; - int32 typmod = -1; - RangePartitionDefState* part1 = NULL; - RangePartitionDefState* part2 = NULL; - ColumnRef* col_ref = NULL; - PartitionState* part_state = NULL; - Interval* period_interval = NULL; - - if (0 != pg_strcasecmp(TIME_UNDEFINED, StdRdOptionsGetStringData(std_opt, period, TIME_UNDEFINED))) { - period_interval = char_to_interval((char*)StdRdOptionsGetStringData(std_opt, period, TIME_UNDEFINED), typmod); - } else { - period_interval = char_to_interval(TIME_ONE_DAY, typmod); - } - - /* Copy because timestamptz_to_str returns a static buffer */ - char* part1_boundary_cstr = NULL; - char* part2_boundary_cstr = NULL; - TimestampTz current_sharp_time = GetCurrentTimestamp(); - if (INTERVAL_TO_USEC(period_interval) >= USECS_PER_DAY) { - int tz; - struct pg_tm tt, *tm = &tt; - fsec_t fsec; - const 
char* tzn = NULL; - if (timestamp2tm(current_sharp_time, &tz, tm, &fsec, &tzn, NULL) != 0) { - ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range."))); - } - current_sharp_time = - current_sharp_time - (current_sharp_time % USECS_PER_DAY) + USECS_PER_DAY + (tz * USECS_PER_SEC); - part1_boundary_cstr = pstrdup(timestamptz_to_str(current_sharp_time)); - part2_boundary_cstr = pstrdup(timestamptz_to_str(current_sharp_time + INTERVAL_TO_USEC(period_interval))); - } else { - part1_boundary_cstr = pstrdup(timestamptz_to_str(current_sharp_time + INTERVAL_TO_USEC(period_interval))); - part2_boundary_cstr = pstrdup(timestamptz_to_str(current_sharp_time + 2 * INTERVAL_TO_USEC(period_interval))); - } - Datum constvalue1 = CStringGetDatum(part1_boundary_cstr); - Const* con1 = makeConst(UNKNOWNOID, -1, InvalidOid, -1, constvalue1, false, true); - Datum constvalue2 = CStringGetDatum(part2_boundary_cstr); - Const* con2 = makeConst(UNKNOWNOID, -1, InvalidOid, -1, constvalue2, false, true); - - part1 = makeNode(RangePartitionDefState); - part1->partitionName = "default_part_1"; - part1->boundary = list_make1(con1); - part1->partitionno = 1; - - part2 = makeNode(RangePartitionDefState); - part2->partitionName = "default_part_2"; - part2->boundary = list_make1(con2); - part1->partitionno = 2; - - part_state = makeNode(PartitionState); - part_state->partitionStrategy = 'r'; - part_state->rowMovement = ROWMOVEMENT_DEFAULT; - foreach (cell, schema) { - ColumnDef* col_def = (ColumnDef*)lfirst(cell); - if (col_def->kvtype == ATT_KV_TIMETAG) { - col_ref = makeNode(ColumnRef); - col_ref->fields = list_make1(makeString(col_def->colname)); - col_ref->location = -1; - part_state->partitionKey = list_make1(col_ref); - break; - } - } - part_state->partitionList = list_make2(part1, part2); - stmt->partTableState = part_state; -} - -static void partition_policy_check(CreateStmt* stmt, StdRdOptions* std_opt, bool timeseries_checked) -{ - List* schema = 
stmt->tableElts; - ListCell* cell = NULL; - - partition_policy_interval_check(std_opt, timeseries_checked); - /* - * For TIMESERIES storage, it is based on the partition table. - * If it is not a partition table, turn relation into partition table by construncting - * regarding partition structure explicitly with 2 partition forward - */ - if (stmt->partTableState == NULL && timeseries_checked) { - add_partiton(stmt, std_opt); - } - - if (stmt->partTableState != NULL) { - List *partitionKey = stmt->partTableState->partitionKey; - ColumnRef *colRef = NULL; - - if (partitionKey->length != 1) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Only support one partition Key."))); - } - colRef = (ColumnRef*) lfirst(list_head(partitionKey)); - Value *val = (Value *) lfirst(list_head(colRef->fields)); - char *key_name = val->val.str; - - foreach(cell, schema) { - ColumnDef *colDef = (ColumnDef*)lfirst(cell); - if (pg_strcasecmp(colDef->colname, key_name) == 0) { - Oid result = InvalidOid; - Type typtup = LookupTypeName(NULL, colDef->typname, NULL); - if (typtup == NULL) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("type \"%s\" does not exist.", - TypeNameToString(colDef->typname)))); - result = typeTypeId(typtup); - ReleaseSysCache(typtup); - if(result != TIMESTAMPOID && result != TIMESTAMPTZOID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg(" Partition Key must be of type TIMESTAMP(TZ) when using ttl or period."))); - if(timeseries_checked && colDef->kvtype != ATT_KV_TIMETAG) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg(" Partition Key must be of kv type TSTAG."))); - } - } - } -} - -static void FetchSliceReftableOid(CreateStmt* stmt, Oid namespaceId) -{ - if (stmt->distributeby == NULL || stmt->distributeby->distState == NULL || - stmt->distributeby->distState->refTableName == NULL) { - return; - } - - char* refName = stmt->distributeby->distState->refTableName; - Oid refOid = 
get_relname_relid(refName, namespaceId); - if (!OidIsValid(refOid)) { - /* User provided not-null yet invalid base table name */ - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Relation \"%s\" does not exist in current namespace", refName))); - } - stmt->distributeby->referenceoid = refOid; -} - -#ifdef ENABLE_MOT -static void DetermineColumnCollationForMOTTable(Oid *collOid) -{ - AssertArg(collOid != nullptr); - - if (*collOid == DEFAULT_COLLATION_OID) { - *collOid = C_COLLATION_OID; - } else if (*collOid != 0 && *collOid != C_COLLATION_OID && *collOid != POSIX_COLLATION_OID) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("collations other than \"C\" or \"POSIX\" are not supported on MOT tables"))); - } -} -#endif - -static void CheckPartitionKeyForCreateTable(PartitionState *partTableState, List *schema, TupleDesc descriptor) -{ - List *pos = NIL; - bool partkeyIsFunc = false; - - /* get partitionkey's position */ - pos = GetPartitionkeyPos(partTableState->partitionKey, schema, &partkeyIsFunc); - /* check partitionkey's datatype */ - if (partTableState->partitionStrategy == PART_STRATEGY_VALUE) { - CheckValuePartitionKeyType(descriptor->attrs, pos); - } else if (partTableState->partitionStrategy == PART_STRATEGY_INTERVAL) { - CheckIntervalPartitionKeyType(descriptor->attrs, pos); - CheckIntervalValue(descriptor->attrs, pos, partTableState->intervalPartDef); - } else if (partTableState->partitionStrategy == PART_STRATEGY_RANGE) { - CheckRangePartitionKeyType(descriptor->attrs, pos); - } else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) { - CheckListPartitionKeyType(descriptor->attrs, pos); - } else if (partTableState->partitionStrategy == PART_STRATEGY_HASH) { - CheckHashPartitionKeyType(descriptor->attrs, pos); - } else { - list_free_ext(pos); - ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("The partition type is not supported!"), errdetail("N/A"), - errcause("The 
partition type is incorrect."), erraction("Use the correct partition type."))); - } - - /* - * Check partitionkey's value for none value-partition table as for value - * partition table, partition value is known until data get loaded. - */ - if (partTableState->partitionStrategy != PART_STRATEGY_VALUE && - partTableState->partitionStrategy != PART_STRATEGY_HASH && - partTableState->partitionStrategy != PART_STRATEGY_LIST) - ComparePartitionValue(pos, descriptor->attrs, partTableState->partitionList, true, partkeyIsFunc); - else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) - CompareListValue(pos, descriptor->attrs, partTableState->partitionList, partkeyIsFunc); - - /* charset of partkey columns cannot be different from server_encoding */ - if (DB_IS_CMPT(B_FORMAT)) { - foreach_cell (cell, pos) { - int attidx = lfirst_int(cell); - check_unsupported_charset_for_column( - descriptor->attrs[attidx].attcollation, NameStr(descriptor->attrs[attidx].attname)); - } - } - - list_free_ext(pos); -} - -static List *GetSubPartitionDefList(PartitionState *partTableState, ListCell *cell) -{ - PartitionDefState *partitionDefState = (PartitionDefState *)lfirst(cell); - List *subPartitionList = partitionDefState->subPartitionDefState; - - if (subPartitionList == NIL) { - Const *boundaryDefault = makeNode(Const); - boundaryDefault->ismaxvalue = true; - boundaryDefault->location = -1; - - if (partTableState->partitionStrategy == PART_STRATEGY_RANGE) { - RangePartitionDefState *tmpSubPartitionDefState = makeNode(RangePartitionDefState); - tmpSubPartitionDefState->boundary = list_make1(boundaryDefault); - subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState); - } else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) { - ListPartitionDefState *tmpSubPartitionDefState = makeNode(ListPartitionDefState); - tmpSubPartitionDefState->boundary = list_make1(boundaryDefault); - subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState); - } 
else if (partTableState->partitionStrategy == PART_STRATEGY_HASH) { - HashPartitionDefState *tmpSubPartitionDefState = makeNode(HashPartitionDefState); - tmpSubPartitionDefState->boundary = list_make1(boundaryDefault); - subPartitionList = lappend(subPartitionList, tmpSubPartitionDefState); - } - - } - - return subPartitionList; -} - -void UpdatePartKeyExpr(Relation rel, PartitionState *partTableState, Oid partOid) -{ - ParseState* pstate = NULL; - RangeTblEntry* rte = NULL; - HeapTuple partTuple = NULL; - pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); - addRTEtoQuery(pstate, rte, true, true, true); - Relation pgPartitionRel = heap_open(PartitionRelationId, RowExclusiveLock); - if (OidIsValid(partOid)) - partTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partOid)); - else - partTuple = searchPgPartitionByParentIdCopy(PART_OBJ_TYPE_PARTED_TABLE, rel->rd_id); - if (!partTuple) - ereport(ERROR,(errcode(ERRCODE_PARTITION_ERROR),errmsg("The partition can't be found"))); - bool isnull = false; - fastgetattr(partTuple, Anum_pg_partition_partkeyexpr, RelationGetDescr(pgPartitionRel), &isnull); - if (isnull) { - if (OidIsValid(partOid)) - ReleaseSysCache(partTuple); - else - heap_freetuple(partTuple); - heap_close(pgPartitionRel, RowExclusiveLock); - return; - } - // Oid* partitionKeyDataType = NULL; - Node* expr = transformExpr(pstate, (Node*)(linitial(partTableState->partitionKey)), EXPR_KIND_OTHER); - assign_expr_collations(pstate, expr); - bool nulls[Natts_pg_partition] = {false}; - bool replaces[Natts_pg_partition] = {false}; - Datum values[Natts_pg_partition] = {0}; - replaces[Anum_pg_partition_partkeyexpr - 1] = true; - char* partkeyexpr = nodeToString(expr); - values[Anum_pg_partition_partkeyexpr - 1] = partkeyexpr ? 
CStringGetTextDatum(partkeyexpr) : CStringGetTextDatum(""); - HeapTuple new_tuple = heap_modify_tuple(partTuple, RelationGetDescr(pgPartitionRel), values, nulls, replaces); - simple_heap_update(pgPartitionRel, &new_tuple->t_self, new_tuple); - CatalogUpdateIndexes(pgPartitionRel, new_tuple); - if (OidIsValid(partOid)) - ReleaseSysCache(partTuple); - else - heap_freetuple(partTuple); - heap_freetuple_ext(new_tuple); - if (pgPartitionRel) { - heap_close(pgPartitionRel, RowExclusiveLock); - } -} - -/* ---------------------------------------------------------------- - * DefineRelation - * Creates a new relation. - * - * stmt carries parsetree information from an ordinary CREATE TABLE statement. - * The other arguments are used to extend the behavior for other cases: - * relkind: relkind to assign to the new relation - * ownerId: if not InvalidOid, use this as the new relation's owner. - * typaddress: if not null, it's set to the pg_type entry's address. - * - * Note that permissions checks are done against current user regardless of - * ownerId. A nonzero ownerId is used when someone is creating a relation - * "on behalf of" someone else, so we still want to see that the current user - * has permissions to do it. - * - * If successful, returns the address of the new relation. 
- * ---------------------------------------------------------------- - */ -ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, ObjectAddress* typaddress, bool isCTAS) -{ - char relname[NAMEDATALEN]; - Oid namespaceId; - List* schema = stmt->tableElts; - Oid relationId; - Oid tablespaceId; - Relation rel; - TupleDesc descriptor; - List* inheritOids = NIL; - List* old_constraints = NIL; - bool localHasOids = false; - int parentOidCount; - List* rawDefaults = NIL; - List* cookedDefaults = NIL; - List *ceLst = NIL; - Datum reloptions; - ListCell* listptr = NULL; - AttrNumber attnum; - static const char* const validnsps[] = HEAP_RELOPT_NAMESPACES; - Oid ofTypeId; - Node* orientedFrom = NULL; - char* storeChar = ORIENTATION_ROW; - bool timeseries_checked = false; - bool dfsTablespace = false; - bool isInitdbOnDN = false; - bool isInLedgerNsp = false; - HashBucketInfo* bucketinfo = NULL; - DistributionType distType; - ObjectAddress address; - bool relhasuids = false; - Oid nspdefcoll = InvalidOid; - Oid rel_coll_oid = InvalidOid; - - /* - * isalter is true, change the owner of the objects as the owner of the - * namespace, if the owner of the namespce has the same name as the namescpe - */ - bool isalter = false; - bool hashbucket = false; - int bucketcnt = 0; - StorageType storage_type = HEAP_DISK; - - bool relisshared = u_sess->attr.attr_common.IsInplaceUpgrade && u_sess->upg_cxt.new_catalog_isshared; - errno_t rc; - /* - * Truncate relname to appropriate length (probably a waste of time, as - * parser should have done this already). - */ - rc = strncpy_s(relname, NAMEDATALEN, stmt->relation->relname, NAMEDATALEN - 1); - securec_check(rc, "", ""); - - if (ISMATMAP(relname) || ISMLOG(relname)) { - ereport(WARNING, (errcode(ERRCODE_INVALID_NAME), - errmsg("\"%s\" is not an appropriated name for relation", relname), - errdetail("The kernel may treat it as a %s table of materialized view", - ISMATMAP(relname) ? 
"map" : "mlog"))); - } - - if (stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED && STMT_RETRY_ENABLED) - stmt->relation->relpersistence = RELPERSISTENCE_PERMANENT; - - /* During grayscale upgrade, forbid creating LIST/RANGE tables if workingVersionNum is too low. */ - if (stmt->distributeby != NULL) { - distType = stmt->distributeby->disttype; - if ((distType == DISTTYPE_RANGE || distType == DISTTYPE_LIST) && - t_thrd.proc->workingVersionNum < RANGE_LIST_DISTRIBUTION_VERSION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg( - "Working Version Num less than %u does not support LIST/RANGE distributed tables.", - RANGE_LIST_DISTRIBUTION_VERSION_NUM))); - } - } - - /* - * Check consistency of arguments - */ - if (stmt->oncommit != ONCOMMIT_NOOP - && !(stmt->relation->relpersistence == RELPERSISTENCE_TEMP - || stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("ON COMMIT can only be used on temporary tables"))); - } - - //@Temp Table. We do not support on commit drop right now. - if ((stmt->relation->relpersistence == RELPERSISTENCE_TEMP - || stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) - && stmt->oncommit == ONCOMMIT_DROP) { - ereport( - ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg( - "ON COMMIT only support PRESERVE ROWS or DELETE ROWS option"))); - } - - if (stmt->constraints != NIL && relkind == RELKIND_FOREIGN_TABLE) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("constraints on foreign tables are not supported"))); - } - - if (stmt->constraints != NIL && relkind == RELKIND_STREAM) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("constraints on streams are not supported"))); - } - /* - * For foreign table ROUNDROBIN distribution is a built-in support. 
- */ - if (IsA(stmt, CreateForeignTableStmt) && - (IsSpecifiedFDW(((CreateForeignTableStmt*)stmt)->servername, DIST_FDW) || - IsSpecifiedFDW(((CreateForeignTableStmt*)stmt)->servername, LOG_FDW) || - IsSpecifiedFDW(((CreateForeignTableStmt*)stmt)->servername, GC_FDW)) && - (IS_PGXC_COORDINATOR || (isRestoreMode && stmt->subcluster)) && !stmt->distributeby) { - stmt->distributeby = makeNode(DistributeBy); - stmt->distributeby->disttype = DISTTYPE_ROUNDROBIN; - stmt->distributeby->colname = NULL; - } - /* - * Look up the namespace in which we are supposed to create the relation, - * check we have permission to create there, lock it against concurrent - * drop, and mark stmt->relation as RELPERSISTENCE_TEMP if a temporary - * namespace is selected. - */ - namespaceId = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL, relkind); - - if (u_sess->attr.attr_sql.enforce_a_behavior) { - /* Identify user ID that will own the table - * - * change the owner of the objects as the owner of the namespace - * if the owner of the namespce has the same name as the namescpe - * note: the object must be of the ordinary table, sequence, view or - * composite type - */ - if (!OidIsValid(ownerId) && (relkind == RELKIND_RELATION || RELKIND_IS_SEQUENCE(relkind) || - relkind == RELKIND_VIEW || relkind == RELKIND_COMPOSITE_TYPE - || relkind == RELKIND_CONTQUERY)) { - bool anyResult = CheckRelationCreateAnyPrivilege(GetUserId(), relkind); - ownerId = GetUserIdFromNspId(namespaceId, false, anyResult); - } - - if (!OidIsValid(ownerId)) - ownerId = GetUserId(); - else if (ownerId != GetUserId()) - isalter = true; - - if (isalter) { - /* Check namespace permissions. 
*/ - AclResult aclresult; - - aclresult = pg_namespace_aclcheck(namespaceId, ownerId, ACL_CREATE); - bool anyResult = false; - if (aclresult != ACLCHECK_OK && !IsSysSchema(namespaceId)) { - anyResult = CheckRelationCreateAnyPrivilege(ownerId, relkind); - } - if (aclresult != ACLCHECK_OK && !anyResult) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); - } - } - /* - * Security check: disallow creating temp tables from security-restricted - * code. This is needed because calling code might not expect untrusted - * tables to appear in pg_temp at the front of its search path. - */ - if ((stmt->relation->relpersistence == RELPERSISTENCE_TEMP - || stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) - && InSecurityRestrictedOperation()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("cannot create temporary table within security-restricted operation"))); - } - - /* disallow creating table under blockchain schema directly */ - if (namespaceId == PG_BLOCKCHAIN_NAMESPACE) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("cannot create table under blockchain namspace."))); - } - isInLedgerNsp = IsLedgerNameSpace(namespaceId); - nspdefcoll = get_nsp_default_collation(namespaceId); - - /* - * Select tablespace to use. If not specified, use default tablespace - * (which may in turn default to database's default). 
- */ - if (stmt->tablespacename) { - tablespaceId = get_tablespace_oid(stmt->tablespacename, false); - } else { - tablespaceId = GetDefaultTablespace(stmt->relation->relpersistence); - /* note InvalidOid is OK in this case */ - } - - dfsTablespace = IsSpecifiedTblspc(tablespaceId, FILESYSTEM_HDFS); - if (dfsTablespace) { - FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported."); - } - - if (dfsTablespace && is_feature_disabled(DATA_STORAGE_FORMAT)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Unsupport the dfs table in this version."))); - } - - PreCheckCreatedObj(stmt, dfsTablespace, relkind); - - /* Check permissions except when using database's default */ - if (OidIsValid(tablespaceId) && tablespaceId != u_sess->proc_cxt.MyDatabaseTableSpace) { - AclResult aclresult; - - aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, get_tablespace_name(tablespaceId)); - /* view and sequence are not related to tablespace, so no need to check permissions */ - if (isalter && relkind != RELKIND_VIEW && relkind != RELKIND_CONTQUERY && - relkind != RELKIND_SEQUENCE && relkind != RELKIND_LARGE_SEQUENCE) { - aclresult = pg_tablespace_aclcheck(tablespaceId, ownerId, ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, get_tablespace_name(tablespaceId)); - } - } - - /* In all cases disallow placing user relations in pg_global */ - if (!relisshared && tablespaceId == GLOBALTABLESPACE_OID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("only shared relations can be placed in pg_global tablespace"))); - - /* Identify user ID that will own the table */ - if (!OidIsValid(ownerId)) - ownerId = GetUserId(); - - /* Add default options for relation if need. 
*/ - if (!dfsTablespace) { - if (!u_sess->attr.attr_common.IsInplaceUpgrade) { - stmt->options = AddDefaultOptionsIfNeed(stmt->options, relkind, stmt, namespaceId); - } - } else { - checkObjectCreatedinHDFSTblspc(stmt, relkind); - } - - /* Only support one partial cluster key for dfs table. */ - if (stmt->clusterKeys && list_length(stmt->clusterKeys) > 1) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("Only support one partial cluster key for dfs/cstore table."))); - } - - /* Check tablespace's permissions for partition */ - if (stmt->partTableState) { - check_part_tbl_space(stmt, ownerId, dfsTablespace); - } - - /* - * Parse and validate reloptions, if any. - */ - /* global temp table */ - OnCommitAction oncommitAction = GttOncommitOption(stmt->options); - if (stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP - && relkind == RELKIND_RELATION) { - if (oncommitAction != ONCOMMIT_NOOP && stmt->oncommit == ONCOMMIT_NOOP) { - stmt->oncommit = oncommitAction; - } else { - if (oncommitAction != ONCOMMIT_NOOP && stmt->oncommit != ONCOMMIT_NOOP) { - stmt->options = RemoveRelOption(stmt->options, "on_commit_delete_rows", NULL); - } - DefElem *opt = makeNode(DefElem); - - opt->type = T_DefElem; - opt->defnamespace = NULL; - opt->defname = "on_commit_delete_rows"; - opt->defaction = DEFELEM_UNSPEC; - - /* use reloptions to remember on commit clause */ - if (stmt->oncommit == ONCOMMIT_DELETE_ROWS) { - opt->arg = reinterpret_cast(makeString("true")); - } else if (stmt->oncommit == ONCOMMIT_PRESERVE_ROWS) { - opt->arg = reinterpret_cast(makeString("false")); - } else if (stmt->oncommit == ONCOMMIT_NOOP) { - opt->arg = reinterpret_cast(makeString("false")); - } else { - elog(ERROR, "global temp table not support on commit drop clause"); - } - stmt->options = lappend(stmt->options, opt); - } - } else if (oncommitAction != ONCOMMIT_NOOP) { - elog(ERROR, "The parameter on_commit_delete_rows is exclusive to the global temp table, which cannot be 
" - "specified by a regular table"); - } - - /* relation collation is stored using stmt->options. */ - if (DB_IS_CMPT(B_FORMAT) && relkind == RELKIND_RELATION) { - (void)fill_relation_collation(stmt->collate, stmt->charset, &stmt->options, nspdefcoll); - } - - fillTdeRelOptions(stmt->options, relkind); - - reloptions = transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false); - - StdRdOptions* std_opt = (StdRdOptions*)heap_reloptions(relkind, reloptions, true); - relhasuids = StdRdOptionsHasUids(std_opt, relkind); - if (relhasuids && t_thrd.proc->workingVersionNum < HASUID_VERSION_NUM) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hasuids is not supported in current version!"))); - } - if (ENABLE_DMS && relhasuids) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hasuids is not supported under Shared Storage."))); - } - if (std_opt != NULL) { - RowTblCheckHashBucketOption(stmt->options, std_opt); - if ((std_opt->segment)) { - if (t_thrd.proc->workingVersionNum < SEGMENT_PAGE_VERSION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hash bucket table not supported in current version!"))); - } - if (!XLogIsNeeded()) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmodule(MOD_SEGMENT_PAGE), - errmsg("segment-page storage requires wal-logging."), - errdetail("wal_level must >= WAL_LEVEL_ARCHIVE"))); - } - } - rel_coll_oid = std_opt->collate; - hashbucket = std_opt->hashbucket; - bucketcnt = std_opt->bucketcnt; - storage_type = (std_opt->segment == true) ? 
SEGMENT_PAGE : HEAP_DISK; - - if (pg_strcasecmp(ORIENTATION_COLUMN, StdRdOptionsGetStringData(std_opt, orientation, ORIENTATION_ROW)) == 0) { - orientedFrom = (Node*)makeString(ORIENTATION_COLUMN); - storeChar = ORIENTATION_COLUMN; - } else if (pg_strcasecmp(ORIENTATION_ORC, - StdRdOptionsGetStringData(std_opt, orientation, ORIENTATION_ROW)) == 0) { - /* - * Don't allow "create DFS table" to run inside a transaction block. - * - * "DfsDDLIsTopLevelXact" is set in "case T_CreateStmt" of - * standard_ProcessUtility() - * - * exception: allow "CREATE DFS TABLE" operation in transaction block - * during redis a table. - */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && u_sess->attr.attr_sql.enable_cluster_resize == false) - PreventTransactionChain(u_sess->exec_cxt.DfsDDLIsTopLevelXact, "CREATE DFS TABLE"); - - orientedFrom = (Node*)makeString(ORIENTATION_ORC); - storeChar = ORIENTATION_COLUMN; - } else if(0 == pg_strcasecmp(ORIENTATION_TIMESERIES, - StdRdOptionsGetStringData(std_opt, orientation, ORIENTATION_ROW))) { - orientedFrom = (Node *)makeString(ORIENTATION_TIMESERIES); - storeChar = ORIENTATION_TIMESERIES; - /* for ts table redistribute, timeseries table redis_ is reserved */ - if (!u_sess->attr.attr_sql.enable_cluster_resize) { - if (strncmp(relname, "redis_", 6) == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("Invalid table name prefix redis_, reserved in redis mode."))); - } - } - /* - * Check the kvtype parameter legality for timeseries storage method. - * If all the kvtype exclude tstime are same, change the orientation to row or column explicitly. 
- */ - timeseries_checked = validate_timeseries(&stmt, &reloptions, &storeChar, &orientedFrom); - std_opt = (StdRdOptions*)heap_reloptions(relkind, reloptions, true); - } else if (0 == pg_strcasecmp(TABLE_ACCESS_METHOD_USTORE, - StdRdOptionsGetStringData(std_opt, storage_type, TABLE_ACCESS_METHOD_ASTORE))) { - if ((t_thrd.proc->workingVersionNum < USTORE_VERSION && u_sess->attr.attr_common.upgrade_mode != 0) -#ifdef ENABLE_MULTIPLE_NODES - || true -#endif - ) { - ereport(ERROR, (errmsg("Ustore table creation is not supported."))); - } - if (!g_instance.attr.attr_storage.enable_ustore) { - ereport(ERROR, (errmsg("Ustore is disabled, please set enable_ustore=on."))); - - } - if (isInLedgerNsp) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Ustore table is not supported ledger user table."))); - } - orientedFrom = (Node *)makeString(TABLE_ACCESS_METHOD_USTORE); - storeChar = TABLE_ACCESS_METHOD_USTORE; - } - - // Set kvtype to ATT_KV_UNDEFINED in row-oriented or column-oriented table. 
- if (0 != pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES)) { - clear_kvtype_row_column(stmt); - } - - /* - * Because we also support create partition policy for non timeseries table, we should check parameter - * ttl and period if it contains - */ - if (timeseries_checked || - 0 != pg_strcasecmp(TIME_UNDEFINED, StdRdOptionsGetStringData(std_opt, ttl, TIME_UNDEFINED)) || - 0 != pg_strcasecmp(TIME_UNDEFINED, StdRdOptionsGetStringData(std_opt, period, TIME_UNDEFINED))) { - partition_policy_check(stmt, std_opt, timeseries_checked); - if (stmt->partTableState != NULL) { - check_part_tbl_space(stmt, ownerId, dfsTablespace); - checkPartitionSynax(stmt); - } - } - - if (IS_SINGLE_NODE && stmt->partTableState != NULL) { - if (stmt->partTableState->rowMovement != ROWMOVEMENT_DISABLE) - stmt->partTableState->rowMovement = ROWMOVEMENT_ENABLE; - } - - if (0 == pg_strcasecmp(storeChar, ORIENTATION_COLUMN)) { - CheckCStoreUnsupportedFeature(stmt); - CheckCStoreRelOption(std_opt); - ForbidToSetOptionsForColTbl(stmt->options); - if (stmt->partTableState) { - if (stmt->partTableState->rowMovement == ROWMOVEMENT_DISABLE) { - ereport(NOTICE, - (errmsg("disable row movement is invalid for column stored tables." - " They always enable row movement between partitions."))); - } - /* always enable rowmovement for column stored tables */ - stmt->partTableState->rowMovement = ROWMOVEMENT_ENABLE; - } - } else if (0 == pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES)) { - /* check both support coloumn store and row store */ - CheckCStoreUnsupportedFeature(stmt); - CheckCStoreRelOption(std_opt); - if (stmt->partTableState) { - if (stmt->partTableState->rowMovement == ROWMOVEMENT_DISABLE) - ereport(NOTICE, - (errmsg("disable row movement is invalid for timeseries stored tables." 
- " They always enable row movement between partitions."))); - /* always enable rowmovement for column stored tables */ - stmt->partTableState->rowMovement = ROWMOVEMENT_ENABLE; - } - if (relkind == RELKIND_RELATION) { - /* only care heap relation. ignore foreign table and index relation */ - forbid_to_set_options_for_timeseries_tbl(stmt->options); - } - - /* construct distribute keys using tstag if not specified */ - if (stmt->distributeby == NULL) { - ListCell* cell = NULL; - DistributeBy* newnode = makeNode(DistributeBy); - List* colnames = NIL; - newnode->disttype = DISTTYPE_HASH; - - foreach (cell, schema) { - ColumnDef* colDef = (ColumnDef*)lfirst(cell); - if (colDef->kvtype == ATT_KV_TAG && IsTypeDistributable(colDef->typname->typeOid)) { - colnames = lappend(colnames, makeString(colDef->colname)); - } - } - if (list_length(colnames) == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("No column can be used as distribution column."))); - } - newnode->colname = colnames; - stmt->distributeby = newnode; - /* if specified hidetag, add a hidden column as distribution column */ - } else if (stmt->distributeby->disttype == DISTTYPE_HIDETAG && - stmt->distributeby->colname == NULL) { - bool has_distcol = false; - ListCell* cell; - foreach (cell, schema) { - ColumnDef* colDef = (ColumnDef*)lfirst(cell); - if (colDef->kvtype == ATT_KV_TAG && IsTypeDistributable(colDef->typname->typeOid)) { - has_distcol = true; - } - } - if (!has_distcol) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("No column can be used as distribution column."))); - } - ColumnDef* colDef = makeColumnDef(TS_PSEUDO_DIST_COLUMN, "char"); - colDef->kvtype = ATT_KV_HIDE; - stmt->tableElts = lappend(stmt->tableElts, colDef); - /* still use hash logic later */ - DistributeBy* distnode = stmt->distributeby; - distnode->disttype = DISTTYPE_HASH; - - distnode->colname = lappend(distnode->colname, makeString(colDef->colname)); - ereport(LOG, 
(errmodule(MOD_TIMESERIES), errmsg("use implicit distribution column method."))); - } - } else if (pg_strcasecmp(storeChar, TABLE_ACCESS_METHOD_USTORE) == 0) { - if (stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UStore tables do not support global temp table"))); - } - auto compression = StdRdOptionsGetStringData(std_opt, compression, COMPRESSION_NO); - auto orientation = StdRdOptionsGetStringData(std_opt, orientation, ORIENTATION_ROW); - if ((pg_strcasecmp(COMPRESSION_NO, compression) != 0 && - pg_strcasecmp(ORIENTATION_COLUMN, orientation) == 0) || - IsCompressedByCmprsInPgclass((RelCompressType)stmt->row_compress)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UStore tables do not support compression."))); - } - if (g_instance.attr.attr_storage.recovery_parse_workers > 1) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UStore tables do not support extreme rto."))); - } - ForbidToSetOptionsForRowTbl(stmt->options); - ForbidToSetOptionsForUstoreTbl(stmt->options); - } else { - if (relkind == RELKIND_RELATION) { - /* only care heap relation. 
ignore foreign table and index relation */ - ForbidToSetOptionsForRowTbl(stmt->options); - } - } - - if (pg_strcasecmp(storeChar, TABLE_ACCESS_METHOD_USTORE) != 0){ - /* init_td option is valid only when an Ustore table is created */ - ForbidToSetOptionsForNotUstoreTbl(stmt->options); - } - pfree_ext(std_opt); - } - - if (pg_strcasecmp(storeChar, ORIENTATION_COLUMN) == 0) { - ListCell *cell; - foreach (cell, stmt->options) { - DefElem *def = (DefElem *)lfirst(cell); - if (pg_strcasecmp(def->defname, "storage_type") == 0) { - char *cprType = NULL; - if (nodeTag(def->arg) == T_String) { - cprType = strVal(def->arg); - } else if (nodeTag(def->arg) == T_TypeName) { - cprType = TypeNameToString((TypeName*)def->arg); - } else { - Assert(false); - } - if (pg_strcasecmp(cprType, "ustore") == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("orientation=column and storage_type=ustore can not be specified simultaneously"))); - } - } - } - } - - if (orientedFrom == NULL) { - /* default is ORIENTATION_ROW */ - orientedFrom = (Node *)makeString(ORIENTATION_ROW); - } - - if (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) { - RowTblCheckCompressionOption(stmt->options, stmt->row_compress); - } else if (relhasuids == true) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("hasuids is only supported for row table"))); - } - - if (stmt->ofTypename) { - AclResult aclresult; - - ofTypeId = typenameTypeId(NULL, stmt->ofTypename); - - aclresult = pg_type_aclcheck(ofTypeId, GetUserId(), ACL_USAGE); - if (aclresult != ACLCHECK_OK) - aclcheck_error_type(aclresult, ofTypeId); - if (isalter) { - ofTypeId = typenameTypeId(NULL, stmt->ofTypename); - - aclresult = pg_type_aclcheck(ofTypeId, ownerId, ACL_USAGE); - if (aclresult != ACLCHECK_OK) - aclcheck_error_type(aclresult, ofTypeId); - } - } else - ofTypeId = InvalidOid; - - /* - * Look up inheritance ancestors and generate relation schema, including - * inherited attributes. 
- */ - schema = MergeAttributes( - schema, stmt->inhRelations, stmt->relation->relpersistence, &inheritOids, &old_constraints, &parentOidCount); - - if (isInLedgerNsp && relkind == RELKIND_RELATION && stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT && - pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) { - check_ledger_attrs_support(schema); - } - /* - * Create a tuple descriptor from the relation schema. Note that this - * deals with column names, types, and NOT NULL constraints, but not - * default values or CHECK constraints; we handle those below. - */ - if (relkind == RELKIND_COMPOSITE_TYPE) - descriptor = BuildDescForRelation(schema, orientedFrom, relkind); - else - descriptor = BuildDescForRelation(schema, orientedFrom, '\0', rel_coll_oid); - - /* Must specify at least one column when creating a table. */ - if (descriptor->natts == 0 && relkind != RELKIND_COMPOSITE_TYPE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("must have at least one column"))); - } - - /* check column charset */ - if (DB_IS_CMPT(B_FORMAT) && - (0 == pg_strcasecmp(storeChar, ORIENTATION_COLUMN) || 0 == pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES))) { - for (int attidx = 0; attidx < descriptor->natts; attidx++) { - check_unsupported_charset_for_column( - descriptor->attrs[attidx].attcollation, NameStr(descriptor->attrs[attidx].attname)); - } - } - - if (stmt->partTableState) { - CheckPartitionKeyForCreateTable(stmt->partTableState, schema, descriptor); - if (stmt->partTableState->subPartitionState != NULL) { - ListCell *cell = NULL; - Assert(list_length(stmt->partTableState->partitionKey) == 1); - Assert(list_length(stmt->partTableState->subPartitionState->partitionKey) == 1); - - foreach (cell, stmt->partTableState->partitionList) { - stmt->partTableState->subPartitionState->partitionList = - GetSubPartitionDefList(stmt->partTableState, cell); - CheckPartitionKeyForCreateTable(stmt->partTableState->subPartitionState, schema, descriptor); - } - 
stmt->partTableState->subPartitionState->partitionList = NIL; - - if (hashbucket) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("The subpartition table do not support hashbucket."), - errcause("The function is not implemented."), - erraction("Do not set up hashbucket.")))); - } - } - } - - localHasOids = interpretOidsOption(stmt->options); - descriptor->tdhasoid = (localHasOids || parentOidCount > 0); - - if ((pg_strcasecmp(storeChar, ORIENTATION_COLUMN) == 0 || - pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES) == 0 || - relhasuids) && localHasOids) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Local OID column not supported in column/timeseries/hasuids store tables."))); - } - - bool is_gc_fdw = false; - if (!isRestoreMode && IsA(stmt, CreateForeignTableStmt) && - (IsSpecifiedFDW(((CreateForeignTableStmt*)stmt)->servername, GC_FDW))) { - is_gc_fdw = true; - } - - /* - * Find columns with default values and prepare for insertion of the - * defaults. Pre-cooked (that is, inherited) defaults go into a list of - * CookedConstraint structs that we'll pass to heap_create_with_catalog, - * while raw defaults go into a list of RawColumnDefault structs that will - * be processed by AddRelationNewConstraints. (We can't deal with raw - * expressions until we can do transformExpr.) - * - * We can set the atthasdef flags now in the tuple descriptor; this just - * saves StoreAttrDefault from having to do an immediate update of the - * pg_attribute rows. - */ - rawDefaults = NIL; - cookedDefaults = NIL; - attnum = 0; - -#ifdef ENABLE_MOT - bool isMot = - (relkind == RELKIND_FOREIGN_TABLE) ? 
isMOTTableFromSrvName(((CreateForeignTableStmt*)stmt)->servername) : false; -#endif - - foreach (listptr, schema) { - ColumnDef* colDef = (ColumnDef*)lfirst(listptr); - - attnum++; - -#ifdef ENABLE_MOT - if (isMot) { - DetermineColumnCollationForMOTTable(&descriptor->attrs[attnum - 1].attcollation); - } -#endif - - if (is_gc_fdw) { - if (colDef->constraints != NULL || colDef->is_not_null == true) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("column constraint on openGauss foreign tables are not supported"))); - } - - Type ctype = typenameType(NULL, colDef->typname, NULL); - - if (ctype) { - Form_pg_type typtup = (Form_pg_type)GETSTRUCT(ctype); - if (typtup->typrelid > 0) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation type column on openGauss foreign tables are not supported"))); - } - - ReleaseSysCache(ctype); - } - } - - if ((colDef->raw_default != NULL || colDef->update_default != NULL) && colDef->cooked_default == NULL) { - RawColumnDefault* rawEnt = NULL; - - if (relkind == RELKIND_FOREIGN_TABLE) { - if (!(IsA(stmt, CreateForeignTableStmt) && ( -#ifdef ENABLE_MOT - isMOTTableFromSrvName(((CreateForeignTableStmt*)stmt)->servername) || -#endif - isPostgresFDWFromSrvName(((CreateForeignTableStmt*)stmt)->servername)))) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s on foreign tables are not supported", - colDef->generatedCol ? "generated column" : "default values"))); -#ifdef ENABLE_MOT - if (IsA(stmt, CreateForeignTableStmt) && - isMOTTableFromSrvName(((CreateForeignTableStmt *)stmt)->servername) && colDef->generatedCol) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("generated column on foreign tables are not supported"))); - } -#endif - } - - if (relkind == RELKIND_STREAM) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s on streams are not supported", - colDef->generatedCol ? 
"generated column" : "default values"))); - } - - Assert(colDef->cooked_default == NULL); - - rawEnt = (RawColumnDefault*)palloc(sizeof(RawColumnDefault)); - rawEnt->attnum = attnum; - rawEnt->raw_default = colDef->raw_default; - rawEnt->generatedCol = colDef->generatedCol; - rawEnt->update_expr = colDef->update_default; - rawDefaults = lappend(rawDefaults, rawEnt); - descriptor->attrs[attnum - 1].atthasdef = true; - } else if (colDef->cooked_default != NULL || colDef->update_default != NULL) { - CookedConstraint* cooked = NULL; - - cooked = (CookedConstraint*)palloc(sizeof(CookedConstraint)); - - if (colDef->generatedCol) - cooked->contype = CONSTR_GENERATED; - else - cooked->contype = CONSTR_DEFAULT; - cooked->conoid = InvalidOid; /* until created */ - cooked->name = NULL; - cooked->attnum = attnum; - cooked->expr = colDef->cooked_default; - cooked->skip_validation = false; - cooked->is_local = true; /* not used for defaults */ - cooked->inhcount = 0; /* ditto */ - cooked->is_no_inherit = false; - cooked->update_expr = colDef->update_default; - cookedDefaults = lappend(cookedDefaults, cooked); - descriptor->attrs[attnum - 1].atthasdef = true; - } - if (colDef->clientLogicColumnRef != NULL) { - CeHeapInfo *ceHeapInfo = NULL; - ceHeapInfo = (CeHeapInfo*) palloc(sizeof(CeHeapInfo)); - ceHeapInfo->attnum = attnum; - process_encrypted_columns(colDef, ceHeapInfo); - ceLst = lappend (ceLst, ceHeapInfo); - } - } - - - /*Get hash partition key based on relation distribution info*/ - - bool createbucket = false; - bool isbucket = false; - /* restore mode */ - if (isRestoreMode) { - /* table need hash partition */ - if (hashbucket == true) { - /* here is dn */ - if (u_sess->storage_cxt.dumpHashbucketIds != NULL) { - Assert(stmt->distributeby == NULL); - createbucket = true; - } else { - if (unlikely(stmt->distributeby == NULL)) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("distributeby is NULL."))); - } - } - - bucketinfo = 
GetRelationBucketInfo(stmt->distributeby, descriptor, &createbucket, true); - isbucket = true; - Assert((createbucket == true && bucketinfo->bucketlist != NULL && bucketinfo->bucketcol != NULL) || - (createbucket == false && bucketinfo->bucketlist == NULL && bucketinfo->bucketcol != NULL)); - } - } else { - /* here is normal mode */ - /* check if the table can be hash partition */ - if (!IS_SINGLE_NODE && !IsInitdb && (relkind == RELKIND_RELATION) && !IsSystemNamespace(namespaceId) && - !IsCStoreNamespace(namespaceId) && (0 == pg_strcasecmp(storeChar, ORIENTATION_ROW)) && - (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT)) { - if (hashbucket == true || u_sess->attr.attr_storage.enable_hashbucket) { - if (IS_PGXC_DATANODE) { - createbucket = true; - } - bucketinfo = GetRelationBucketInfo(stmt->distributeby, descriptor, &createbucket, hashbucket); - isbucket = true; - Assert((bucketinfo == NULL && u_sess->attr.attr_storage.enable_hashbucket) || - (createbucket == true && bucketinfo->bucketlist != NULL && bucketinfo->bucketcol != NULL) || - (createbucket == false && bucketinfo->bucketlist == NULL && bucketinfo->bucketcol != NULL)); - } - } else if (hashbucket == true) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("The table %s do not support hash bucket", stmt->relation->relname))); - } - } - - if (!IsInitdb && (relkind == RELKIND_RELATION) && !IsSystemNamespace(namespaceId) && - !IsCStoreNamespace(namespaceId) && (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) && - (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT) && !u_sess->attr.attr_storage.enable_recyclebin) { - if (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL) { - storage_type = SEGMENT_PAGE; - } - } else if (storage_type == SEGMENT_PAGE) { - if (u_sess->attr.attr_storage.enable_recyclebin) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_SEGMENT_PAGE), - errmsg("The table %s do not support segment-page storage", 
stmt->relation->relname), - errdetail("Segment-page storage doest not support recyclebin"), - errhint("set enable_recyclebin = off before using segmnet-page storage."))); - } - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("The table %s do not support segment storage", stmt->relation->relname))); - } - - if (storage_type == SEGMENT_PAGE) { - Oid tbspcId = (tablespaceId == InvalidOid) ? u_sess->proc_cxt.MyDatabaseTableSpace : tablespaceId; - uint64 tablespaceMaxSize = 0; - bool isLimit = TableSpaceUsageManager::IsLimited(tbspcId, &tablespaceMaxSize); - if (isLimit) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_SEGMENT_PAGE), - errmsg("The table %s do not support segment-page storage", stmt->relation->relname), - errdetail("Segment-page storage doest not support limited tablespace \"%s\"", get_tablespace_name(tbspcId)), - errhint("use default or unlimited user defined tablespace before using segment-page storage."))); - } - } - - if (ENABLE_DMS && !u_sess->attr.attr_common.IsInplaceUpgrade) { - if ((relkind == RELKIND_RELATION && storage_type != SEGMENT_PAGE) || - relkind == RELKIND_MATVIEW || - pg_strcasecmp(storeChar, ORIENTATION_ROW) != 0 || - relkind == RELKIND_FOREIGN_TABLE || - stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED || - stmt->relation->relpersistence == RELPERSISTENCE_TEMP || - stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP || - pg_strcasecmp(COMPRESSION_NO, StdRdOptionsGetStringData(std_opt, compression, COMPRESSION_NO)) != 0 || - IsCompressedByCmprsInPgclass((RelCompressType)stmt->row_compress)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Only support segment storage type and ASTORE while DMS and DSS enabled.\n" - "Foreign table, matview, temp table or unlogged table is not supported.\nCompression is not " - "supported."))); - } - } - - /* - * Create the relation. 
Inherited defaults and constraints are passed in - * for immediate handling --- since they don't need parsing, they can be - * stored immediately. - */ - relationId = heap_create_with_catalog(relname, - namespaceId, - tablespaceId, - InvalidOid, - InvalidOid, - ofTypeId, - ownerId, - descriptor, - list_concat(cookedDefaults, old_constraints), - relkind, - stmt->relation->relpersistence, - relisshared, - relisshared, - localHasOids, - parentOidCount, - stmt->oncommit, - reloptions, - true, - (g_instance.attr.attr_common.allowSystemTableMods || u_sess->attr.attr_common.IsInplaceUpgrade), - stmt->partTableState, - stmt->row_compress, - bucketinfo, - true, - ceLst, - storage_type, - AccessShareLock, - typaddress); - if (bucketinfo != NULL) { - pfree_ext(bucketinfo->bucketcol); - pfree_ext(bucketinfo->bucketlist); - pfree_ext(bucketinfo); - } - - /* Store inheritance information for new rel. */ - StoreCatalogInheritance(relationId, inheritOids); - - if (relhasuids) { - InsertUidEntry(relationId); - } - - /* - * We must bump the command counter to make the newly-created relation - * tuple visible for opening. - */ - CommandCounterIncrement(); - -#ifdef PGXC - /* - * Add to pgxc_class. - * we need to do this after CommandCounterIncrement - * Distribution info is to be added under the following conditions: - * 1. The create table command is being run on a coordinator - * 2. The create table command is being run in restore mode and - * the statement contains distribute by clause. - * While adding a new datanode to the cluster an existing dump - * that was taken from a datanode is used, and - * While adding a new coordinator to the cluster an exiting dump - * that was taken from a coordinator is used. - * The dump taken from a datanode does NOT contain any DISTRIBUTE BY - * clause. This fact is used here to make sure that when the - * DISTRIBUTE BY clause is missing in the statemnet the system - * should not try to find out the node list itself. - * 3. 
When the sum of shmemNumDataNodes and shmemNumCoords equals to one, - * the create table command is executed on datanode.In this case, we - * do not write created table info in pgxc_class. - */ - if ((*t_thrd.pgxc_cxt.shmemNumDataNodes + *t_thrd.pgxc_cxt.shmemNumCoords) == 1) - isInitdbOnDN = true; - - if ((!u_sess->attr.attr_common.IsInplaceUpgrade || !IsSystemNamespace(namespaceId)) && - (IS_PGXC_COORDINATOR || (isRestoreMode && stmt->distributeby != NULL && !isInitdbOnDN)) && - (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW || - (relkind == RELKIND_STREAM && stmt->distributeby != NULL) || -#ifdef ENABLE_MOT - (relkind == RELKIND_FOREIGN_TABLE && (stmt->distributeby != NULL || - (IsA(stmt, CreateForeignTableStmt) && - isMOTTableFromSrvName(((CreateForeignTableStmt*)stmt)->servername)))))) { -#else - (relkind == RELKIND_FOREIGN_TABLE && stmt->distributeby != NULL))) { -#endif - char* logic_cluster_name = NULL; - PGXCSubCluster* subcluster = stmt->subcluster; - bool isinstallationgroup = (dfsTablespace || relkind == RELKIND_FOREIGN_TABLE - || relkind == RELKIND_STREAM); - if (in_logic_cluster()) { - isinstallationgroup = false; - if (subcluster == NULL) { - logic_cluster_name = PgxcGroupGetCurrentLogicCluster(); - if (logic_cluster_name != NULL) { - subcluster = makeNode(PGXCSubCluster); - subcluster->clustertype = SUBCLUSTER_GROUP; - subcluster->members = list_make1(makeString(logic_cluster_name)); - } - } - } - - /* assemble referenceoid for slice reference table creation */ - FetchSliceReftableOid(stmt, namespaceId); - - AddRelationDistribution(relname, relationId, stmt->distributeby, subcluster, - inheritOids, descriptor, isinstallationgroup, isbucket, bucketcnt); - - if (logic_cluster_name != NULL && subcluster != NULL) { - list_free_deep(subcluster->members); - pfree_ext(subcluster); - pfree_ext(logic_cluster_name); - } - - CommandCounterIncrement(); - /* Make sure locator info gets rebuilt */ - RelationCacheInvalidateEntry(relationId); - } - /* 
If no Datanodes defined, do not create foreign table */ - if (IS_PGXC_COORDINATOR && (relkind == RELKIND_FOREIGN_TABLE || relkind == RELKIND_STREAM) - && u_sess->pgxc_cxt.NumDataNodes == 0) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("No Datanode defined in cluster"))); - } -#endif - /* - * Open the new relation and acquire exclusive lock on it. This isn't - * really necessary for locking out other backends (since they can't see - * the new rel anyway until we commit), but it keeps the lock manager from - * complaining about deadlock risks. - */ - rel = relation_open(relationId, AccessExclusiveLock); - - ereport(DEBUG1, (errmsg("Define relation <%s.%s>, reloid: %u, relfilenode: %u", stmt->relation->schemaname, - stmt->relation->relname, relationId, rel->rd_node.relNode))); - - if (stmt->partTableState) { - UpdatePartKeyExpr(rel, stmt->partTableState, InvalidOid); - if (stmt->partTableState->subPartitionState) { - List* partitionList = relationGetPartitionList(rel, NoLock); - ListCell* cell = NULL; - foreach (cell, partitionList) { - Partition partition = (Partition)(lfirst(cell)); - UpdatePartKeyExpr(rel, stmt->partTableState->subPartitionState, partition->pd_id); - } - releasePartitionList(rel, &partitionList, NoLock); - } - CommandCounterIncrement(); - } - /* - * Now add any newly specified column default and generation expressions - * to the new relation. These are passed to us in the form of raw - * parsetrees; we need to transform them to executable expression trees - * before they can be added. The most convenient way to do that is to - * apply the parser's transformExpr routine, but transformExpr doesn't - * work unless we have a pre-existing relation. So, the transformation has - * to be postponed to this final step of CREATE TABLE. 
- */ - if (rawDefaults != NULL || stmt->constraints != NULL) { - List *tmp = AddRelationNewConstraints(rel, rawDefaults, stmt->constraints, true, true); - list_free_ext(tmp); - } - - /* - * Now add any cluter key constraint for relation if has. - */ - if (stmt->clusterKeys) - AddRelClusterConstraints(rel, stmt->clusterKeys); - - ObjectAddressSet(address, RelationRelationId, relationId); - /* - * Clean up. We keep lock on new relation (although it shouldn't be - * visible to anyone else anyway, until commit). - */ - relation_close(rel, NoLock); - list_free_ext(rawDefaults); - list_free_ext(ceLst); - - return address; -} - -/* - * Emit the right error or warning message for a "DROP" command issued on a - * non-existent relation - */ -static void DropErrorMsgNonExistent(const char* relname, char rightkind, bool missing_ok) -{ - const struct dropmsgstrings* rentry = NULL; - - for (rentry = dropmsgstringarray; rentry->kind != '\0'; rentry++) { - if (rentry->kind == rightkind) { - if (!missing_ok) { - ereport(ERROR, (errcode(rentry->nonexistent_code), errmsg(rentry->nonexistent_msg, relname))); - } else { - ereport(NOTICE, (errmsg(rentry->skipping_msg, relname))); - break; - } - } - } - - Assert(rentry->kind != '\0'); /* Should be impossible */ -} - -static void does_not_exist_skipping_ParallelDDLMode(ObjectType objtype, List* objname, List* objargs, bool missing_ok) -{ - char* msg = NULL; - char* name = NULL; - char* args = NULL; - - StringInfo message = makeStringInfo(); - - switch (objtype) { - case OBJECT_SCHEMA: - msg = gettext_noop("schema \"%s\" does not exist"); - name = NameListToString(objname); - break; - case OBJECT_FUNCTION: - msg = gettext_noop("function %s(%s) does not exist"); - name = NameListToString(objname); - args = TypeNameListToString(objargs); - break; - default: { - pfree_ext(message->data); - pfree_ext(message); - - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("unexpected object type (%d)", (int)objtype))); - } 
break; - } - - if (missing_ok) { - if (args == NULL) { - appendStringInfo(message, msg, name); - } else { - appendStringInfo(message, msg, name, args); - } - - appendStringInfo(message, ", skipping"); - - ereport(NOTICE, (errmsg("%s", message->data))); - - pfree_ext(message->data); - pfree_ext(message); - } else { - pfree_ext(message->data); - pfree_ext(message); - - if (args == NULL) - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg(msg, name))); - else - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg(msg, name, args))); - } -} - -static List *TryGetTypeNeedDrop(Relation rel) -{ - List *typlist = NULL; - - if (rel == NULL || rel->rd_att == NULL || rel->rd_rel->relkind != RELKIND_RELATION) { - return NULL; - } - - TupleDesc tupDesc = rel->rd_att; - for (int i = 0; i < tupDesc->natts; i++) { - if (!tupDesc->attrs[i].attisdropped && type_is_set(tupDesc->attrs[i].atttypid)) { - typlist = lappend(typlist, &tupDesc->attrs[i].atttypid); - } - } - - return typlist; -} - -/* - * Emit the right error message for a "DROP" command issued on a - * relation of the wrong type - */ -static void DropErrorMsgWrongType(const char* relname, char wrongkind, char rightkind) -{ - const struct dropmsgstrings* rentry = NULL; - const struct dropmsgstrings* wentry = NULL; - - for (rentry = dropmsgstringarray; rentry->kind != '\0'; rentry++) { - if (rentry->kind == rightkind) { - break; - } - } - Assert(rentry->kind != '\0'); - - for (wentry = dropmsgstringarray; wentry->kind != '\0'; wentry++) { - if (wentry->kind == wrongkind) { - break; - } - } - /* wrongkind could be something we don't have in our table... */ - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg(rentry->nota_msg, relname), - (wentry->kind != '\0') ? 
errhint("%s", _(wentry->drophint_msg)) : 0)); -} - -/* - * PreCheckforRemoveRelation - * Check before implementing DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW, - * DROP FOREIGN TABLE, DROP MATERIALIZED VIEW to exclude objects which do not exist. - */ -ObjectAddresses* PreCheckforRemoveRelation(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExecType* exec_type) -{ - ObjectAddresses* objects = NULL; - char relkind; - const char* relkind_s = NULL; - ListCell* cell = NULL; - LOCKMODE lockmode = AccessExclusiveLock; - bool cn_miss_relation = false; - uint32 flags = 0; - StringInfo relation_namelist = makeStringInfo(); - - /* DROP CONCURRENTLY uses a weaker lock, and has some restrictions */ - if (drop->concurrent) { - flags |= PERFORM_DELETION_CONCURRENTLY; - lockmode = ShareUpdateExclusiveLock; - Assert(drop->removeType == OBJECT_INDEX); - if (list_length(drop->objects) != 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DROP INDEX CONCURRENTLY does not support dropping multiple objects"))); - if (drop->behavior == DROP_CASCADE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("DROP INDEX CONCURRENTLY does not support CASCADE"))); - } - - /* - * First we identify all the relations, then we delete them in a single - * performMultipleDeletions() call. This is to avoid unwanted DROP - * RESTRICT errors if one of the relations depends on another. 
- */ - /* Determine required relkind */ - switch (drop->removeType) { - case OBJECT_TABLE: - relkind = RELKIND_RELATION; - relkind_s = "TABLE"; - break; - - case OBJECT_INDEX: - relkind = RELKIND_INDEX; - relkind_s = "INDEX"; - break; - - case OBJECT_SEQUENCE: - relkind = RELKIND_SEQUENCE; - relkind_s = "SEQUENCE"; - break; - - case OBJECT_LARGE_SEQUENCE: - relkind = RELKIND_LARGE_SEQUENCE; - relkind_s = "LARGE SEQUENCE"; - break; - - case OBJECT_VIEW: - relkind = RELKIND_VIEW; - relkind_s = "VIEW"; - break; - - case OBJECT_CONTQUERY: - relkind = RELKIND_CONTQUERY; - relkind_s = "CONTVIEW"; - break; - - case OBJECT_MATVIEW: - relkind = RELKIND_MATVIEW; - relkind_s = "MATERIALIZED VIEW"; - break; - - case OBJECT_FOREIGN_TABLE: - relkind = RELKIND_FOREIGN_TABLE; - relkind_s = "FOREIGN TABLE"; - break; - case OBJECT_STREAM: - relkind = RELKIND_STREAM; - relkind_s = "STREAM"; - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized drop object type: %d", (int)drop->removeType))); - relkind = 0; /* keep compiler quiet */ - relkind_s = ""; /* keep compiler quiet */ - } break; - } - - objects = new_object_addresses(); - - foreach (cell, drop->objects) { - RangeVar* rel = makeRangeVarFromNameList((List*)lfirst(cell)); - Oid relOid; - ObjectAddress obj; - Relation delrel; - struct DropRelationCallbackState state; - - /* - * These next few steps are a great deal like relation_openrv, but we - * don't bother building a relcache entry since we don't need it. - * - * Check for shared-cache-inval messages before trying to access the - * relation. This is needed to cover the case where the name - * identifies a rel that has been dropped and recreated since the - * start of our transaction: if we don't flush the old syscache entry, - * then we'll latch onto that entry and suffer an error later. - */ - AcceptInvalidationMessages(); - - /* Look up the appropriate relation using namespace search. 
*/ - state.relkind = relkind; - state.heapOid = InvalidOid; - state.concurrent = drop->concurrent; - - /* - * Redis in online expansion will get AccessShareLock for resizing table. - * We use ExclusiveLock here to avoid collisions with AccessShareLock so - * that we can send drop query to first CN and cancel the redis thread in - * first CN. - */ - if (CheckRangeVarInRedistribution(rel)) { - if (lockmode == AccessExclusiveLock) - lockmode = ExclusiveLock; - } else { - /* Reset lockmode in the next loop for normal table not in redis. */ - if (lockmode == ExclusiveLock) - lockmode = AccessExclusiveLock; - } - - relOid = RangeVarGetRelidExtended( - rel, lockmode, true, false, false, false, RangeVarCallbackForDropRelation, (void*)&state); - - /* - * Relation not found. - * - * For "DROP TABLE/INDEX/VIEW/...", just ERROR - * - * For "DROP TABLE/INDEX/VIEW/... IF EXISTS ...", - * local CN: rewrite the querystring without the not-found relations - * remote nodes: should not happen since local CN does have the relation - * so that the query is passed down. In this case, we just ERROR. - * In maintenance mode, we pass down the original querystring anyway. - */ - if (!OidIsValid(relOid)) { - bool missing_ok = drop->missing_ok; - - /* for the inconsistent index on nodes cause by creating index concurrently, missing is ok on local node */ - if (!u_sess->attr.attr_common.xc_maintenance_mode && relkind != RELKIND_INDEX) { - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) - cn_miss_relation = true; - else - missing_ok = false; - } - - DropErrorMsgNonExistent(rel->relname, relkind, missing_ok); - - continue; - } - - TrForbidAccessRbObject(RelationRelationId, relOid, rel->relname); - - delrel = try_relation_open(relOid, NoLock); - /* - * Open up drop table command for table being redistributed right now. 
- * - * During online expansion time, we only allow to drop object when - * the object is a table and the target table is not in read only mode - */ - if (delrel != NULL && !u_sess->attr.attr_sql.enable_cluster_resize && - (RelationInClusterResizingReadOnly(delrel) || - (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { - ereport(ERROR, - (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), - errmsg("%s is redistributing, please retry later.", delrel->rd_rel->relname.data))); - } - - if (delrel != NULL) { - relation_close(delrel, NoLock); - } - - /* OK, we're ready to delete this one */ - obj.classId = RelationRelationId; - obj.objectId = relOid; - obj.objectSubId = 0; - - add_exact_object_address(&obj, objects); - - /* Record relations that do exist on local CN. */ - char* relation_name = NameListToQuotedString((List*)lfirst(cell)); - appendStringInfo(relation_namelist, relation_namelist->data[0] ? ", %s" : "%s", relation_name); - pfree_ext(relation_name); - - if (OidIsValid(state.heapOid)) { - UnlockRelationOid(state.heapOid, state.concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock); - } - - UnlockRelationOid(relOid, lockmode); - } - - /* - * Fabricate a new DROP TABLE/INDEX/VIEW/... querystring with relations found on local CN. - * If no such relations, then there is nothing to be done on remote nodes. - */ - if (cn_miss_relation) { - if (relation_namelist->data[0]) - appendStringInfo(tmp_queryString, - "DROP %s IF EXISTS %s %s", - relkind_s, - relation_namelist->data, - drop->behavior == DROP_CASCADE ? "CASCADE" : "RESTRICT"); - else - *exec_type = EXEC_ON_NONE; - } - - pfree_ext(relation_namelist->data); - pfree_ext(relation_namelist); - - return objects; -} - -/* - * RemoveRelationsonMainExecCN - * Implements DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW, DROP MATERIALIZED VIEW - * DROP FOREIGN TABLE on main execute coordinator. 
- */ -void RemoveRelationsonMainExecCN(DropStmt* drop, ObjectAddresses* objects) -{ - uint32 flags = 0; - - /* DROP CONCURRENTLY uses a weaker lock, and has some restrictions */ - if (drop->concurrent) { - flags |= PERFORM_DELETION_CONCURRENTLY; - Assert(drop->removeType == OBJECT_INDEX); - if (list_length(drop->objects) != 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DROP INDEX CONCURRENTLY does not support dropping multiple objects"))); - if (drop->behavior == DROP_CASCADE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("DROP INDEX CONCURRENTLY does not support CASCADE"))); - } - - for (int i = 0; i < objects->numrefs; i++) { - const ObjectAddress* thisobj = objects->refs + i; - - Assert(thisobj->objectId); - - if (thisobj->classId == RelationRelationId) { - /* - * In DROP INDEX CONCURRENTLY, take only ShareUpdateExclusiveLock on - * the index for the moment. index_drop() will promote the lock once - * it's safe to do so. In all other cases we need full exclusive - * lock. - */ - if (flags & PERFORM_DELETION_CONCURRENTLY) { - LockRelationOid(thisobj->objectId, ShareUpdateExclusiveLock); - } else { - LockRelationOid(thisobj->objectId, AccessExclusiveLock); - } -#ifdef ENABLE_MULTIPLE_NODES - PreventDDLIfTsdbDisabled(thisobj->objectId); - /* check if the rel is timeseries rel, if so, remove jobs */ - RemoveJobsWhenRemoveRelation(thisobj->objectId); -#endif /* ENABLE_MULTIPLE_NODES */ - } else { - /* assume we should lock the whole object not a sub-object */ - LockDatabaseObject(thisobj->classId, thisobj->objectId, 0, AccessExclusiveLock); - } - } - - performMultipleDeletions(objects, drop->behavior, flags); -} - -/* - * RemoveRelations - * Implements DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW, - * DROP FOREIGN TABLE on datanodes and none main execute coordinator. 
- */ -void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExecType* exec_type) -{ - ObjectAddresses* objects = NULL; - char relkind; - const char* relkind_s = NULL; - ListCell* cell = NULL; - uint32 flags = 0; - LOCKMODE lockmode = AccessExclusiveLock; - bool cn_miss_relation = false; - StringInfo relation_namelist = makeStringInfo(); - char relPersistence; - List *typlist = NULL; - - /* DROP CONCURRENTLY uses a weaker lock, and has some restrictions */ - if (drop->concurrent) { - lockmode = ShareUpdateExclusiveLock; - Assert(drop->removeType == OBJECT_INDEX); - if (list_length(drop->objects) != 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DROP INDEX CONCURRENTLY does not support dropping multiple objects"))); - if (drop->behavior == DROP_CASCADE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("DROP INDEX CONCURRENTLY does not support CASCADE"))); - } - - /* - * First we identify all the relations, then we delete them in a single - * performMultipleDeletions() call. This is to avoid unwanted DROP - * RESTRICT errors if one of the relations depends on another. 
- */ - /* Determine required relkind */ - switch (drop->removeType) { - case OBJECT_TABLE: - relkind = RELKIND_RELATION; - relkind_s = "TABLE"; - break; - - case OBJECT_INDEX: - relkind = RELKIND_INDEX; - relkind_s = "INDEX"; - break; - - case OBJECT_SEQUENCE: - relkind = RELKIND_SEQUENCE; - relkind_s = "SEQUENCE"; - break; - - case OBJECT_LARGE_SEQUENCE: - relkind = RELKIND_LARGE_SEQUENCE; - relkind_s = "LARGE SEQUENCE"; - break; - - case OBJECT_VIEW: - relkind = RELKIND_VIEW; - relkind_s = "VIEW"; - break; - - case OBJECT_CONTQUERY: - relkind = RELKIND_CONTQUERY; - relkind_s = "CONTVIEW"; - break; - - case OBJECT_MATVIEW: - relkind = RELKIND_MATVIEW; - relkind_s = "MATERIALIZED VIEW"; - break; - - case OBJECT_FOREIGN_TABLE: - relkind = RELKIND_FOREIGN_TABLE; - relkind_s = "FOREIGN TABLE"; - break; - - case OBJECT_STREAM: - relkind = RELKIND_STREAM; - relkind_s = "STREAM"; - break; - - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized drop object type: %d", (int)drop->removeType))); - - relkind = 0; /* keep compiler quiet */ - relkind_s = ""; /* keep compiler quiet */ - } break; - } - - /* Lock and validate each relation; build a list of object addresses */ - objects = new_object_addresses(); - - int cnt = 0; /* counter used to identify the round of the objects */ -#ifdef ENABLE_MULTIPLE_NODES - bool ts_drop_idx = false; - Oid idx_id = InvalidOid; -#endif - foreach (cell, drop->objects) { - RangeVar* rel = makeRangeVarFromNameList((List*)lfirst(cell)); - Oid relOid; - ObjectAddress obj; - Relation delrel; - struct DropRelationCallbackState state = {0}; - cnt++; - - /* - * These next few steps are a great deal like relation_openrv, but we - * don't bother building a relcache entry since we don't need it. - * - * Check for shared-cache-inval messages before trying to access the - * relation. 
This is needed to cover the case where the name - * identifies a rel that has been dropped and recreated since the - * start of our transaction: if we don't flush the old syscache entry, - * then we'll latch onto that entry and suffer an error later. - */ - AcceptInvalidationMessages(); - - /* Look up the appropriate relation using namespace search. */ - state.relkind = relkind; - state.heapOid = InvalidOid; - state.concurrent = drop->concurrent; - -#ifdef ENABLE_MULTIPLE_NODES - /* - * If the case for timeseries table with drop index, transfrom target object - * from ts rel to the correct tag rel index(es) objects. Mark this scenario - * by ts_drop_idx. - */ - char idx_name[NAMEDATALEN] = {0}; - Oid nspname; - /* if the object in first round is a timeseries table */ - if (drop->removeType == OBJECT_INDEX && cnt == 1 && check_ts_idx_ddl(rel, &idx_id, &nspname)) { - ts_drop_idx = true; - } - if (ts_drop_idx) { - if (cnt == 1 && list_length(drop->objects) == 1) { - /* In upgrade scenario, drop the old multi-column index */ - char tag_relname[NAMEDATALEN] = {0}; - get_outdate_tag_relname(tag_relname, idx_id); - /* add timeseries rel's distribution */ - add_ts_tag_distribution(tag_relname, nspname, rel->relname); - get_ts_idx_tgt(idx_name, &idx_id, true); - } else if (cnt > 1) { - /* drop specified index on ts tag table */ - get_ts_idx_tgt(idx_name, &idx_id, true, false, rel->relname); - } else { - /* the first turn to drop specified index, skip */ - continue; - } - /* timeseries indexes are all in cstore namespace */ - rel->schemaname = "cstore"; - rel->relname = idx_name; - } -#endif - - relOid = RangeVarGetRelidExtended( - rel, lockmode, true, false, false, false, RangeVarCallbackForDropRelation, (void*)&state); - - /* - * Relation not found. - * - * For "DROP TABLE/INDEX/VIEW/...", just ERROR - * - * For "DROP TABLE/INDEX/VIEW/... 
IF EXISTS ...", - * local CN: rewrite the querystring without the not-found relations - * remote nodes: should not happen since local CN does have the relation - * so that the query is passed down. In this case, we just ERROR. - * In maintenance mode, we pass down the original querystring anyway. - */ - if (!OidIsValid(relOid)) { - bool missing_ok = drop->missing_ok; - - /* for the inconsistent index on nodes cause by creating index concurrently, missing is ok on local node */ - if (!u_sess->attr.attr_common.xc_maintenance_mode && !IS_SINGLE_NODE && relkind != RELKIND_INDEX) { - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) - cn_miss_relation = true; - else if (!ENABLE_ROUTER_DN) // in router, drop if exists should enable on dn. - missing_ok = false; - } - - DropErrorMsgNonExistent(rel->relname, relkind, missing_ok); - continue; - } - -#ifdef ENABLE_MULTIPLE_NODES - /* check if the rel is timeseries rel, if so, remove jobs */ - if (relkind == RELKIND_RELATION) { - PreventDDLIfTsdbDisabled(relOid); - RemoveJobsWhenRemoveRelation(relOid); - } -#endif /* ENABLE_MULTIPLE_NODES */ - if (relkind == RELKIND_RELATION) { - DeleteUidEntry(relOid); - } else if (relkind == RELKIND_LARGE_SEQUENCE && CheckSeqOwnedByAutoInc(relOid)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot drop sequence owned by auto_increment column"))); - } - - delrel = try_relation_open(relOid, NoLock); - /* - * Open up drop table command for table being redistributed right now. 
- * - * During online expansion time, we only allow to drop object when - * the object is a table and the target table is not in read only mode - */ - if (delrel != NULL && !u_sess->attr.attr_sql.enable_cluster_resize && - (RelationInClusterResizingReadOnly(delrel) || - (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { - ereport(ERROR, - (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), - errmsg("%s is redistributing, please retry later.", delrel->rd_rel->relname.data))); - } - - // cstore relation doesn't support concurrent INDEX now. - if (drop->concurrent == true && delrel != NULL && OidIsValid(delrel->rd_rel->relcudescrelid)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column store table does not support concurrent INDEX yet"), - errdetail("The feature is not currently supported"))); - } - - typlist = TryGetTypeNeedDrop(delrel); - - if (delrel != NULL) { - relation_close(delrel, NoLock); - } - - relPersistence = get_rel_persistence(relOid); - if (drop->concurrent - && !(relPersistence == RELPERSISTENCE_TEMP - || relPersistence == RELPERSISTENCE_GLOBAL_TEMP)) { - Assert(list_length(drop->objects) == 1 && drop->removeType == OBJECT_INDEX); - flags |= PERFORM_DELETION_CONCURRENTLY; - } - - /* OK, we're ready to delete this one */ - obj.classId = RelationRelationId; - obj.objectId = relOid; - obj.objectSubId = 0; - - TrForbidAccessRbObject(obj.classId, obj.objectId, rel->relname); - - add_exact_object_address(&obj, objects); - if (relkind == RELKIND_RELATION) { - add_type_object_address(typlist, objects); - } - - /* Record relations that do exist on local CN. */ - char* relation_name = NameListToQuotedString((List*)lfirst(cell)); - appendStringInfo(relation_namelist, relation_namelist->data[0] ? ", %s" : "%s", relation_name); - pfree_ext(relation_name); - } - - /* - * Fabricate a new DROP TABLE/INDEX/VIEW/... querystring with relations found on local CN. 
- * If no such relations, then there is nothing to be done on remote nodes. - */ - if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || cn_miss_relation) { - if (relation_namelist->data[0]) - appendStringInfo(tmp_queryString, - "DROP %s IF EXISTS %s %s", - relkind_s, - relation_namelist->data, - drop->behavior == DROP_CASCADE ? "CASCADE" : "RESTRICT"); - else - *exec_type = EXEC_ON_NONE; - } - - if (TrCheckRecyclebinDrop(drop, objects)) { - /* Here we use Recyclebin-based-Drop. */ - TrDrop(drop, objects, drop->behavior); - } else { - /* Here we really delete them. */ - performMultipleDeletions(objects, drop->behavior, flags); - } - - free_object_addresses(objects); - pfree_ext(relation_namelist->data); - pfree_ext(relation_namelist); -} - -/* - * PreCheckforRemoveObjects - * Check before implementing DROP SCHEMA, DROP FUNCTION to exclude objects which do not exist. - */ -ObjectAddresses* PreCheckforRemoveObjects( - DropStmt* stmt, StringInfo tmp_queryString, RemoteQueryExecType* exec_type, bool isFirstNode, bool is_securityadmin) -{ - ObjectAddresses* objects = NULL; - ListCell* cell1 = NULL; - ListCell* cell2 = NULL; - bool skip_check = false; - bool cn_miss_relation = false; - StringInfo relation_namelist = makeStringInfo(); - const char* relkind_s = NULL; - - if (stmt->removeType == OBJECT_SCHEMA) - relkind_s = "SCHEMA"; - else if (stmt->removeType == OBJECT_FUNCTION) - relkind_s = "FUNCATION"; - else { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NODE_STATE), errmsg("unexpected object type (%d)", (int)stmt->removeType))); - } - - objects = new_object_addresses(); - - foreach (cell1, stmt->objects) { - ObjectAddress address; - List* objname = (List*)lfirst(cell1); - List* objargs = NIL; - Relation relation = NULL; - Oid namespaceId; - - if (stmt->arguments) { - cell2 = (!cell2 ? list_head(stmt->arguments) : lnext(cell2)); - objargs = (List*)lfirst(cell2); - } - - /* Get an ObjectAddress for the object. 
*/ - address = - get_object_address(stmt->removeType, objname, objargs, &relation, AccessExclusiveLock, stmt->missing_ok); - - /* Issue NOTICE if supplied object was not found. */ - if (!OidIsValid(address.objectId)) { - bool missing_ok = stmt->missing_ok; - - if (!u_sess->attr.attr_common.xc_maintenance_mode) { - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) - cn_miss_relation = true; - else - missing_ok = false; - } - - does_not_exist_skipping_ParallelDDLMode(stmt->removeType, objname, objargs, missing_ok); - - continue; - } - - /* - * Although COMMENT ON FUNCTION, SECURITY LABEL ON FUNCTION, etc. are - * happy to operate on an aggregate as on any other function, we have - * historically not allowed this for DROP FUNCTION. - */ - if (stmt->removeType == OBJECT_FUNCTION) { - Oid funcOid = address.objectId; - if (IsMaskingFunctionOid(funcOid) && !u_sess->attr.attr_common.IsInplaceUpgrade) { - ereport(ERROR, (errmodule(MOD_FUNCTION), - errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("function \"%s\" is a masking function, it can not be droped", - NameListToString(objname)), - errdetail("cannot drop masking function"))); - } - HeapTuple tup; - - /* if the function is a builtin function, its oid is less than 10000. 
- * we can't allow drop the builtin functions - */ - if (IsSystemObjOid(funcOid) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg( - "function \"%s\" is a builtin function,it can not be droped", NameListToString(objname)))); - } - - tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); - if (!HeapTupleIsValid(tup)) { - /* should not happen */ - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for function %u", funcOid))); - } - - if (((Form_pg_proc)GETSTRUCT(tup))->proisagg) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is an aggregate function", NameListToString(objname)), - errhint("Use DROP AGGREGATE to drop aggregate functions."))); - - CacheInvalidateFunction(funcOid, InvalidOid); - ReleaseSysCache(tup); - } - - //@Temp Table. myTempNamespace and myTempToastNamespace's owner is - // bootstrap user, so can not be deleted by ordinary user. to ensuer this two - // schema be deleted on session quiting, we should bypass acl check when - // drop my own temp namespace - if (stmt->removeType == OBJECT_SCHEMA && (address.objectId == u_sess->catalog_cxt.myTempNamespace || - address.objectId == u_sess->catalog_cxt.myTempToastNamespace)) - skip_check = true; - - if (!skip_check) { - AclResult aclresult = ACLCHECK_NO_PRIV; - switch (stmt->removeType) { - case OBJECT_SCHEMA: - aclresult = pg_namespace_aclcheck(address.objectId, GetUserId(), ACL_DROP); - break; - case OBJECT_FUNCTION: - aclresult = pg_proc_aclcheck(address.objectId, GetUserId(), ACL_DROP); - break; - default: - break; - } - skip_check = (aclresult == ACLCHECK_OK) ? true : false; - } - - /* Check permissions. 
*/ - namespaceId = get_object_namespace(&address); - if ((!is_securityadmin) && (!skip_check) && - (!OidIsValid(namespaceId) || !pg_namespace_ownercheck(namespaceId, GetUserId()))) - check_object_ownership(GetUserId(), stmt->removeType, address, objname, objargs, relation); - - /* Release any relcache reference count, but keep lock until commit. */ - if (relation) - heap_close(relation, NoLock); - - add_exact_object_address(&address, objects); - - /* Record relations that do exist on local CN. */ - char* relation_name = NameListToQuotedString((List*)lfirst(cell1)); - appendStringInfo(relation_namelist, relation_namelist->data[0] ? ", %s" : "%s", relation_name); - pfree_ext(relation_name); - - if (isFirstNode) - continue; - - Assert(address.classId != RelationRelationId); - - if (IsSharedRelation(address.classId)) - UnlockSharedObject(address.classId, address.objectId, 0, AccessExclusiveLock); - else - UnlockDatabaseObject(address.classId, address.objectId, 0, AccessExclusiveLock); - } - - /* - * Fabricate a new DROP TABLE/INDEX/VIEW/... querystring with relations found on local CN. - * If no such relations, then there is nothing to be done on remote nodes. - */ - if (cn_miss_relation) { - if (relation_namelist->data[0]) - appendStringInfo(tmp_queryString, - "DROP %s IF EXISTS %s %s", - relkind_s, - relation_namelist->data, - stmt->behavior == DROP_CASCADE ? "CASCADE" : "RESTRICT"); - else - *exec_type = EXEC_ON_NONE; - } - - pfree_ext(relation_namelist->data); - pfree_ext(relation_namelist); - - return objects; -} - -/* - * RemoveObjectsonMainExecCN - * Implements DROP SCHEMA, DROP FUNCTION - * on main execute coordinator. 
- */ -void RemoveObjectsonMainExecCN(DropStmt* drop, ObjectAddresses* objects, bool isFirstNode) -{ - if (!isFirstNode) { - for (int i = 0; i < objects->numrefs; i++) { - const ObjectAddress* thisobj = objects->refs + i; - - Assert(thisobj->classId != RelationRelationId); - Assert(thisobj->objectId); - - if (IsSharedRelation(thisobj->classId)) - LockSharedObject(thisobj->classId, thisobj->objectId, 0, AccessExclusiveLock); - else - LockDatabaseObject(thisobj->classId, thisobj->objectId, 0, AccessExclusiveLock); - } - } - - /* Here we really delete them. */ - performMultipleDeletions(objects, drop->behavior, 0); -} - -/* - * Check whether we have the permission to process the classform. - */ -static bool CheckClassFormPermission(Form_pg_class classform) -{ - if (g_instance.attr.attr_common.allowSystemTableMods || u_sess->attr.attr_common.IsInplaceUpgrade || - !IsSystemClass(classform)) { - return true; - } - return false; -} - -/* Allow DROP to table owner, schema owner or users who have DROP privilege of the target object */ -static void DropRelationPermissionCheck(char relkind, Oid relOid, Oid nspOid, const char* relname) -{ - AclResult aclresult; - if (relkind == RELKIND_INDEX) { - Oid tableoid = IndexGetRelation(relOid, false); - aclresult = pg_class_aclcheck(tableoid, GetUserId(), ACL_INDEX); - bool anyResult = false; - if (aclresult != ACLCHECK_OK && !IsSysSchema(GetNamespaceIdbyRelId(tableoid))) { - anyResult = HasSpecAnyPriv(GetUserId(), DROP_ANY_INDEX, false); - } - aclresult = anyResult ? 
ACLCHECK_OK : aclresult; - } else { - aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_DROP); - } - - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relOid, GetUserId()) && - !pg_namespace_ownercheck(nspOid, GetUserId())) { - aclcheck_error(aclresult, ACL_KIND_CLASS, relname); - } -} - -static bool IsPartitionDeltaCudesc(Oid relOid) -{ -#define PARTITION_DELTA_NAME "pg_delta_part_" -#define PARTITION_CUDESC_NAME "pg_cudesc_part_" - - int attnum; - bool found = false; - ScanKeyData scanKey[1]; - TableScanDesc scan; - Relation pgpartition = NULL; - Relation rel = NULL; - - rel = try_relation_open(relOid, AccessShareLock); - if (!RelationIsValid(rel)) { - return false; - } - - const char *relname = RelationGetRelationName(rel); - if (strncmp(relname, PARTITION_DELTA_NAME, strlen(PARTITION_DELTA_NAME)) == 0) { - attnum = Anum_pg_partition_reltoastrelid; - } else if (strncmp(relname, PARTITION_CUDESC_NAME, strlen(PARTITION_CUDESC_NAME)) == 0) { - attnum = Anum_pg_partition_relcudescrelid; - } else { - heap_close(rel, AccessShareLock); - return false; - } - - ScanKeyInit(&scanKey[0], attnum, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rel->rd_id)); - pgpartition = heap_open(PartitionRelationId, AccessShareLock); - scan = tableam_scan_begin(pgpartition, SnapshotNow, 1, scanKey); - if (tableam_scan_getnexttuple(scan, ForwardScanDirection)) { - found = true; - } - tableam_scan_end(scan); - heap_close(pgpartition, AccessShareLock); - heap_close(rel, AccessShareLock); - - return found; -} - -/* - * Before acquiring a table lock, check whether we have sufficient rights. - * In the case of DROP INDEX, also try to lock the table before the index. 
- */ -static void RangeVarCallbackForDropRelation( - const RangeVar* rel, Oid relOid, Oid oldRelOid, bool target_is_partition, void* arg) -{ - HeapTuple tuple; - struct DropRelationCallbackState* state; - char relkind; - Form_pg_class classform; - LOCKMODE heap_lockmode; - bool invalid_system_index; - - state = (struct DropRelationCallbackState*)arg; - relkind = state->relkind; - heap_lockmode = state->concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock; - invalid_system_index = false; - - if (target_is_partition) - heap_lockmode = AccessShareLock; - /* - * If we previously locked some other index's heap, and the name we're - * looking up no longer refers to that relation, release the now-useless - * lock. - */ - if (relOid != oldRelOid && OidIsValid(state->heapOid)) { - UnlockRelationOid(state->heapOid, heap_lockmode); - state->heapOid = InvalidOid; - } - - /* Didn't find a relation, so no need for locking or permission checks. */ - if (!OidIsValid(relOid)) - return; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid)); - if (!HeapTupleIsValid(tuple)) - return; /* concurrently dropped, so nothing to do */ - classform = (Form_pg_class)GETSTRUCT(tuple); - - char expected_relkind = classform->relkind; - if (classform->relkind == RELKIND_GLOBAL_INDEX) { - expected_relkind = RELKIND_INDEX; - } - bool flag = !(expected_relkind == RELKIND_STREAM && relkind == RELKIND_FOREIGN_TABLE) - && !(expected_relkind == RELKIND_CONTQUERY && relkind == RELKIND_VIEW) - && (expected_relkind != relkind) && !(u_sess->attr.attr_common.IsInplaceUpgrade && - relkind == RELKIND_RELATION && expected_relkind == RELKIND_TOASTVALUE); - if (flag) { - DropErrorMsgWrongType(rel->relname, classform->relkind, relkind); - } - - /* - * Check the case of a system index that might have been invalidated by a - * failed concurrent process and allow its drop. For the time being, this - * only concerns indexes of toast relations that became invalid during a - * REINDEX CONCURRENTLY process. 
- */ - if(IsSystemClass(classform)&&relkind == RELKIND_INDEX) { - HeapTuple locTuple; - Form_pg_index indexform; - bool indisvalid; - - locTuple = SearchSysCache1(INDEXRELID,ObjectIdGetDatum(relOid)); - if(!HeapTupleIsValid(locTuple)) { - ReleaseSysCache(tuple); - return; - } - - indexform = (Form_pg_index) GETSTRUCT(locTuple); - indisvalid = indexform->indisvalid; - ReleaseSysCache(locTuple); - - /*Mark object as being an invaild index of system catalogs*/ - if(!indisvalid) - invalid_system_index = true; - } - - if (IsPartitionDeltaCudesc(relOid)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot drop relation \"%s\", it is a partition delta/cudesc table", rel->relname))); - } - - /* Permission Check */ - DropRelationPermissionCheck(relkind, relOid, classform->relnamespace, rel->relname); - - if (!invalid_system_index && !CheckClassFormPermission(classform)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", rel->relname))); - } - - ReleaseSysCache(tuple); - - /* - * In DROP INDEX, attempt to acquire lock on the parent table before - * locking the index. index_drop() will need this anyway, and since - * regular queries lock tables before their indexes, we risk deadlock if - * we do it the other way around. No error if we don't find a pg_index - * entry, though --- the relation may have been dropped. 
- */ - if ((relkind == RELKIND_INDEX || relkind == RELKIND_GLOBAL_INDEX) && relOid != oldRelOid) { - state->heapOid = IndexGetRelation(relOid, true); - if (OidIsValid(state->heapOid)) - LockRelationOid(state->heapOid, heap_lockmode); - } -} - -void TruncateOnePart(Relation rel, HeapTuple tup) -{ - Oid toastOid = ((Form_pg_partition)GETSTRUCT(tup))->reltoastrelid; - Relation toastRel = NULL; - MultiXactId minmulti = GetOldestMultiXactId(); - - Oid partOid = HeapTupleGetOid(tup); - Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); - PartitionSetNewRelfilenode(rel, p, u_sess->utils_cxt.RecentXmin, - RelationIsColStore(rel) ? InvalidMultiXactId : minmulti); - - /* process the toast table */ - if (OidIsValid(toastOid)) { - Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); - toastRel = heap_open(toastOid, AccessExclusiveLock); - RelationSetNewRelfilenode(toastRel, u_sess->utils_cxt.RecentXmin, minmulti); - heap_close(toastRel, AccessExclusiveLock); - } - partitionClose(rel, p, AccessExclusiveLock); - - /* report truncate partition to PgStatCollector */ - Oid statFlag = RelationIsPartitionOfSubPartitionTable(rel) ? partid_get_parentid(rel->rd_id) : rel->rd_id; - pgstat_report_truncate(partOid, statFlag, rel->rd_rel->relisshared); -} - -/* - * ExecuteTruncate - * Executes a TRUNCATE command. - * - * This is a multi-relation truncate. We first open and grab exclusive - * lock on all relations involved, checking permissions and otherwise - * verifying that the relation is OK for truncation. In CASCADE mode, - * relations having FK references to the targeted relations are automatically - * added to the group; in RESTRICT mode, we check that all FK references are - * internal to the group that's being truncated. Finally all the relations - * are truncated and reindexed. 
- */ -#ifdef PGXC -void ExecuteTruncate(TruncateStmt* stmt, const char* sql_statement) -#else -void ExecuteTruncate(TruncateStmt* stmt) -#endif -{ - List* rels = NIL; - List* relids = NIL; - List* seq_relids = NIL; - List* autoinc_seqoids = NIL; - List* rels_in_redis = NIL; - EState* estate = NULL; - ResultRelInfo* resultRelInfos = NULL; - ResultRelInfo* resultRelInfo = NULL; - SubTransactionId mySubid; - ListCell* cell = NULL; - bool isDfsTruncate = false; -#ifdef PGXC - char* FirstExecNode = NULL; - bool isFirstNode = false; - - if (TrCheckRecyclebinTruncate(stmt)) { - TrTruncate(stmt); - return; - } - - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - FirstExecNode = find_first_exec_cn(); - isFirstNode = (strcmp(FirstExecNode, g_instance.attr.attr_common.PGXCNodeName) == 0); - } -#endif - -#ifdef PGXC - if (stmt->restart_seqs) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("PGXC does not support RESTART IDENTITY yet"), - errdetail("The feature is not supported currently"))); -#endif - -#ifdef PGXC - /* - * If I am the main execute CN but not CCN, - * Notify the CCN to create firstly, and then notify other CNs except me. 
- */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (u_sess->attr.attr_sql.enable_parallel_ddl && !isFirstNode) { - bool is_temp = false; - RemoteQuery* step = makeNode(RemoteQuery); - - /* Check un-allowed case where truncate tables from different node groups */ - if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); - } - - foreach (cell, stmt->relations) { - Oid relid; - RangeVar* rel = (RangeVar*)lfirst(cell); - - relid = RangeVarGetRelid(rel, NoLock, false); - - if (IsTempTable(relid)) { - is_temp = true; - break; - } - } - - step->combine_type = COMBINE_TYPE_SAME; - step->exec_nodes = NULL; - step->sql_statement = pstrdup(sql_statement); - step->force_autocommit = false; - - if (is_temp) - step->exec_type = EXEC_ON_NONE; - else - step->exec_type = EXEC_ON_COORDS; - - step->is_temp = is_temp; - ExecRemoteUtility_ParallelDDLMode(step, FirstExecNode); - pfree_ext(step->sql_statement); - pfree_ext(step); - } - } -#endif - - /* - * Open, exclusive-lock, and check all the explicitly-specified relations - */ - foreach (cell, stmt->relations) { - RangeVar* rv = (RangeVar*)lfirst(cell); - Relation rel, myrel; - bool recurse = interpretInhOption(rv->inhOpt); - Oid myrelid; - LOCKMODE lockmode; - - /* - * Need to let ProcSleep know if we could cancel redistribution transaction which - * locks the table we want to truncate. ProcSleep will make sure we only cancel the - * transaction doing redistribution. - * - * Move the following check outside of loop?? - */ - if (IS_PGXC_COORDINATOR) - u_sess->exec_cxt.could_cancel_redistribution = true; - - if (IsGlobalTempTableParallelTrunc()) { - /* - * Truncate global temp table only cleans up the data in current backend, - * only low-level locks are required. 
- */ - myrelid = RangeVarGetRelid(rv, NoLock, false); - if (IsGlobalTempTable(myrelid)) { - lockmode = RowExclusiveLock; - } else { - lockmode = AccessExclusiveLock; - } - rel = heap_openrv(rv, lockmode); - myrel = rel; - } else { - rel = heap_openrv(rv, AccessExclusiveLock); - myrel = rel; - myrelid = RelationGetRelid(rel); - lockmode = AccessExclusiveLock; - } - - TrForbidAccessRbObject(RelationRelationId, myrelid, rv->relname); - - /* find matview exists or not. */ - Oid mlogid = find_matview_mlog_table(myrelid); - if (OidIsValid(mlogid)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Not support truncate table under materialized view. "))); - } - - /* don't throw error for "TRUNCATE foo, foo" */ - if (list_member_oid(relids, myrelid)) { - heap_close(rel, lockmode); - continue; - } - truncate_check_rel(rel); - rels = lappend(rels, rel); - relids = lappend_oid(relids, myrelid); - - if (recurse) { - ListCell* child = NULL; - List* children = NIL; - - children = find_all_inheritors(myrelid, AccessExclusiveLock, NULL); - - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - - if (list_member_oid(relids, childrelid)) - continue; - - /* find_all_inheritors already got lock */ - rel = heap_open(childrelid, NoLock); - truncate_check_rel(rel); - rels = lappend(rels, rel); - relids = lappend_oid(relids, childrelid); - } - } - -#ifdef ENABLE_MULTIPLE_NODES - PreventDDLIfTsdbDisabled(myrelid); - /* - * for timeseries relation, truncate tag table and delta table(if exists) on each DN - */ - if (IS_PGXC_DATANODE && unlikely(RelationIsTsStore(rel))) { - Oid tag_relid = get_tag_relid(RelationGetRelationName(rel), rel->rd_rel->relnamespace); - Relation tag_rel = heap_open(tag_relid, AccessExclusiveLock); - truncate_check_rel(tag_rel); - rels = lappend(rels, tag_rel); - relids = lappend_oid(relids, tag_relid); - - if (Tsdb::RelationEnablesTsdbDelta(rel)) { - Relation delta_rel = Tsdb::RelationGetDeltaRelation(rel, AccessExclusiveLock); - 
truncate_check_rel(delta_rel); - rels = lappend(rels, delta_rel); - relids = lappend_oid(relids, RelationGetRelid(delta_rel)); - } - } -#endif /* ENABLE_MULTIPLE_NODES */ - -#ifdef PGXC - /* - * If the truncate table is in the process of redistribution i.e - * ALTER TABLE myrelid.table SET (APPEND_MODE=ON, rel_cn_oid = myrelid), - * we have to truncate delete delta table and myrelid new table assocated - * with myrelid. - */ - if (IS_PGXC_DATANODE && RelationInClusterResizing(myrel) && !RelationInClusterResizingReadOnly(myrel)) { - /* - * Always keep the order consistent by operating on multi catchup delete delta first and then the delete - * delta. - */ - Relation delete_delta_rel_x = - GetAndOpenDeleteDeltaRel(myrel, AccessExclusiveLock, true); /* Multi catchup delete delta */ - Relation delete_delta_rel = GetAndOpenDeleteDeltaRel(myrel, AccessExclusiveLock, false); - Relation new_table_rel = GetAndOpenNewTableRel(myrel, AccessExclusiveLock); - - /* Multi catchup delta relation can be NULL. */ - if (delete_delta_rel_x) { - truncate_check_rel(delete_delta_rel_x); - rels = lappend(rels, delete_delta_rel_x); - relids = lappend_oid(relids, RelationGetRelid(delete_delta_rel_x)); - } - - /* - * delete_delta_rel and new_table_rel won't be NULL when myrel is in - * redistribution. GetAndOpenDeleteDeltaRel and GetAndOpenNewTableRel will - * issue errors if they are NULL. delete_delt_rel and new_table_rel will - * be heap_close in the end when we travers rels list. - */ - truncate_check_rel(delete_delta_rel); - rels = lappend(rels, delete_delta_rel); - relids = lappend_oid(relids, RelationGetRelid(delete_delta_rel)); - truncate_check_rel(new_table_rel); - rels = lappend(rels, new_table_rel); - relids = lappend_oid(relids, RelationGetRelid(new_table_rel)); - - /* - * Saved non partition relation in list and will truncate - * redistribution relationed aux table before exit. 
- * Partition table will be handled at paritioin table flow - */ - rels_in_redis = lappend(rels_in_redis, myrel); - } -#endif - } - - /* - * In CASCADE mode, suck in all referencing relations as well. This - * requires multiple iterations to find indirectly-dependent relations. At - * each phase, we need to exclusive-lock new rels before looking for their - * dependencies, else we might miss something. Also, we check each rel as - * soon as we open it, to avoid a faux pas such as holding lock for a long - * time on a rel we have no permissions for. - */ - if (stmt->behavior == DROP_CASCADE) { - for (;;) { - List* newrelids = NIL; - - newrelids = heap_truncate_find_FKs(relids); - if (newrelids == NIL) - break; /* nothing else to add */ - - foreach (cell, newrelids) { - Oid relid = lfirst_oid(cell); - Relation rel; - - if (IsGlobalTempTableParallelTrunc() && IsGlobalTempTable(relid)) { - rel = heap_open(relid, RowExclusiveLock); - } else { - rel = heap_open(relid, AccessExclusiveLock); - } - ereport(NOTICE, (errmsg("truncate cascades to table \"%s\"", RelationGetRelationName(rel)))); - truncate_check_rel(rel); - rels = lappend(rels, rel); - relids = lappend_oid(relids, relid); - } - } - } - - /* - * Check foreign key references. In CASCADE mode, this should be - * unnecessary since we just pulled in all the references; but as a - * cross-check, do it anyway if in an Assert-enabled build. - */ -#ifdef USE_ASSERT_CHECKING - heap_truncate_check_FKs(rels, false); -#else - if (stmt->behavior == DROP_RESTRICT) - heap_truncate_check_FKs(rels, false); -#endif - - /* - * If we are asked to restart sequences, find all the sequences, lock them - * (we need AccessExclusiveLock for ResetSequence), and check permissions. - * We want to do this early since it's pointless to do all the truncation - * work only to fail on sequence permissions. 
- */ - if (stmt->restart_seqs) { - foreach (cell, rels) { - Relation rel = (Relation)lfirst(cell); - List* seqlist = getOwnedSequences(RelationGetRelid(rel)); - Oid autoinc_seqoid = RelAutoIncSeqOid(rel); - ListCell* seqcell = NULL; - - foreach (seqcell, seqlist) { - Oid seq_relid = lfirst_oid(seqcell); - Relation seq_rel; - - seq_rel = relation_open(seq_relid, AccessExclusiveLock); - - /* This check must match AlterSequence! */ - AclResult aclresult = pg_class_aclcheck(seq_relid, GetUserId(), ACL_ALTER); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(seq_relid, GetUserId()) && - !(isOperatoradmin(GetUserId()) && u_sess->proc_cxt.clientIsGsroach && - u_sess->attr.attr_security.operation_mode)) { - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, RelationGetRelationName(seq_rel)); - } - if (seq_relid == autoinc_seqoid) { - autoinc_seqoids = lappend_oid(autoinc_seqoids, seq_relid); - } else { - seq_relids = lappend_oid(seq_relids, seq_relid); - } - - relation_close(seq_rel, NoLock); - } - } - } else { - /* - * Even if we are not asked to restart sequences, - * we should restart sequences created by auto_increment column. - */ - foreach (cell, rels) { - Relation rel = (Relation)lfirst(cell); - Oid seq_relid = RelAutoIncSeqOid(rel); - if (!OidIsValid(seq_relid)) { - continue; - } - Relation seq_rel = relation_open(seq_relid, AccessExclusiveLock); - /* This check must match AlterSequence! */ - AclResult aclresult = pg_class_aclcheck(seq_relid, GetUserId(), ACL_ALTER); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(seq_relid, GetUserId()) && - !(isOperatoradmin(GetUserId()) && u_sess->proc_cxt.clientIsGsroach && - u_sess->attr.attr_security.operation_mode)) { - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, RelationGetRelationName(seq_rel)); - } - autoinc_seqoids = lappend_oid(autoinc_seqoids, seq_relid); - relation_close(seq_rel, NoLock); - } - } - - /* Prepare to catch AFTER triggers. 
*/ - AfterTriggerBeginQuery(); - - /* - * To fire triggers, we'll need an EState as well as a ResultRelInfo for - * each relation. We don't need to call ExecOpenIndices, though. - */ - estate = CreateExecutorState(); - resultRelInfos = (ResultRelInfo*)palloc(list_length(rels) * sizeof(ResultRelInfo)); - resultRelInfo = resultRelInfos; - foreach (cell, rels) { - Relation rel = (Relation)lfirst(cell); - - InitResultRelInfo(resultRelInfo, - rel, - 0, /* dummy rangetable index */ - 0); - resultRelInfo++; - } - estate->es_result_relations = resultRelInfos; - estate->es_num_result_relations = list_length(rels); - - /* - * Process all BEFORE STATEMENT TRUNCATE triggers before we begin - * truncating (this is because one of them might throw an error). Also, if - * we were to allow them to prevent statement execution, that would need - * to be handled here. - */ - resultRelInfo = resultRelInfos; - foreach (cell, rels) { - estate->es_result_relation_info = resultRelInfo; - ExecBSTruncateTriggers(estate, resultRelInfo); - resultRelInfo++; - } - - /* - * OK, truncate each table. - */ - mySubid = GetCurrentSubTransactionId(); - - foreach (cell, rels) { - Relation rel = (Relation)lfirst(cell); - Oid heap_relid; - Oid toast_relid; - bool is_shared = rel->rd_rel->relisshared; - MultiXactId minmulti; - /* - * This effectively deletes all rows in the table, and may be done - * in a serializable transaction. In that case we must record a - * rw-conflict in to this transaction from each transaction - * holding a predicate lock on the table. 
- */ - CheckTableForSerializableConflictIn(rel); - - if (RELATION_IS_GLOBAL_TEMP(rel) && !gtt_storage_attached(RelationGetRelid(rel))) { - continue; - } - - minmulti = GetOldestMultiXactId(); - -#ifdef ENABLE_MOT - if (RelationIsForeignTable(rel) && isMOTFromTblOid(RelationGetRelid(rel))) { - FdwRoutine* fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(rel)); - if (fdwroutine->TruncateForeignTable != NULL) { - fdwroutine->TruncateForeignTable(stmt, rel); - } - } else if (!RELATION_IS_PARTITIONED(rel)) { -#else - if (!RELATION_IS_PARTITIONED(rel)) { -#endif - RelationSetNewRelfilenode(rel, u_sess->utils_cxt.RecentXmin, - RelationIsColStore(rel) ? InvalidMultiXactId : minmulti, - isDfsTruncate); - - if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED) - heap_create_init_fork(rel); - - heap_relid = RelationGetRelid(rel); - toast_relid = rel->rd_rel->reltoastrelid; - - /* - * The same for the toast table, if any. - */ - if (OidIsValid(toast_relid)) { - if (IsGlobalTempTableParallelTrunc() && RELATION_IS_GLOBAL_TEMP(rel)) { - rel = relation_open(toast_relid, RowExclusiveLock); - } else { - rel = relation_open(toast_relid, AccessExclusiveLock); - } - RelationSetNewRelfilenode(rel, u_sess->utils_cxt.RecentXmin, minmulti); - if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED) - heap_create_init_fork(rel); - heap_close(rel, NoLock); - } - - /* report truncate to PgStatCollector */ - pgstat_count_truncate(rel); - - /* - * Reconstruct the indexes to match, and we're done. - */ - (void)ReindexRelation(heap_relid, REINDEX_REL_PROCESS_TOAST, REINDEX_ALL_INDEX, NULL, - NULL, false, ALL_KIND, RELATION_IS_GLOBAL_TEMP(rel) ? 
true : false); - } else { - /* truncate partitioned table */ - List* partTupleList = NIL; - ListCell* partCell = NULL; - - heap_relid = RelationGetRelid(rel); - /* partitioned table unspport the unlogged table */ - Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); - - /* process all partition and toast */ - partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, rel->rd_id); - foreach (partCell, partTupleList) { - if (RelationIsSubPartitioned(rel)) { - HeapTuple tup = (HeapTuple)lfirst(partCell); - Oid partOid = HeapTupleGetOid(tup); - Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); - Relation partRel = partitionGetRelation(rel, p); - List* subPartTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, partOid); - ListCell* subPartCell = NULL; - foreach (subPartCell, subPartTupleList) { - HeapTuple tup = (HeapTuple)lfirst(subPartCell); - TruncateOnePart(partRel, tup); - } - freePartList(subPartTupleList); - - releaseDummyRelation(&partRel); - partitionClose(rel, p, AccessExclusiveLock); - } else { - HeapTuple tup = (HeapTuple)lfirst(partCell); - TruncateOnePart(rel, tup); - } - } - - RelationSetNewRelfilenode(rel, u_sess->utils_cxt.RecentXmin, - RelationIsColStore(rel) ? InvalidMultiXactId : minmulti, - isDfsTruncate); - freePartList(partTupleList); - pgstat_report_truncate( - heap_relid, InvalidOid, is_shared); /* report truncate partitioned table to PgStatCollector */ - - /* process all index - * - * Global temporary tables do not support partitions, no need to add IsTruncGTT here. - */ - (void)ReindexRelation(heap_relid, REINDEX_REL_PROCESS_TOAST, REINDEX_ALL_INDEX, NULL); - } - } - - /* - * Restart owned sequences if we were asked to. 
- */ - foreach (cell, seq_relids) { - Oid seq_relid = lfirst_oid(cell); - - ResetSequence(seq_relid, false); - } - foreach (cell, autoinc_seqoids) { - Oid seq_relid = lfirst_oid(cell); - - ResetSequence(seq_relid, true); - } - - /* - * Process all AFTER STATEMENT TRUNCATE triggers. - */ - resultRelInfo = resultRelInfos; - foreach (cell, rels) { - estate->es_result_relation_info = resultRelInfo; - ExecASTruncateTriggers(estate, resultRelInfo); - resultRelInfo++; - } - -#ifdef ENABLE_MULTIPLE_NODES - /* - * In Postgres-XC, TRUNCATE needs to be launched to remote nodes before the - * AFTER triggers are launched. This insures that the triggers are being fired - * by correct events. - */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (u_sess->attr.attr_sql.enable_parallel_ddl && !isFirstNode) { - bool is_temp = false; - RemoteQuery* step = makeNode(RemoteQuery); - ExecNodes* exec_nodes = NULL; - - /* Check un-allowed case where truncate tables from different node groups */ - if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); - } - - foreach (cell, stmt->relations) { - Oid relid; - RangeVar* rel = (RangeVar*)lfirst(cell); - - relid = RangeVarGetRelid(rel, NoLock, false); - - if (exec_nodes == NULL) { - exec_nodes = RelidGetExecNodes(relid); - } - - if (IsTempTable(relid)) { - is_temp = true; - break; - } - } - - step->combine_type = COMBINE_TYPE_SAME; - step->exec_nodes = exec_nodes; - step->sql_statement = pstrdup(sql_statement); - step->force_autocommit = false; - step->exec_type = EXEC_ON_DATANODES; - step->is_temp = is_temp; - ExecRemoteUtility_ParallelDDLMode(step, FirstExecNode); - pfree_ext(step->sql_statement); - pfree_ext(step); - } else { - bool is_temp = false; - RemoteQuery* step = makeNode(RemoteQuery); - ExecNodes* exec_nodes = NULL; - - /* Check un-allowed case where truncate tables from 
different node groups */ - if (!ObjectsInSameNodeGroup(stmt->relations, T_TruncateStmt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("NOT-SUPPORT: Not support TRUNCATE multiple objects different nodegroup"))); - } - - foreach (cell, stmt->relations) { - Oid relid; - RangeVar* rel = (RangeVar*)lfirst(cell); - - relid = RangeVarGetRelid(rel, NoLock, false); - - if (exec_nodes == NULL) { - exec_nodes = RelidGetExecNodes(relid); - } - - if (IsTempTable(relid)) { - is_temp = true; - break; - } - } - - step->combine_type = COMBINE_TYPE_SAME; - step->exec_nodes = exec_nodes; - step->sql_statement = pstrdup(sql_statement); - step->force_autocommit = false; - step->exec_type = is_temp ? EXEC_ON_DATANODES : EXEC_ON_ALL_NODES; - step->is_temp = is_temp; - ExecRemoteUtility(step); - pfree_ext(step->sql_statement); - pfree_ext(step); - } - } -#endif - - /* Handle queued AFTER triggers */ - AfterTriggerEndQuery(estate); - -#ifdef PGXC - /* Need to reset start and end ctid of rel in rels_in_redis list */ - if (IS_PGXC_DATANODE) { - foreach (cell, rels_in_redis) { - Relation rel = (Relation)lfirst(cell); - if (!RELATION_IS_PARTITIONED(rel)) { - ResetRelRedisCtidRelOptions( - rel, InvalidOid, RELOID, Natts_pg_class, Anum_pg_class_reloptions, RelationRelationId); - } else { - ResetPartsRedisCtidRelOptions(rel); - } - elog(LOG, "reset redis rel %s start and end ctid.", RelationGetRelationName(rel)); - } - } -#endif - - /* We can clean up the EState now */ - FreeExecutorState(estate); - - /* And close the rels (can't do this while EState still holds refs) */ - foreach (cell, rels) { - Relation rel = (Relation)lfirst(cell); - - /* - * In order to execute truncate concurrently for GTT, GTT does not record time of truancate relation. - */ - if (!(IsGlobalTempTableParallelTrunc() && RELATION_IS_GLOBAL_TEMP(rel))) { - /* Record time of truancate relation. 
*/ - recordRelationMTime(rel->rd_id, rel->rd_rel->relkind); - } - - heap_close(rel, NoLock); - } -} - -/* - * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate - */ -void truncate_check_rel(Relation rel) -{ - AclResult aclresult; - - /* Only allow truncate on regular tables or MOT tables */ - /* @hdfs - * Add error msg for a foreign table - */ - if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) { -#ifdef ENABLE_MOT - if (!isMOTFromTblOid(RelationGetRelid(rel))) { -#endif - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("It is not supported to truncate foreign table \"%s\".", RelationGetRelationName(rel)))); -#ifdef ENABLE_MOT - } -#endif - } else if (rel->rd_rel->relkind == RELKIND_STREAM) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("It is not supported to truncate stream \"%s\".", RelationGetRelationName(rel)))); - } else if (rel->rd_rel->relkind == RELKIND_MATVIEW) { -#ifdef ENABLE_MULTIPLE_NODES - if (!IS_PGXC_DATANODE) -#endif - { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("It is not supported to truncate matview \"%s\"", RelationGetRelationName(rel)))); - } - } else if (rel->rd_rel->relkind != RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("It is not supported to truncate non-table \"%s\"", RelationGetRelationName(rel)))); - } - - if (is_ledger_related_rel(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("It is not supported to truncate blockchain table \"%s\"", RelationGetRelationName(rel)))); - } - - /* Permissions checks */ - aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_TRUNCATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, RelationGetRelationName(rel)); - - if (!g_instance.attr.attr_common.allowSystemTableMods && !u_sess->attr.attr_common.IsInplaceUpgrade && - IsSystemRelation(rel) && !WLMRelationCanTruncate(rel)) - ereport(ERROR, - 
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", RelationGetRelationName(rel)))); - - /* Forbid truncate on shared relation during upgrade, to protect global/pg_filenode.map not changed */ - if (u_sess->attr.attr_common.upgrade_mode != 0 && - rel->rd_id < FirstBootstrapObjectId && rel->rd_rel->relisshared && - t_thrd.proc->workingVersionNum < RELMAP_4K_VERSION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot truncate shared relation during upgrade"))); - } - - /* - * Don't allow truncate on temp tables of other backends ... their local - * buffer manager is not going to cope. - */ - if (RELATION_IS_OTHER_TEMP(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot truncate temporary tables of other sessions"))); - } - - /* - * Also check for active uses of the relation in the current transaction, - * including open scans and pending AFTER trigger events. - */ - CheckTableNotInUse(rel, "TRUNCATE"); -} - -/* - * storage_name - * returns the name corresponding to a typstorage/attstorage enum value - */ -static const char* storage_name(char c) -{ - switch (c) { - case 'p': - return "PLAIN"; - case 'm': - return "MAIN"; - case 'x': - return "EXTENDED"; - case 'e': - return "EXTERNAL"; - default: - return "???"; - } -} - -/* ---------- - * MergeAttributes - * Returns new schema given initial schema and superclasses. - * - * Input arguments: - * 'schema' is the column/attribute definition for the table. (It's a list - * of ColumnDef's.) It is destructively changed. - * 'supers' is a list of names (as RangeVar nodes) of parent relations. - * 'relpersistence' is a persistence type of the table. - * - * Output arguments: - * 'supOids' receives a list of the OIDs of the parent relations. - * 'supconstr' receives a list of constraints belonging to the parents, - * updated as necessary to be valid for the child. 
- * 'supOidCount' is set to the number of parents that have OID columns. - * - * Return value: - * Completed schema list. - * - * Notes: - * The order in which the attributes are inherited is very important. - * Intuitively, the inherited attributes should come first. If a table - * inherits from multiple parents, the order of those attributes are - * according to the order of the parents specified in CREATE TABLE. - * - * Here's an example: - * - * create table person (name text, age int4, location point); - * create table emp (salary int4, manager text) inherits(person); - * create table student (gpa float8) inherits (person); - * create table stud_emp (percent int4) inherits (emp, student); - * - * The order of the attributes of stud_emp is: - * - * person {1:name, 2:age, 3:location} - * / \ - * {6:gpa} student emp {4:salary, 5:manager} - * \ / - * stud_emp {7:percent} - * - * If the same attribute name appears multiple times, then it appears - * in the result table in the proper location for its first appearance. - * - * Constraints (including NOT NULL constraints) for the child table - * are the union of all relevant constraints, from both the child schema - * and parent tables. - * - * The default value for a child column is defined as: - * (1) If the child schema specifies a default, that value is used. - * (2) If neither the child nor any parent specifies a default, then - * the column will not have a default. - * (3) If conflicting defaults are inherited from different parents - * (and not overridden by the child), an error is raised. - * (4) Otherwise the inherited default is used. - * Rule (3) is new in Postgres 7.1; in earlier releases you got a - * rather arbitrary choice of which parent default to use. 
- * ---------- - */ -static List* MergeAttributes( - List* schema, List* supers, char relpersistence, List** supOids, List** supconstr, int* supOidCount) -{ - ListCell* entry = NULL; - List* inhSchema = NIL; - List* parentOids = NIL; - List* constraints = NIL; - int parentsWithOids = 0; - bool have_bogus_defaults = false; - int child_attno; - - /* - * Check for and reject tables with too many columns. We perform this - * check relatively early for two reasons: (a) we don't run the risk of - * overflowing an AttrNumber in subsequent code (b) an O(n^2) algorithm is - * okay if we're processing <= 1600 columns, but could take minutes to - * execute if the user attempts to create a table with hundreds of - * thousands of columns. - * - * Note that we also need to check that any we do not exceed this figure - * after including columns from inherited relations. - */ - if (list_length(schema) > MaxHeapAttributeNumber) { - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), errmsg("tables can have at most %d columns", MaxHeapAttributeNumber))); - } - - /* - * Check for duplicate names in the explicit list of attributes. - * - * Although we might consider merging such entries in the same way that we - * handle name conflicts for inherited attributes, it seems to make more - * sense to assume such conflicts are errors. - */ - foreach (entry, schema) { - ColumnDef* coldef = (ColumnDef*)lfirst(entry); - ListCell* rest = lnext(entry); - ListCell* prev = entry; - - if (u_sess->attr.attr_sql.enable_cluster_resize && coldef->dropped_attr != NULL) { - continue; - } - if (coldef->typname == NULL) { - - /* - * Typed table column option that does not belong to a column from - * the type. This works because the columns from the type come - * first in the list. 
- */ - ereport( - ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" does not exist", coldef->colname))); - } - - while (rest != NULL) { - ColumnDef* restdef = (ColumnDef*)lfirst(rest); - ListCell* next = lnext(rest); /* need to save it in case we - * delete it */ - - if (strcmp(coldef->colname, restdef->colname) == 0) { - if (coldef->is_from_type) { - /* - * merge the column options into the column from the type - */ - coldef->is_not_null = restdef->is_not_null; - coldef->raw_default = restdef->raw_default; - coldef->cooked_default = restdef->cooked_default; - coldef->update_default = restdef->update_default; - coldef->constraints = restdef->constraints; - coldef->is_from_type = false; - coldef->kvtype = restdef->kvtype; - coldef->generatedCol = restdef->generatedCol; - coldef->cmprs_mode = restdef->cmprs_mode; - list_delete_cell(schema, rest, prev); - } else { -#ifndef ENABLE_MULTIPLE_NODES - if (u_sess->attr.attr_common.plsql_show_all_error) { - StringInfoData message; - initStringInfo(&message); - appendStringInfo(&message, "column \"%s\" specified more than once", coldef->colname); - InsertErrorMessage(message.data, 0, true); - } -#endif - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", coldef->colname))); - } - } - prev = rest; - rest = next; - } - } - - /* - * Scan the parents left-to-right, and merge their attributes to form a - * list of inherited attributes (inhSchema). Also check to see if we need - * to inherit an OID column. - */ - child_attno = 0; - foreach (entry, supers) { - RangeVar* parent = (RangeVar*)lfirst(entry); - Relation relation; - TupleDesc tupleDesc; - TupleConstr* constr = NULL; - AttrNumber* newattno = NULL; - AttrNumber parent_attno; - - /* - * A self-exclusive lock is needed here. 
If two backends attempt to - * add children to the same parent simultaneously, and that parent has - * no pre-existing children, then both will attempt to update the - * parent's relhassubclass field, leading to a "tuple concurrently - * updated" error. Also, this interlocks against a concurrent ANALYZE - * on the parent table, which might otherwise be attempting to clear - * the parent's relhassubclass field, if its previous children were - * recently dropped. - */ - relation = heap_openrv(parent, ShareUpdateExclusiveLock); - - if (relation->rd_rel->relkind != RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("inherited relation \"%s\" is not a table", parent->relname))); - } - - /* Permanent rels cannot inherit from temporary ones */ - if (relpersistence != RELPERSISTENCE_TEMP && relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot inherit from temporary relation \"%s\"", parent->relname))); - } - - /* If existing rel is temp, it must belong to this session */ - if (relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP && !RelationIsLocalTemp(relation)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot inherit from temporary relation of another session"))); - } - - /* - * We should have an UNDER permission flag for this, but for now, - * demand that creator of a child table own the parent. - */ - if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId())) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(relation)); - } - - /* - * Reject duplications in the list of parents. 
- */ - if (list_member_oid(parentOids, RelationGetRelid(relation))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_TABLE), - errmsg("relation \"%s\" would be inherited from more than once", parent->relname))); - } - - parentOids = lappend_oid(parentOids, RelationGetRelid(relation)); - - if (relation->rd_rel->relhasoids) { - parentsWithOids++; - } - - tupleDesc = RelationGetDescr(relation); - constr = tupleDesc->constr; - - /* - * newattno[] will contain the child-table attribute numbers for the - * attributes of this parent table. (They are not the same for - * parents after the first one, nor if we have dropped columns.) - */ - newattno = (AttrNumber*)palloc0(tupleDesc->natts * sizeof(AttrNumber)); - - for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) { - Form_pg_attribute attribute = &tupleDesc->attrs[parent_attno - 1]; - char* attributeName = NameStr(attribute->attname); - int exist_attno; - ColumnDef* def = NULL; - - /* - * Ignore dropped columns in the parent. - */ - if (attribute->attisdropped) { - continue; /* leave newattno entry as zero */ - } - - /* - * Does it conflict with some previously inherited column? - */ - exist_attno = findAttrByName(attributeName, inhSchema); - if (exist_attno > 0) { - Oid defTypeId; - int32 deftypmod; - Oid defCollId; - - /* - * Yes, try to merge the two column definitions. They must - * have the same type, typmod, and collation. 
- */ - ereport(NOTICE, (errmsg("merging multiple inherited definitions of column \"%s\"", attributeName))); - def = (ColumnDef*)list_nth(inhSchema, exist_attno - 1); - typenameTypeIdAndMod(NULL, def->typname, &defTypeId, &deftypmod); - if (defTypeId != attribute->atttypid || deftypmod != attribute->atttypmod) { - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a type conflict", attributeName), - errdetail( - "%s versus %s", TypeNameToString(def->typname), format_type_be(attribute->atttypid)))); - } - defCollId = GetColumnDefCollation(NULL, def, defTypeId); - if (defCollId != attribute->attcollation) { - ereport(ERROR, - (errcode(ERRCODE_COLLATION_MISMATCH), - errmsg("inherited column \"%s\" has a collation conflict", attributeName), - errdetail("\"%s\" versus \"%s\"", - get_collation_name(defCollId), - get_collation_name(attribute->attcollation)))); - } - - /* Copy storage parameter */ - if (def->storage == 0) { - def->storage = attribute->attstorage; - } else if (def->storage != attribute->attstorage) { - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a storage parameter conflict", attributeName), - errdetail( - "%s versus %s", storage_name(def->storage), storage_name(attribute->attstorage)))); - } - - def->inhcount++; - /* Merge of NOT NULL constraints = OR 'em together */ - if (attribute->attnotnull) { - def->is_not_null = true; - } - if (def->kvtype == ATT_KV_UNDEFINED) { - def->kvtype = attribute->attkvtype; - } - if (def->cmprs_mode == ATT_CMPR_UNDEFINED) { - def->cmprs_mode = attribute->attcmprmode; - } - /* Default and other constraints are handled below */ - newattno[parent_attno - 1] = exist_attno; - - /* Check for GENERATED conflicts */ - if (def->generatedCol != GetGeneratedCol(tupleDesc, parent_attno - 1)) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a generation conflict", attributeName))); - 
} - } else { - /* - * No, create a new inherited column - */ - def = makeNode(ColumnDef); - def->colname = pstrdup(attributeName); - def->typname = makeTypeNameFromOid(attribute->atttypid, attribute->atttypmod); - def->inhcount = 1; - def->is_local = false; - def->is_not_null = attribute->attnotnull; - def->is_from_type = false; - def->storage = attribute->attstorage; - def->kvtype = attribute->attkvtype; - def->cmprs_mode = attribute->attcmprmode; - def->raw_default = NULL; - def->update_default = NULL; - def->generatedCol = '\0'; - def->cooked_default = NULL; - def->collClause = NULL; - def->collOid = attribute->attcollation; - def->constraints = NIL; - inhSchema = lappend(inhSchema, def); - newattno[parent_attno - 1] = ++child_attno; - } - - /* - * Copy default if any - */ - if (attribute->atthasdef) { - Node* this_default = NULL; - Node* this_update_default = NULL; - AttrDefault* attrdef = NULL; - int i; - - /* Find default in constraint structure */ - if (unlikely(constr == NULL)) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("constr is NULL."))); - } - attrdef = constr->defval; - for (i = 0; i < constr->num_defval; i++) { - if (attrdef[i].adnum == parent_attno) { - this_default = (Node*)stringToNode_skip_extern_fields(attrdef[i].adbin); - if (attrdef[i].has_on_update) { - this_update_default = (Node*)stringToNode_skip_extern_fields(attrdef[i].adbin_on_update); - } - break; - } - } - Assert(this_default != NULL || this_update_default != NULL); - - /* - * If default expr could contain any vars, we'd need to fix - * 'em, but it can't; so default is ready to apply to child. - * - * If we already had a default from some prior parent, check - * to see if they are the same. If so, no problem; if not, - * mark the column as having a bogus default. Below, we will - * complain if the bogus default isn't overridden by the child - * schema. 
- */ - Assert(def->raw_default == NULL); - if (def->cooked_default == NULL) - def->cooked_default = this_default; - else if (!equal(def->cooked_default, this_default)) { - def->cooked_default = &u_sess->cmd_cxt.bogus_marker; - have_bogus_defaults = true; - } - if (def->update_default == NULL) - def->update_default = this_update_default; - } - } - - /* - * Now copy the CHECK constraints of this parent, adjusting attnos - * using the completed newattno[] map. Identically named constraints - * are merged if possible, else we throw error. - */ - if (constr != NULL && constr->num_check > 0) { - ConstrCheck* check = constr->check; - int i; - - for (i = 0; i < constr->num_check; i++) { - char* name = check[i].ccname; - Node* expr = NULL; - bool found_whole_row = false; - - /* ignore if the constraint is non-inheritable */ - if (check[i].ccnoinherit) - continue; - - /* Adjust Vars to match new table's column numbering */ - expr = map_variable_attnos( - (Node*)stringToNode(check[i].ccbin), 1, 0, newattno, tupleDesc->natts, &found_whole_row); - - /* - * For the moment we have to reject whole-row variables. - * We could convert them, if we knew the new table's rowtype - * OID, but that hasn't been assigned yet. 
- */ - if (found_whole_row) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot convert whole-row table reference"), - errdetail("Constraint \"%s\" contains a whole-row reference to table \"%s\".", - name, - RelationGetRelationName(relation)))); - - /* check for duplicate */ - if (!MergeCheckConstraint(constraints, name, expr)) { - /* nope, this is a new one */ - CookedConstraint* cooked = NULL; - - cooked = (CookedConstraint*)palloc(sizeof(CookedConstraint)); - cooked->contype = CONSTR_CHECK; - cooked->conoid = InvalidOid; /* until created */ - cooked->name = pstrdup(name); - cooked->attnum = 0; /* not used for constraints */ - cooked->expr = expr; - cooked->skip_validation = false; - cooked->is_local = false; - cooked->inhcount = 1; - cooked->is_no_inherit = false; - constraints = lappend(constraints, cooked); - } - } - } - - pfree_ext(newattno); - - /* - * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing - * the parent before the child is committed. - */ - heap_close(relation, NoLock); - } - - /* - * If we had no inherited attributes, the result schema is just the - * explicitly declared columns. Otherwise, we need to merge the declared - * columns into the inherited schema list. - */ - if (inhSchema != NIL) { - foreach (entry, schema) { - ColumnDef* newdef = (ColumnDef*)lfirst(entry); - char* attributeName = newdef->colname; - int exist_attno; - - /* - * Does it conflict with some previously inherited column? - */ - exist_attno = findAttrByName(attributeName, inhSchema); - if (exist_attno > 0) { - Oid defTypeId, newTypeId; - int32 deftypmod, newtypmod; - Oid defcollid, newcollid; - - /* - * Yes, try to merge the two column definitions. They must - * have the same type, typmod, and collation. 
- */ - ereport(LOG, - (errmsg("%s: merging column \"%s\" with inherited definition", - g_instance.attr.attr_common.PGXCNodeName, - attributeName))); - ereport(NOTICE, (errmsg("merging column \"%s\" with inherited definition", attributeName))); - ColumnDef* def = (ColumnDef*)list_nth(inhSchema, exist_attno - 1); - typenameTypeIdAndMod(NULL, def->typname, &defTypeId, &deftypmod); - typenameTypeIdAndMod(NULL, newdef->typname, &newTypeId, &newtypmod); - if (defTypeId != newTypeId || deftypmod != newtypmod) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" has a type conflict", attributeName), - errdetail( - "%s versus %s", TypeNameToString(def->typname), TypeNameToString(newdef->typname)))); - defcollid = GetColumnDefCollation(NULL, def, defTypeId); - newcollid = GetColumnDefCollation(NULL, newdef, newTypeId); - if (defcollid != newcollid) - ereport(ERROR, - (errcode(ERRCODE_COLLATION_MISMATCH), - errmsg("column \"%s\" has a collation conflict", attributeName), - errdetail( - "\"%s\" versus \"%s\"", get_collation_name(defcollid), get_collation_name(newcollid)))); - - /* Copy storage parameter */ - if (def->storage == 0) - def->storage = newdef->storage; - else if (newdef->storage != 0 && def->storage != newdef->storage) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" has a storage parameter conflict", attributeName), - errdetail("%s versus %s", storage_name(def->storage), storage_name(newdef->storage)))); - - /* Mark the column as locally defined */ - def->is_local = true; - /* Merge of NOT NULL constraints = OR 'em together */ - if (newdef->is_not_null) - def->is_not_null = true; - /* Copy kv type parameter */ - if (def->kvtype == ATT_KV_UNDEFINED) { - def->kvtype = newdef->storage; - } else if (newdef->kvtype != ATT_KV_UNDEFINED && def->kvtype != newdef->kvtype) { - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" has a kvtype parameter conflict", attributeName), - 
errdetail("%s versus %s", storage_name(def->kvtype), storage_name(newdef->kvtype)))); - } - if (def->cmprs_mode == ATT_CMPR_UNDEFINED) - def->cmprs_mode = newdef->cmprs_mode; - /* If new def has a default, override previous default */ - if (newdef->raw_default != NULL) { - def->raw_default = newdef->raw_default; - def->cooked_default = newdef->cooked_default; - } - if (newdef->update_default != NULL) { - def->update_default = newdef->update_default; - } - } else { - /* - * No, attach new column to result schema - */ - inhSchema = lappend(inhSchema, newdef); - } - } - - schema = inhSchema; - - /* - * Check that we haven't exceeded the legal # of columns after merging - * in inherited columns. - */ - if (list_length(schema) > MaxHeapAttributeNumber) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("tables can have at most %d columns", MaxHeapAttributeNumber))); - } - - /* - * If we found any conflicting parent default values, check to make sure - * they were overridden by the child. 
- */ - if (have_bogus_defaults) { - foreach (entry, schema) { - ColumnDef* def = (ColumnDef*)lfirst(entry); - - if (def->cooked_default == &u_sess->cmd_cxt.bogus_marker) { - if (def->generatedCol) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_INVALID_COLUMN_DEFINITION), - errmsg("column \"%s\" inherits conflicting generated column", def->colname), - errhint("To resolve the conflict, specify a generated expr explicitly."))); - } else { - ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_DEFINITION), - errmsg("column \"%s\" inherits conflicting default values", def->colname), - errhint("To resolve the conflict, specify a default explicitly."))); - } - } - } - } - - *supOids = parentOids; - *supconstr = constraints; - *supOidCount = parentsWithOids; - return schema; -} - -/* - * MergeCheckConstraint - * Try to merge an inherited CHECK constraint with previous ones - * - * If we inherit identically-named constraints from multiple parents, we must - * merge them, or throw an error if they don't have identical definitions. - * - * constraints is a list of CookedConstraint structs for previous constraints. - * - * Returns TRUE if merged (constraint is a duplicate), or FALSE if it's - * got a so-far-unique name, or throws error if conflict. - */ -static bool MergeCheckConstraint(List* constraints, char* name, Node* expr) -{ - ListCell* lc = NULL; - - foreach (lc, constraints) { - CookedConstraint* ccon = (CookedConstraint*)lfirst(lc); - - Assert(ccon->contype == CONSTR_CHECK); - - /* Non-matching names never conflict */ - if (strcmp(ccon->name, name) != 0) - continue; - - if (equal(expr, ccon->expr)) { - /* OK to merge */ - ccon->inhcount++; - return true; - } - - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("check constraint name \"%s\" appears multiple times but with different expressions", name))); - } - - return false; -} - -/* - * StoreCatalogInheritance - * Updates the system catalogs with proper inheritance information. 
- * - * supers is a list of the OIDs of the new relation's direct ancestors. - */ -static void StoreCatalogInheritance(Oid relationId, List* supers) -{ - Relation relation; - int16 seqNumber; - ListCell* entry = NULL; - - /* - * sanity checks - */ - AssertArg(OidIsValid(relationId)); - - if (supers == NIL) - return; - - /* - * Store INHERITS information in pg_inherits using direct ancestors only. - * Also enter dependencies on the direct ancestors, and make sure they are - * marked with relhassubclass = true. - * - * (Once upon a time, both direct and indirect ancestors were found here - * and then entered into pg_ipl. Since that catalog doesn't exist - * anymore, there's no need to look for indirect ancestors.) - */ - relation = heap_open(InheritsRelationId, RowExclusiveLock); - - seqNumber = 1; - foreach (entry, supers) { - Oid parentOid = lfirst_oid(entry); - - StoreCatalogInheritance1(relationId, parentOid, seqNumber, relation); - seqNumber++; - } - - heap_close(relation, RowExclusiveLock); -} - -/* - * Make catalog entries showing relationId as being an inheritance child - * of parentOid. inhRelation is the already-opened pg_inherits catalog. 
- */ -static void StoreCatalogInheritance1(Oid relationId, Oid parentOid, int16 seqNumber, Relation inhRelation) -{ - TupleDesc desc = RelationGetDescr(inhRelation); - Datum values[Natts_pg_inherits]; - bool nulls[Natts_pg_inherits]; - ObjectAddress childobject, parentobject; - HeapTuple tuple; - errno_t rc = EOK; - - /* - * Make the pg_inherits entry - */ - values[Anum_pg_inherits_inhrelid - 1] = ObjectIdGetDatum(relationId); - values[Anum_pg_inherits_inhparent - 1] = ObjectIdGetDatum(parentOid); - values[Anum_pg_inherits_inhseqno - 1] = Int16GetDatum(seqNumber); - - rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); - securec_check(rc, "\0", "\0"); - - tuple = heap_form_tuple(desc, values, nulls); - - (void)simple_heap_insert(inhRelation, tuple); - - CatalogUpdateIndexes(inhRelation, tuple); - - tableam_tops_free_tuple(tuple); - - /* - * Store a dependency too - */ - parentobject.classId = RelationRelationId; - parentobject.objectId = parentOid; - parentobject.objectSubId = 0; - childobject.classId = RelationRelationId; - childobject.objectId = relationId; - childobject.objectSubId = 0; - - recordDependencyOn(&childobject, &parentobject, DEPENDENCY_NORMAL); - - /* - * Mark the parent as having subclasses. - */ - SetRelationHasSubclass(parentOid, true); -} - -/* - * Look for an existing schema entry with the given name. - * - * Returns the index (starting with 1) if attribute already exists in schema, - * 0 if it doesn't. - */ -static int findAttrByName(const char* attributeName, List* schema) -{ - ListCell* s = NULL; - int i = 1; - - foreach (s, schema) { - ColumnDef* def = (ColumnDef*)lfirst(s); - - if (strcmp(attributeName, def->colname) == 0) - return i; - - i++; - } - return 0; -} - -/* - * SetRelationHasSubclass - * Set the value of the relation's relhassubclass field in pg_class. - * - * NOTE: caller must be holding an appropriate lock on the relation. - * ShareUpdateExclusiveLock is sufficient. 
- * - * NOTE: an important side-effect of this operation is that an SI invalidation - * message is sent out to all backends --- including me --- causing plans - * referencing the relation to be rebuilt with the new list of children. - * This must happen even if we find that no change is needed in the pg_class - * row. - */ -void SetRelationHasSubclass(Oid relationId, bool relhassubclass) -{ - Relation relationRelation; - HeapTuple tuple; - Form_pg_class classtuple; - - /* - * Fetch a modifiable copy of the tuple, modify it, update pg_class. - */ - relationRelation = heap_open(RelationRelationId, RowExclusiveLock); - tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relationId)); - if (!HeapTupleIsValid(tuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relationId))); - } - - classtuple = (Form_pg_class)GETSTRUCT(tuple); - if (classtuple->relhassubclass != relhassubclass) { - classtuple->relhassubclass = relhassubclass; - simple_heap_update(relationRelation, &tuple->t_self, tuple); - - /* keep the catalog indexes up to date */ - CatalogUpdateIndexes(relationRelation, tuple); - } else { - /* no need to change tuple, but force relcache rebuild anyway */ - CacheInvalidateRelcacheByTuple(tuple); - } - - tableam_tops_free_tuple(tuple); - heap_close(relationRelation, RowExclusiveLock); -} - -/* - * Must be owner or have alter privilege to alter table - */ -static void ATPermissionCheck(Form_pg_class classform, Oid relid) -{ - AclResult aclresult; - if (classform->relkind == RELKIND_COMPOSITE_TYPE) { - Oid typeoid = get_typeoid(classform->relnamespace, NameStr(classform->relname)); - aclresult = pg_type_aclcheck(typeoid, GetUserId(), ACL_ALTER); - } else if (classform->relkind == RELKIND_INDEX) { - Oid tableoid = IndexGetRelation(relid, false); - aclresult = pg_class_aclcheck(tableoid, GetUserId(), ACL_INDEX); - bool anyResult = false; - if (aclresult != ACLCHECK_OK && 
!IsSysSchema(GetNamespaceIdbyRelId(tableoid))) { - anyResult = HasSpecAnyPriv(GetUserId(), ALTER_ANY_INDEX, false); - } - aclresult = anyResult ? ACLCHECK_OK : aclresult; - } else { - aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_ALTER); - } - - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relid, GetUserId())) { - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, NameStr(classform->relname)); - } -} - -/* - * renameatt_check - basic sanity checks before attribute rename - */ -static void renameatt_check(Oid myrelid, Form_pg_class classform, bool recursing) -{ - char relkind = classform->relkind; - - if (classform->reloftype && !recursing) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot rename column of typed table"))); - } - - /* - * Renaming the columns of sequences or toast tables doesn't actually - * break anything from the system's point of view, since internal - * references are by attnum. But it doesn't seem right to allow users to - * change names that are hardcoded into the system, hence the following - * restriction. 
- */ - if (relkind != RELKIND_RELATION && - relkind != RELKIND_VIEW && - relkind != RELKIND_CONTQUERY && - relkind != RELKIND_MATVIEW && - relkind != RELKIND_COMPOSITE_TYPE && - relkind != RELKIND_INDEX && - relkind != RELKIND_GLOBAL_INDEX && - relkind != RELKIND_STREAM && - relkind != RELKIND_FOREIGN_TABLE) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, materialized view, composite type, index, stream or foreign table", - NameStr(classform->relname)))); - } - - /* Permission check */ - ATPermissionCheck(classform, myrelid); - - if (!(g_instance.attr.attr_common.allowSystemTableMods && myrelid >= FirstBootstrapObjectId) && - !u_sess->attr.attr_common.IsInplaceUpgrade && IsSystemClass(classform)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", NameStr(classform->relname)))); -} - -/* - * renameatt_internal - workhorse for renameatt - */ -static AttrNumber renameatt_internal(Oid myrelid, const char* oldattname, const char* newattname, bool recurse, - bool recursing, int expected_parents, DropBehavior behavior) -{ - Relation targetrelation; - Relation attrelation; - HeapTuple atttup; - Form_pg_attribute attform; - AttrNumber attnum; - - Relation ce_relation; - HeapTuple ce_tuple; - Form_gs_encrypted_columns ce_form; - - /* - * Grab an exclusive lock on the target table, which we will NOT release - * until end of transaction. - */ - targetrelation = relation_open(myrelid, AccessExclusiveLock); - renameatt_check(myrelid, RelationGetForm(targetrelation), recursing); - - /* - * if the 'recurse' flag is set then we are supposed to rename this - * attribute in all classes that inherit from 'relname' (as well as in - * 'relname'). - * - * any permissions or problems with duplicate attributes will cause the - * whole transaction to abort, which is what we want -- all or nothing. 
- */ - if (recurse) { - List* child_oids = NIL; - List* child_numparents = NIL; - ListCell* lo = NULL; - ListCell* li = NULL; - - /* - * we need the number of parents for each child so that the recursive - * calls to renameatt() can determine whether there are any parents - * outside the inheritance hierarchy being processed. - */ - child_oids = find_all_inheritors(myrelid, AccessExclusiveLock, &child_numparents); - - /* - * find_all_inheritors does the recursive search of the inheritance - * hierarchy, so all we have to do is process all of the relids in the - * list that it returns. - */ - forboth(lo, child_oids, li, child_numparents) - { - Oid childrelid = lfirst_oid(lo); - int numparents = lfirst_int(li); - - if (childrelid == myrelid) - continue; - /* note we need not recurse again */ - renameatt_internal(childrelid, oldattname, newattname, false, true, numparents, behavior); - } - } else { - /* - * If we are told not to recurse, there had better not be any child - * tables; else the rename would put them out of step. - * - * expected_parents will only be 0 if we are not already recursing. 
- */ - if (expected_parents == 0 && find_inheritance_children(myrelid, NoLock) != NIL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("inherited column \"%s\" must be renamed in child tables too", oldattname))); - } - - /* rename attributes in typed tables of composite type */ - if (targetrelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) { - List* child_oids = NIL; - ListCell* lo = NULL; - - child_oids = find_typed_table_dependencies( - targetrelation->rd_rel->reltype, RelationGetRelationName(targetrelation), behavior); - - foreach (lo, child_oids) - renameatt_internal(lfirst_oid(lo), oldattname, newattname, true, true, 0, behavior); - } - - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - - atttup = SearchSysCacheCopyAttName(myrelid, oldattname); - if (!HeapTupleIsValid(atttup)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" does not exist", oldattname))); - } - attform = (Form_pg_attribute)GETSTRUCT(atttup); - - attnum = attform->attnum; - if (attnum <= 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot rename system column \"%s\"", oldattname))); - } - - /* - * if the attribute is inherited, forbid the renaming. if this is a - * top-level call to renameatt(), then expected_parents will be 0, so the - * effect of this code will be to prohibit the renaming if the attribute - * is inherited at all. if this is a recursive call to renameatt(), - * expected_parents will be the number of parents the current relation has - * within the inheritance hierarchy being processed, so we'll prohibit the - * renaming only if there are additional parents from elsewhere. 
- */ - if (attform->attinhcount > expected_parents) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot rename inherited column \"%s\"", oldattname))); - } - - /* new name should not already exist */ - check_for_column_name_collision(targetrelation, newattname); - - /* new name should not conflict with system columns */ - if (CHCHK_PSORT_RESERVE_COLUMN(newattname)) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column name \"%s\" conflicts with a system column name", newattname))); - } - - /* apply the update */ - (void)namestrcpy(&(attform->attname), newattname); - bool should_rename_ce = false; - if (attform->atttypid == BYTEAWITHOUTORDERWITHEQUALCOLOID || attform->atttypid == BYTEAWITHOUTORDERCOLOID) { - should_rename_ce = true; - } - - simple_heap_update(attrelation, &atttup->t_self, atttup); - - /* keep system catalog indexes current */ - CatalogUpdateIndexes(attrelation, atttup); - tableam_tops_free_tuple(atttup); - heap_close(attrelation, RowExclusiveLock); - - /* recurse rename cstore delta table column name */ -#ifdef ENABLE_MULTIPLE_NODES - if (g_instance.attr.attr_storage.enable_delta_store && RelationIsCUFormat(targetrelation)) { -#else - /* - * Under centrailzed mode, there may be unique index on delta table. When checking unique - * constraint, unique index on delta will be used. So we ignore enable_delta_store here - * and alter delta table at the same time. - */ - if (RelationIsCUFormat(targetrelation)) { -#endif - List* child_oids = NIL; - ListCell* child = NULL; - child_oids = find_cstore_delta(targetrelation, AccessExclusiveLock); - - foreach (child, child_oids) { - Oid childrelid = lfirst_oid(child); - if (childrelid == myrelid) - continue; - renameatt_internal(childrelid, oldattname, newattname, false, true, 1, behavior); - } - } - - /* Recode time of reaname relation att. 
*/ - recordRelationMTime(myrelid, targetrelation->rd_rel->relkind); - - relation_close(targetrelation, NoLock); /* close rel but keep lock */ - - if (should_rename_ce) { -#ifdef ENABLE_MULTIPLE_NODES - if (!IS_PGXC_COORDINATOR) { - return; - } -#endif - ce_relation = heap_open(ClientLogicCachedColumnsId, RowExclusiveLock); - ce_tuple = search_sys_cache_copy_ce_col_name(myrelid, oldattname); - if (!HeapTupleIsValid(ce_tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("encrypted column \"%s\" does not exist", - oldattname))); - ce_form = (Form_gs_encrypted_columns) GETSTRUCT(ce_tuple); - namestrcpy(&(ce_form->column_name), newattname); - simple_heap_update(ce_relation, &ce_tuple->t_self, ce_tuple); - - /* keep system catalog indexes current */ - CatalogUpdateIndexes(ce_relation, ce_tuple); - - heap_freetuple_ext(ce_tuple); - - heap_close(ce_relation, RowExclusiveLock); - } - return attnum; -} - -/* - * Perform permissions and integrity checks before acquiring a relation lock. 
- */ -static void RangeVarCallbackForRenameAttribute( - const RangeVar* rv, Oid relid, Oid oldrelid, bool target_is_partition, void* arg) -{ - HeapTuple tuple; - Form_pg_class form; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) - return; /* concurrently dropped */ - form = (Form_pg_class)GETSTRUCT(tuple); - renameatt_check(relid, form, false); - ReleaseSysCache(tuple); -} - -/* - * renameatt - changes the name of a attribute in a relation - */ -ObjectAddress renameatt(RenameStmt* stmt) -{ - Oid relid; - AttrNumber attnum; - ObjectAddress address; - - /* lock level taken here should match renameatt_internal */ - relid = RangeVarGetRelidExtended(stmt->relation, - AccessExclusiveLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForRenameAttribute, - NULL); - - if (!OidIsValid(relid)) { - ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", stmt->relation->relname))); - return InvalidObjectAddress; - } - - TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); - - // Check relations's internal mask - Relation rel = relation_open(relid, AccessShareLock); - if ((((uint32)RelationGetInternalMask(rel)) & INTERNAL_MASK_DALTER)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("internal relation doesn't allow ALTER"))); -#ifdef ENABLE_MULTIPLE_NODES - if (RelationIsTsStore(rel)) { - bool kvtype_tag = get_kvtype_by_oldname(rel, stmt->subname); - if (kvtype_tag) { - Oid tag_relid = get_tag_relid(RelationGetRelationName(rel), RelationGetNamespace(rel)); - renameatt_internal(tag_relid, - stmt->subname, - stmt->newname, - interpretInhOption(stmt->relation->inhOpt), - false, - 0, - stmt->behavior); - } - } -#endif - /* We allow to alter global temp table only this session use it */ - CheckGttTableInUse(rel); - if (is_masked_relation(relid, stmt->subname) && !u_sess->attr.attr_common.IsInplaceUpgrade){ - ereport(ERROR, 
(errmodule(MOD_COMMAND), errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Column: %s has bound some masking policies, can not be renamed.", stmt->subname), - errdetail("cannot rename masking column"))); - } - -#ifdef ENABLE_MOT - if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(RelationGetRelid(rel))) { - RenameForeingTableCmd cmd = { - T_RenameForeingTableCmd, - relid, - stmt->renameType, - stmt->subname, - stmt->newname - }; - FdwRoutine* fdwroutine; - - if (rel->rd_fdwroutine != nullptr) { - fdwroutine = rel->rd_fdwroutine; - } else { - fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(rel)); - } - - if (fdwroutine->ValidateTableDef != nullptr) { - fdwroutine->ValidateTableDef((Node*)&cmd); - } - } -#endif - - relation_close(rel, AccessShareLock); - - attnum = renameatt_internal(relid, - stmt->subname, /* old att name */ - stmt->newname, /* new att name */ - interpretInhOption(stmt->relation->inhOpt), /* recursive? */ - false, /* recursing? */ - 0, /* expected inhcount */ - stmt->behavior); - - /* This is an ALTER TABLE command so it's about the relid */ - ObjectAddressSubSet(address, RelationRelationId, relid, attnum); - return address; - -} - -/* - * same logic as renameatt_internal - */ -static ObjectAddress rename_constraint_internal(Oid myrelid, Oid mytypid, const char* oldconname, const char* newconname, - bool recurse, bool recursing, int expected_parents) -{ - Relation targetrelation = NULL; - Oid constraintOid; - HeapTuple tuple; - Form_pg_constraint con; - ObjectAddress address; - - AssertArg(!myrelid || !mytypid); - - if (mytypid) { - constraintOid = get_domain_constraint_oid(mytypid, oldconname, false); - } else { - targetrelation = relation_open(myrelid, AccessExclusiveLock); - - /* We allow to alter global temp table only this session use it */ - CheckGttTableInUse(targetrelation); - - /* - * don't tell it whether we're recursing; we allow changing typed - * tables here - */ - renameatt_check(myrelid, 
RelationGetForm(targetrelation), false); - - constraintOid = get_relation_constraint_oid(myrelid, oldconname, false); - } - - TrForbidAccessRbObject(ConstraintRelationId, constraintOid, oldconname); - - tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraintOid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for constraint %u", constraintOid))); - } - con = (Form_pg_constraint)GETSTRUCT(tuple); - - if (myrelid && con->contype == CONSTRAINT_CHECK && !con->connoinherit) { - if (recurse) { - List* child_oids = NIL; - List* child_numparents = NIL; - ListCell* lo = NULL; - ListCell* li = NULL; - - child_oids = find_all_inheritors(myrelid, AccessExclusiveLock, &child_numparents); - - forboth(lo, child_oids, li, child_numparents) - { - Oid childrelid = lfirst_oid(lo); - int numparents = lfirst_int(li); - - if (childrelid == myrelid) - continue; - - rename_constraint_internal(childrelid, InvalidOid, oldconname, newconname, false, true, numparents); - } - } else { - if (expected_parents == 0 && find_inheritance_children(myrelid, NoLock) != NIL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("inherited constraint \"%s\" must be renamed in child tables too", oldconname))); - } - - if (con->coninhcount > expected_parents) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot rename inherited constraint \"%s\"", oldconname))); - } - - if (con->conindid && (con->contype == CONSTRAINT_PRIMARY || con->contype == CONSTRAINT_UNIQUE || - con->contype == CONSTRAINT_EXCLUSION)) - /* rename the index; this renames the constraint as well */ - RenameRelationInternal(con->conindid, newconname); - else - RenameConstraintById(constraintOid, newconname); - - ObjectAddressSet(address, ConstraintRelationId, constraintOid); - ReleaseSysCache(tuple); - - if (targetrelation) { - /* Recode time of rename relation constraint. 
*/ - recordRelationMTime(targetrelation->rd_id, targetrelation->rd_rel->relkind); - - /* - * Invalidate relcache so as others can see the new constraint name. - */ - CacheInvalidateRelcache(targetrelation); - - relation_close(targetrelation, NoLock); /* close rel but keep lock */ - } - return address; -} - -ObjectAddress RenameConstraint(RenameStmt* stmt) -{ - Oid relid = InvalidOid; - Oid typid = InvalidOid; - - if (stmt->relationType == OBJECT_DOMAIN) { - Relation rel; - HeapTuple tup; - - typid = typenameTypeId(NULL, makeTypeNameFromNameList(stmt->object)); - rel = heap_open(TypeRelationId, RowExclusiveLock); - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typid))); - } - checkDomainOwner(tup); - ReleaseSysCache(tup); - heap_close(rel, NoLock); - } else { - /* lock level taken here should match rename_constraint_internal */ - relid = RangeVarGetRelidExtended(stmt->relation, - AccessExclusiveLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForRenameAttribute, - NULL); - - if (!OidIsValid(relid)) { - ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", stmt->relation->relname))); - return InvalidObjectAddress; - } - } - - return rename_constraint_internal(relid, - typid, - stmt->subname, - stmt->newname, - stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */ - false, /* recursing? 
*/ - 0 /* expected inhcount */); -} - -static bool FindSynonymExist(char* relname, char* relnamespace) -{ - HeapTuple htup = NULL; - bool isnull = false; - bool result = false; - Relation rel_synonym = heap_open(PgSynonymRelationId, RowExclusiveLock); - SysScanDesc adscan = systable_beginscan(rel_synonym, InvalidOid, false, NULL, 0, NULL); - while (HeapTupleIsValid(htup = systable_getnext(adscan))) { - Datum val = heap_getattr(htup, Anum_pg_synonym_synobjschema, rel_synonym->rd_att, &isnull); - if (val && pg_strcasecmp(DatumGetCString(val), relnamespace) == 0) { - val = heap_getattr(htup, Anum_pg_synonym_synobjname, rel_synonym->rd_att, &isnull); - if (val && pg_strcasecmp(DatumGetCString(val), relname) == 0) { - result = true; - } - } - } - systable_endscan(adscan); - heap_close(rel_synonym, RowExclusiveLock); - return result; -} - -static int Compare_RenameTableNameData_func(const void* a, const void* b) -{ - if (strcmp(((const RenameTableNameData*)a)->schemaname, ((const RenameTableNameData*)b)->schemaname) == 0) { - return strcmp(((const RenameTableNameData*)a)->relname, ((const RenameTableNameData*)b)->relname); - } else { - return strcmp(((const RenameTableNameData*)a)->schemaname, ((const RenameTableNameData*)b)->schemaname); - } -} - -static ObjectAddress RenameTableFeature(RenameStmt* stmt) -{ - char *orgiSchema = NULL, *orgitable = NULL, *modfySchema = NULL, *modfytable = NULL; - Oid orgiNameSpace = InvalidOid, modfyNameSpace = InvalidOid; - List* search_path = fetch_search_path(false); - Oid relnamespace = InvalidOid; - RangeVar* temp_name = NULL; - Oid relid = InvalidOid, relid_temp = InvalidOid; - Relation rel_pg_class, rel_pg_type; - HeapTuple tup; - HeapTuple newtup; - Form_pg_class relform; - ObjectAddress address; - - Datum values[Natts_pg_class] = { 0 }; - bool nulls[Natts_pg_class] = { false }; - bool replaces[Natts_pg_class] = { false }; - Datum type_values[Natts_pg_type] = { 0 }; - bool type_nulls[Natts_pg_type] = { false }; - bool 
type_replaces[Natts_pg_type] = { false }; - - if (stmt->renameTargetList == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Cannot get rename table name and modify name"))); - } - RenameTableNameData storageTable[stmt->renameTargetList->length]; - bool tempSchema[stmt->renameTargetList->length] = { false }; - Relation lockRelation[stmt->renameTargetList->length]; - int tableName_Count = 0; - ListCell* rename_Cell = NULL; - foreach(rename_Cell, stmt->renameTargetList) { - RenameCell* renameInfo = (RenameCell*)lfirst(rename_Cell); - temp_name = renameInfo->original_name; - orgiSchema = temp_name->schemaname; - /* orgitable NOT NULL */ - Assert(temp_name->relname != NULL); - orgitable = temp_name->relname; - /* if schema name don't assign */ - if (orgiSchema == NULL && search_path != NIL) { - ListCell* l = NULL; - foreach(l, search_path) { - relnamespace = lfirst_oid(l); - if (!OidIsValid(relnamespace)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Cannot get current namespace on Rename Table."))); - } - relid = get_relname_relid(orgitable, relnamespace); - /* Traversal the search_path until the correct schema of table is found */ - if (OidIsValid(relid)) { - orgiSchema = get_namespace_name(relnamespace); - break; - } - } - } - if (orgiSchema == NULL) { - orgiSchema = get_namespace_name(PG_PUBLIC_NAMESPACE); - tempSchema[tableName_Count] = true; - } - storageTable[tableName_Count].schemaname = pstrdup(orgiSchema); - storageTable[tableName_Count].relname = pstrdup(orgitable); - tableName_Count++; - } - - if (stmt->renameTargetList->length >= 2) { - qsort((void*)storageTable, (size_t)stmt->renameTargetList->length, sizeof(RenameTableNameData), Compare_RenameTableNameData_func); - } - for (int num = 0; num < stmt->renameTargetList->length; num++) { - if (orgiSchema != NULL && !tempSchema[num]) { - orgiNameSpace = get_namespace_oid(storageTable[num].schemaname, false); - } else if 
(OidIsValid(u_sess->catalog_cxt.myTempNamespace)) { - orgiNameSpace = u_sess->catalog_cxt.myTempNamespace; - } - relid = get_relname_relid(storageTable[num].relname, orgiNameSpace); - if(!OidIsValid(relid)) { - lockRelation[num] = NULL; - orgiNameSpace = InvalidOid; - continue; - } else { - /* Don't support Tempporary Table */ - if (IsTempTable(relid) || IsGlobalTempTable(relid)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation %s is temporary table, Rename table don't support.", get_rel_name(relid)))); - } - lockRelation[num] = relation_open(relid, AccessExclusiveLock); - } - } - - foreach(rename_Cell, stmt->renameTargetList) { - /* acquire the schema and table name in renameTargetList */ - RenameCell* renameInfo = (RenameCell*)lfirst(rename_Cell); - temp_name = renameInfo->original_name; - orgiSchema = temp_name->schemaname; - if (orgiSchema != NULL) { - orgiNameSpace = get_namespace_oid(orgiSchema, false); - } - orgitable = temp_name->relname; - temp_name = renameInfo->modify_name; - modfySchema = temp_name->schemaname; - if (modfySchema != NULL) { - modfyNameSpace = get_namespace_oid(modfySchema, false); - } - /* modfytable NOT NULL */ - Assert(temp_name->relname); - modfytable = temp_name->relname; - - /* obtain search_path, get schema name */ - if (orgiSchema == NULL && search_path != NIL) { - ListCell* l = NULL; - foreach (l, search_path) { - relnamespace = lfirst_oid(l); - if (!OidIsValid(relnamespace)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Cannot get current namespace on Rename Table."))); - } - relid = get_relname_relid(orgitable, relnamespace); - /* Traversal the search_path until the correct schema of table is found */ - if (OidIsValid(relid)) { - orgiSchema = get_namespace_name(relnamespace); - orgiNameSpace = relnamespace; - if (modfySchema == NULL) { - modfyNameSpace = relnamespace; - } - break; - } - } - } else if (search_path == NIL && orgiSchema == NULL) { - ereport(ERROR, 
(errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table search_path get NIL in error."))); - } - if (modfySchema == NULL && orgiSchema != NULL) { - /* if modfytable table has no schema specified, - * it's the same as orgiNameSpace */ - modfyNameSpace = orgiNameSpace; - } - - /* Check whether exist Synonym on old table name and new table name */ - if (orgiSchema == NULL) { - orgiSchema = get_namespace_name(relnamespace); - } - if (FindSynonymExist(orgitable, orgiSchema)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table \"%s.%s\" exist Synonym, so Rename table can't execute.", - orgiSchema, orgitable))); - } else if (modfySchema != NULL && SearchSysCacheExists2(SYNONYMNAMENSP, PointerGetDatum(modfytable), ObjectIdGetDatum(modfyNameSpace))) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table \"%s.%s\" exist Synonym, so Rename table can't execute.", - modfySchema, modfytable))); - } else if ((orgiSchema != NULL && modfySchema == NULL) && - SearchSysCacheExists2(SYNONYMNAMENSP, PointerGetDatum(modfytable), ObjectIdGetDatum(orgiNameSpace))) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table \"%s.%s\" exist Synonym, so Rename table can't execute.", - orgiSchema, modfytable))); - } else if ((orgiSchema == NULL && modfySchema == NULL) && - SearchSysCacheExists2(SYNONYMNAMENSP, PointerGetDatum(modfytable), ObjectIdGetDatum(relnamespace))) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table \"%s.%s\" exist Synonym, so Rename table can't execute.", - get_namespace_name(relnamespace), modfytable))); - } else if (orgitable != NULL) { - Oid temp_namespace = InvalidOid; - if (orgiSchema != NULL) { - if (OidIsValid(orgiNameSpace)) { - temp_namespace = orgiNameSpace; - } else { - temp_namespace = relnamespace; - } - } else { - temp_namespace = relnamespace; - } - if (SearchSysCacheExists2(SYNONYMNAMENSP, PointerGetDatum(orgitable), 
ObjectIdGetDatum(temp_namespace))) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Rename Table \"%s.%s\" is Synonym, so Rename table can't support.", - get_namespace_name(temp_namespace), orgitable))); - } - } - - /* check a user's access privileges to a namespace */ - if (pg_namespace_aclcheck(orgiNameSpace, GetUserId(), ACL_USAGE) != ACLCHECK_OK) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("User %s don't have creat privileges on Schema %s.", - GetUserNameFromId(GetUserId()), get_namespace_name(orgiNameSpace)))); - } - if (pg_namespace_aclcheck(modfyNameSpace, GetUserId(), ACL_USAGE) != ACLCHECK_OK) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("User %s don't have creat privileges on Schema %s.", - GetUserNameFromId(GetUserId()), get_namespace_name(modfyNameSpace)))); - } - - /* Do rename table work */ - rel_pg_class = heap_open(RelationRelationId, RowExclusiveLock); - relid = get_relname_relid(orgitable, orgiNameSpace); - - /* Support view but cannot span schemaes */ - if (!OidIsValid(relid)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation \"%s.%s\" does not exist, skipping", get_namespace_name(orgiNameSpace), orgitable))); - } else if (IsRelaionView(relid) && modfyNameSpace != orgiNameSpace) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation %s is view, Rename table don't support span schemaes.", get_rel_name(relid)))); - } else if (orgiNameSpace == modfyNameSpace && pg_strcasecmp(orgitable, modfytable) == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation \"%s.%s\" already exists", get_namespace_name(modfyNameSpace), modfytable))); - } else if (pg_class_aclcheck(relid, GetUserId(), ACL_ALTER) == ACLCHECK_NO_PRIV) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("permission denied for relation %s.%s", get_namespace_name(orgiNameSpace), orgitable))); - } else if 
(OidIsValid(get_relname_relid(modfytable, modfyNameSpace))) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation \"%s.%s\" already exists", get_namespace_name(modfyNameSpace), modfytable))); - } - - /* Rename regular table */ - replaces[Anum_pg_class_relname - 1] = true; - values[Anum_pg_class_relname - 1] = DirectFunctionCall1(namein, CStringGetDatum(modfytable)); - type_replaces[Anum_pg_type_typname - 1] = true; - type_values[Anum_pg_type_typname - 1] = DirectFunctionCall1(namein, CStringGetDatum(modfytable)); - if (modfySchema != NULL) { - replaces[Anum_pg_class_relnamespace - 1] = true; - values[Anum_pg_class_relnamespace - 1] = ObjectIdGetDatum(modfyNameSpace); - type_replaces[Anum_pg_type_typnamespace - 1] = true; - type_values[Anum_pg_type_typnamespace - 1] = ObjectIdGetDatum(modfyNameSpace); - } - - /* delete table privileges */ - /* delete the table relacl. only superuser can operate the table */ - nulls[Anum_pg_class_relacl - 1] = true; - replaces[Anum_pg_class_relacl - 1] = true; - - tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cache lookup failed for relation %s", get_rel_name(relid)))); - } - - relform = (Form_pg_class)GETSTRUCT(tup); - if (relform->relkind == RELKIND_RELATION && relform->parttype == PARTTYPE_PARTITIONED_RELATION) { - renamePartitionedTable(relid, modfytable); - } else if (relform->relhastriggers && modfyNameSpace != orgiNameSpace) { - ScanKeyData key; - bool is_find = false; - HeapTuple tuple = NULL; - Relation tgrel = heap_open(TriggerRelationId, RowExclusiveLock); - ScanKeyInit(&key, Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - SysScanDesc scan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true, NULL, 1, &key); - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - Form_pg_trigger pg_trigger = (Form_pg_trigger)GETSTRUCT(tuple); - if 
(!pg_trigger->tgisinternal) { - is_find = true; - break; - } - } - systable_endscan(scan); - heap_close(tgrel, RowExclusiveLock); - if (is_find) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("Trigger in wrong schema on table %s", get_rel_name(relid)))); - } - } - /* Fix other dependent stuff */ - if (relform->relkind == RELKIND_RELATION || relform->relkind == RELKIND_MATVIEW) { - ObjectAddresses* objsMoved = NULL; - objsMoved = new_object_addresses(); - Relation rel; - rel = relation_open(relid, NoLock); - // AlterIndexNamespaces(rel_pg_class, rel, orgiNameSpace, modfyNameSpace, objsMoved); - // AlterSeqNamespaces(rel_pg_class, rel, orgiNameSpace, modfyNameSpace, objsMoved, AccessExclusiveLock); - AlterConstraintNamespaces(RelationGetRelid(rel), orgiNameSpace, modfyNameSpace, false, objsMoved); - relation_close(rel, NoLock); - } - - relid_temp = relid; - newtup = heap_modify_tuple(tup, RelationGetDescr(rel_pg_class), values, nulls, replaces); - simple_heap_update(rel_pg_class, &newtup->t_self, newtup); - CatalogUpdateIndexes(rel_pg_class, newtup); - ReleaseSysCache(tup); - tableam_tops_free_tuple(newtup); - heap_close(rel_pg_class, RowExclusiveLock); - CommandCounterIncrement(); - - rel_pg_type = heap_open(TypeRelationId, RowExclusiveLock); - relid = get_typeoid(orgiNameSpace, orgitable); - if (!OidIsValid(relid)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("type \"%s.%s\" does not exist, skipping", get_namespace_name(orgiNameSpace), orgitable))); - } - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cache lookup failed for type %s", get_rel_name(relid)))); - } - newtup = heap_modify_tuple(tup, RelationGetDescr(rel_pg_type), type_values, type_nulls, type_replaces); - simple_heap_update(rel_pg_type, &newtup->t_self, newtup); - CatalogUpdateIndexes(rel_pg_type, newtup); - ReleaseSysCache(tup); - 
tableam_tops_free_tuple(newtup); - heap_close(rel_pg_type, RowExclusiveLock); - CommandCounterIncrement(); - - /* update dependencies to point to the new schema */ - (void)changeDependencyFor(RelationRelationId, relid_temp, NamespaceRelationId, orgiNameSpace, modfyNameSpace); - } - for (int num = stmt->renameTargetList->length - 1; num >= 0; num--) { - if (lockRelation[num] != NULL) { - relation_close(lockRelation[num], AccessExclusiveLock); - } - } - for (int num = 0; num < stmt->renameTargetList->length; num++) { - pfree(storageTable[num].schemaname); - pfree(storageTable[num].relname); - } - list_free_ext(search_path); - ObjectAddressSet(address, RelationRelationId, relid); - return address; -} - -/* - * Execute ALTER TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE RENAME - */ -ObjectAddress RenameRelation(RenameStmt* stmt) -{ - if (stmt->renameTableflag) { - return RenameTableFeature(stmt); - } else { - Oid relid; - ObjectAddress address; - HeapTuple tuple; - Datum name; - bool isnull = false; - char *relname = NULL; - - /* - * Grab an exclusive lock on the target table, index, sequence or view, - * which we will NOT release until end of transaction. - * - * Lock level used here should match RenameRelationInternal, to avoid lock - * escalation. 
- */ - relid = RangeVarGetRelidExtended(stmt->relation, - AccessExclusiveLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForAlterRelation, - (void*)stmt); - - if (!OidIsValid(relid)) { - ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", stmt->relation->relname))); - return InvalidObjectAddress; - } - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); - } - name = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relname, &isnull); - Assert(!isnull); - relname = DatumGetName(name)->data; - if (ISMLOG(relname) || ISMATMAP(relname)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errmsg("Un-support feature"), - errdetail("%s table doesn't support this ALTER yet.", ISMLOG(relname) ? "mlog" : "matviewmap")))); - } - ReleaseSysCache(tuple); - - TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); - /* If table has history table, we need rename corresponding history table */ - if (is_ledger_usertable(relid)) { - rename_hist_by_usertable(relid, stmt->newname); - } - -#ifdef ENABLE_MOT - if (stmt->renameType == OBJECT_INDEX) { - Oid relOid = IndexGetRelation(relid, false); - Relation rel = RelationIdGetRelation(relOid); - if (RelationIsForeignTable(rel) && isMOTFromTblOid(RelationGetRelid(rel))) { - FdwRoutine* fdwroutine = rel->rd_fdwroutine; - if (fdwroutine == nullptr) { - fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(rel)); - } - if (fdwroutine->ValidateTableDef != nullptr) { - fdwroutine->ValidateTableDef((Node*)stmt); - } - } - RelationClose(rel); - } -#endif - - /* Do the work */ - RenameRelationInternal(relid, stmt->newname); - /* - * Record the changecsn of the table that defines the index - */ - if (stmt->renameType == OBJECT_INDEX) { - Oid relOid = IndexGetRelation(relid, false); - Relation userRelaiton = 
RelationIdGetRelation(relOid); - UpdatePgObjectChangecsn(relOid, userRelaiton->rd_rel->relkind); - RelationClose(userRelaiton); - } - ObjectAddressSet(address, RelationRelationId, relid); - - return address; -} -} - -/* - * RenameRelationInternal - change the name of a relation - * - * XXX - When renaming sequences, we don't bother to modify the - * sequence name that is stored within the sequence itself - * (this would cause problems with MVCC). In the future, - * the sequence name should probably be removed from the - * sequence, AFAIK there's no need for it to be there. - */ -void RenameRelationInternal(Oid myrelid, const char* newrelname) -{ - Relation targetrelation; - Relation relrelation; /* for RELATION relation */ - HeapTuple reltup; - Form_pg_class relform; - Oid namespaceId; - - /* - * Grab an exclusive lock on the target table, index, sequence or view, - * which we will NOT release until end of transaction. - */ - targetrelation = relation_open(myrelid, AccessExclusiveLock); - - if (RelationIsSubPartitioned(targetrelation)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, ALTER TABLE ... RENAME is not yet supported."), - errcause("The function is not implemented."), erraction("Create a new table to replace it.")))); - } - - if (OBJECT_IS_SEQUENCE(targetrelation->rd_rel->reltype)|| RELKIND_IS_SEQUENCE(targetrelation->rd_rel->relkind)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("RENAME SEQUENCE is not yet supported."))); - } - - /* We allow to alter global temp table only this session use it */ - CheckGttTableInUse(targetrelation); - - namespaceId = RelationGetNamespace(targetrelation); - - /* - * Find relation's pg_class tuple, and make sure newrelname isn't in use. 
- */ - relrelation = heap_open(RelationRelationId, RowExclusiveLock); - - reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(myrelid)); - if (!HeapTupleIsValid(reltup)) { - /* shouldn't happen */ - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", myrelid))); - } - - relform = (Form_pg_class)GETSTRUCT(reltup); - - /* - * Check relation name to ensure that it doesn't conflict with existing synonym. - */ - if (!IsInitdb && GetSynonymOid(newrelname, namespaceId, true) != InvalidOid) { - ereport(ERROR, - (errmsg("relation name is already used by an existing synonym in schema \"%s\"", - get_namespace_name(namespaceId)))); - } - - if (get_relname_relid(newrelname, namespaceId) != InvalidOid) - ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname))); - -#ifdef ENABLE_MULTIPLE_NODES - if (RelationIsTsStore(targetrelation)) { - if (!g_instance.attr.attr_common.enable_tsdb) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), - errmsg("Cannot rename timeseries table when enable_tsdb is off."))); - } - Tsdb::CheckTsRelname(newrelname); - /* Rename corresponding tag table synchronously */ - RenameTsTagRelaion(RelationGetRelationName(targetrelation), - targetrelation->rd_rel->relnamespace, newrelname); - } - - /* Rename range/list distributed table */ - if (IsLocatorDistributedBySlice(GetLocatorType(myrelid))) { - RenameDistributedTable(myrelid, newrelname); - } - -#endif /* ENABLE_MULTIPLE_NODES */ - - /* - * Update pg_class tuple with new relname. (Scribbling on reltup is OK - * because it's a copy...) - */ - (void)namestrcpy(&(relform->relname), newrelname); - - simple_heap_update(relrelation, &reltup->t_self, reltup); - - /* keep the system catalog indexes current */ - CatalogUpdateIndexes(relrelation, reltup); - - /* Recode time of rename relation. 
*/ - recordRelationMTime(myrelid, targetrelation->rd_rel->relkind); - - /* - * check rename's target is partitioned table - */ - relform = (Form_pg_class)GETSTRUCT(reltup); - if (relform->relkind == RELKIND_RELATION && relform->parttype == PARTTYPE_PARTITIONED_RELATION) { - renamePartitionedTable(myrelid, newrelname); - } - - tableam_tops_free_tuple(reltup); - heap_close(relrelation, RowExclusiveLock); - - /* - * Also rename the associated type, if any. - */ - if (OidIsValid(targetrelation->rd_rel->reltype)) - RenameTypeInternal(targetrelation->rd_rel->reltype, newrelname, namespaceId); - - /* - * Also rename the associated constraint, if any. - */ - if (RelationIsIndex(targetrelation)) { - Oid constraintId = get_index_constraint(myrelid); - - if (OidIsValid(constraintId)) - RenameConstraintById(constraintId, newrelname); - } - /* - * Close rel, but keep exclusive lock! - */ - relation_close(targetrelation, NoLock); -} - -/* - * @@GaussDB@@ - * Target : data distributed by range or list - * Brief : Change the name of distributed table in pgxc_slice - * Description : - * Notes : - */ -void RenameDistributedTable(Oid distributedTableOid, const char* distributedTableNewName) -{ - Relation distributedRelation = NULL; - HeapTuple distributedRelationTuple = NULL; - Form_pgxc_slice relationForm = NULL; - - /* shouldn't happen */ - if (!OidIsValid(distributedTableOid) || !PointerIsValid(distributedTableNewName)) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), errmsg("internal error, rename distributed table failed"))); - } - - /* - * Find relation's pgxc_slice tuple. 
- */ - distributedRelation = relation_open(PgxcSliceRelationId, RowExclusiveLock); - distributedRelationTuple = SearchTableEntryCopy(PGXC_SLICE_TYPE_TABLE, distributedTableOid); - - /* shouldn't happen */ - if (!HeapTupleIsValid(distributedRelationTuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", distributedTableOid))); - } - - relationForm = (Form_pgxc_slice)GETSTRUCT(distributedRelationTuple); - - (void)namestrcpy(&(relationForm->relname), distributedTableNewName); - simple_heap_update(distributedRelation, &(distributedRelationTuple->t_self), distributedRelationTuple); - CatalogUpdateIndexes(distributedRelation, distributedRelationTuple); - - tableam_tops_free_tuple(distributedRelationTuple); - - relation_close(distributedRelation, RowExclusiveLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Change the name of partitioned table in pg_partition - * Description : - * Notes : - */ -void renamePartitionedTable(Oid partitionedTableOid, const char* partitionedTableNewName) -{ - Relation partitionRelRelation = NULL; - HeapTuple partitionRelationTuple = NULL; - Form_pg_partition relationForm = NULL; - - /* shouldn't happen */ - if (!OidIsValid(partitionedTableOid) || !PointerIsValid(partitionedTableNewName)) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), errmsg("internal error, rename partitioned table failed"))); - } - - /* - * Find relation's pg_partition tuple. 
- */ - partitionRelRelation = relation_open(PartitionRelationId, RowExclusiveLock); - partitionRelationTuple = searchPgPartitionByParentIdCopy(PART_OBJ_TYPE_PARTED_TABLE, partitionedTableOid); - - /* shouldn't happen */ - if (!HeapTupleIsValid(partitionRelationTuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", partitionedTableOid))); - } - - relationForm = (Form_pg_partition)GETSTRUCT(partitionRelationTuple); - - /* - * Update pg_partition tuple with new relname. (Scribbling on reltup is OK - * because it's a copy...) - */ - (void)namestrcpy(&(relationForm->relname), partitionedTableNewName); - simple_heap_update(partitionRelRelation, &(partitionRelationTuple->t_self), partitionRelationTuple); - - /* - * keep the system catalog indexes current - */ - CatalogUpdateIndexes(partitionRelRelation, partitionRelationTuple); - - tableam_tops_free_tuple(partitionRelationTuple); - - relation_close(partitionRelRelation, RowExclusiveLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Execute rename partition - * Description : - * Notes : - */ -ObjectAddress renamePartition(RenameStmt* stmt) -{ - Oid partitionedTableOid = InvalidOid; - Oid partitionOid = InvalidOid; - ParseState* pstate = NULL; - RangePartitionDefState* rangePartDef = NULL; - Relation rel = NULL; - ObjectAddress address; - - /* shouldn't happen */ - if (!PointerIsValid(stmt) || !PointerIsValid(stmt->newname)) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), errmsg("internal error, rename partition failed"))); - } - - /* Get oid of target partitioned table. - * - * Grab a shared lock on the target partitioned table, - * which we will NOT release until end of transaction. - * - * Lock level used here should match renamePartitonInternal, to avoid lock - * escalation. 
- */ - partitionedTableOid = RangeVarGetRelidExtended(stmt->relation, - ShareUpdateExclusiveLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForAlterRelation, - (void*)stmt); - - if (!OidIsValid(partitionedTableOid)) { - ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", stmt->relation->relname))); - - return InvalidObjectAddress; - } - - TrForbidAccessRbObject(RelationRelationId, partitionedTableOid, stmt->relation->relname); - - rel = relation_open(partitionedTableOid, NoLock); - - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - /* - * check relation is partitioned table - */ - if (!PointerIsValid(rel->partMap)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("relation \"%s\" is not partitioned table", stmt->relation->relname))); - } - - /* - * Get oid of target partition. - * 1. If rename partition by name. - */ - if (PointerIsValid(stmt->subname)) { - partitionOid = PartitionNameGetPartitionOid(partitionedTableOid, - stmt->subname, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - - if (!OidIsValid(partitionOid)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg( - "partition \"%s\" of relation \"%s\" does not exist", stmt->subname, stmt->relation->relname))); - } - } else { - /* - * 2. If rename partition by partition key value. 
- */ - pstate = make_parsestate(NULL); - - rangePartDef = makeNode(RangePartitionDefState); - rangePartDef->boundary = stmt->object; - - transformPartitionValue(pstate, (Node*)rangePartDef, false); - - rangePartDef->boundary = transformConstIntoTargetType( - rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, rangePartDef->boundary); - - partitionOid = - PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, true, false); - - pfree_ext(pstate); - list_free_deep(rangePartDef->boundary); - pfree_ext(rangePartDef); - - if (!OidIsValid(partitionOid)) { - ereport( - ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); - } - } - - /* - * check partition new name does not exist. - */ - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, - NameGetDatum(stmt->newname), - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION), - ObjectIdGetDatum(partitionedTableOid))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_TABLE), - errmsg("partition \"%s\" of relation \"%s\" already exists", stmt->newname, stmt->relation->relname))); - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(rel)) { - LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - /* Do the work */ - renamePartitionInternal(partitionedTableOid, partitionOid, stmt->newname); - - UpdatePgObjectChangecsn(partitionedTableOid, rel->rd_rel->relkind); - - /* hold lock until committed */ - relation_close(rel, NoLock); - ObjectAddressSet(address, PartitionRelationId, partitionOid); - return address; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Execute rename partition index - * Description : - * Notes : - */ -ObjectAddress renamePartitionIndex(RenameStmt* stmt) -{ - Oid partitionedTableIndexOid = InvalidOid; - Oid partitionIndexOid = InvalidOid; - ObjectAddress address; - - /* shouldn't happen */ - 
if (!PointerIsValid(stmt) || !PointerIsValid(stmt->subname) || !PointerIsValid(stmt->newname)) { - ereport( - ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("internal error, rename partition index failed"))); - } - - /* Get oid of target partitioned table index. - * - * Grab an RowExclusive lock on the target partitioned table, - * which we will NOT release until end of transaction. - * - * Lock level used here should match renamePartitonInternal, to avoid lock - * escalation. - */ - partitionedTableIndexOid = RangeVarGetRelidExtended(stmt->relation, - AccessShareLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForAlterRelation, - (void*)stmt); - - if (!OidIsValid(partitionedTableIndexOid)) { - ereport(NOTICE, (errmsg("index \"%s\" does not exist, skipping", stmt->relation->relname))); - - return InvalidObjectAddress; - } - - TrForbidAccessRbObject(RelationRelationId, partitionedTableIndexOid, stmt->relation->relname); - - /* get partition index oid */ - partitionIndexOid = PartitionNameGetPartitionOid(partitionedTableIndexOid, - stmt->subname, - PART_OBJ_TYPE_INDEX_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - if (InvalidOid == partitionIndexOid) { - ereport( - ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("partition index \"%s\" does not exist", stmt->subname))); - } - - /* - * check partition index new name does not exist. 
- */ - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, - NameGetDatum(stmt->newname), - CharGetDatum(PART_OBJ_TYPE_INDEX_PARTITION), - ObjectIdGetDatum(partitionedTableIndexOid))) { - ereport( - ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("partition index \"%s\" already exists", stmt->newname))); - } - - /* Do the work */ - renamePartitionInternal(partitionedTableIndexOid, partitionIndexOid, stmt->newname); - - Oid parRelOid = IndexGetRelation(partitionedTableIndexOid, false); - Relation partRel = RelationIdGetRelation(parRelOid); - UpdatePgObjectChangecsn(parRelOid, partRel->rd_rel->relkind); - RelationClose(partRel); - ObjectAddressSet(address, PartitionRelationId, partitionIndexOid); - return address; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Change the name of partition object (partition/index) in pg_partition - * Description : - * Notes : - */ -void renamePartitionInternal(Oid partitionedTableOid, Oid partitionOid, const char* partitionNewName) -{ - Relation partitionRelRelation = NULL; - HeapTuple partitionRelationTuple = NULL; - Form_pg_partition relationForm = NULL; - - /* shouldn't happen */ - if (!OidIsValid(partitionedTableOid) || !OidIsValid(partitionOid) || !PointerIsValid(partitionNewName)) { - ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("internal error, rename partition failed"))); - } - - /* - * Find relation's pg_partition tuple. - */ - partitionRelRelation = relation_open(PartitionRelationId, RowExclusiveLock); - partitionRelationTuple = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(partitionOid)); - - /* shouldn't happen */ - if (!HeapTupleIsValid(partitionRelationTuple)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("cache lookup failed for partition %u of relation %u", partitionOid, partitionedTableOid))); - } - - relationForm = (Form_pg_partition)GETSTRUCT(partitionRelationTuple); - - /* - * Update pg_partition tuple with new relname. 
(Scribbling on reltup is OK - * because it's a copy...) - */ - (void)namestrcpy(&(relationForm->relname), partitionNewName); - simple_heap_update(partitionRelRelation, &(partitionRelationTuple->t_self), partitionRelationTuple); - - /* - * Keep the system catalog indexes current. - */ - CatalogUpdateIndexes(partitionRelRelation, partitionRelationTuple); - - tableam_tops_free_tuple(partitionRelationTuple); - - relation_close(partitionRelRelation, RowExclusiveLock); -} - -/* - * Disallow ALTER TABLE (and similar commands) when the current backend has - * any open reference to the target table besides the one just acquired by - * the calling command; this implies there's an open cursor or active plan. - * We need this check because our lock doesn't protect us against stomping - * on our own foot, only other people's feet! - * - * For ALTER TABLE, the only case known to cause serious trouble is ALTER - * COLUMN TYPE, and some changes are obviously pretty benign, so this could - * possibly be relaxed to only error out for certain types of alterations. - * But the use-case for allowing any of these things is not obvious, so we - * won't work hard at it for now. - * - * We also reject these commands if there are any pending AFTER trigger events - * for the rel. This is certainly necessary for the rewriting variants of - * ALTER TABLE, because they don't preserve tuple TIDs and so the pending - * events would try to fetch the wrong tuples. It might be overly cautious - * in other cases, but again it seems better to err on the side of paranoia. - * - * REINDEX calls this with "rel" referencing the index to be rebuilt; here - * we are worried about active indexscans on the index. The trigger-event - * check can be skipped, since we are doing no damage to the parent table. - * - * The statement name (eg, "ALTER TABLE") is passed for use in error messages. 
- */ -void CheckTableNotInUse(Relation rel, const char* stmt) -{ - int expected_refcnt; - - expected_refcnt = rel->rd_isnailed ? 2 : 1; - if (rel->rd_refcnt != expected_refcnt) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_IN_USE), - /* translator: first %s is a SQL command, eg ALTER TABLE */ - errmsg("cannot %s \"%s\" because " - "it is being used by active queries in this session", - stmt, - RelationGetRelationName(rel)))); - - if (!RelationIsIndex(rel) && AfterTriggerPendingOnRel(RelationGetRelid(rel))) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_IN_USE), - /* translator: first %s is a SQL command, eg ALTER TABLE */ - errmsg("cannot %s \"%s\" because " - "it has pending trigger events", - stmt, - RelationGetRelationName(rel)))); -} - -void CheckPartitionNotInUse(Partition part, const char* stmt) -{ - const int expected_refcnt = 1; - - if (part->pd_refcnt != expected_refcnt) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_IN_USE), - /* translator: first %s is a SQL command, eg ALTER TABLE */ - errmsg("cannot %s \"%s\" because " - "it is being used by active queries in this session", - stmt, - PartitionGetPartitionName(part)))); -} - -/* - * Check whether the type is a table of type, which can not be altered - */ -static void checkTypeIsTableOfOrPackage(AlterTableStmt* stmt) -{ - Oid typeNamespace = InvalidOid; - Oid typeOid = InvalidOid; - if (stmt->relation->schemaname != NULL) { - typeNamespace = get_namespace_oid(stmt->relation->schemaname, false); - } else { - ListCell* cell = NULL; - foreach (cell, u_sess->catalog_cxt.activeSearchPath) { - typeNamespace = lfirst_oid(cell); - typeOid = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum(stmt->relation->relname), - ObjectIdGetDatum(typeNamespace)); - if (OidIsValid(typeOid)) { -#ifndef ENABLE_MULTIPLE_NODES - /* don't allow to alter package or procedure type */ - if (IsPackageDependType(typeOid, InvalidOid)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmodule(MOD_PLSQL), - errmsg("Not allowed to alter type 
\"%s\"", stmt->relation->relname), - errdetail("\"%s\" is a package or procedure type", stmt->relation->relname), - errcause("feature not supported"), - erraction("check type name"))); - } -#endif - HeapTuple typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); - - if (HeapTupleIsValid(typeTup)) { - if (((Form_pg_type)GETSTRUCT(typeTup))->typtype == TYPTYPE_TABLEOF) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("table type does not support alter."))); - } - ReleaseSysCache(typeTup); - } - } - } - } -} - -/* - * AlterTableLookupRelation - * Look up, and lock, the OID for the relation named by an alter table - * statement. - */ -Oid AlterTableLookupRelation(AlterTableStmt* stmt, LOCKMODE lockmode, bool unlock) -{ - // treat unusable index and unusable index partition ops like drop index or dop index partition, - // though we do not provide interface to drop index partition - if ((stmt->relkind == OBJECT_INDEX) && (stmt->cmds->length == 1)) { - AlterTableCmd* index_cmd = (AlterTableCmd*)lfirst(stmt->cmds->head); - struct DropRelationCallbackState state = {0}; - state.concurrent = false; - state.heapOid = InvalidOid; - state.relkind = RELKIND_INDEX; - - if (index_cmd->subtype == AT_UnusableIndex) { - Oid relid = InvalidOid; - relid = RangeVarGetRelidExtended(stmt->relation, - lockmode, - stmt->missing_ok, - false, - false, // not on index partition - false, // not support synonym - RangeVarCallbackForDropRelation, - (void*)&state); - if (OidIsValid(state.heapOid) && u_sess->attr.attr_sql.enable_parallel_ddl && unlock) { - UnlockRelationOid(state.heapOid, state.concurrent ? 
ShareUpdateExclusiveLock : AccessExclusiveLock); - } - - return relid; - } else if (index_cmd->subtype == AT_UnusableIndexPartition) { - Oid relid = InvalidOid; - relid = RangeVarGetRelidExtended(stmt->relation, - lockmode, - stmt->missing_ok, - false, - true, // on index partition - false, // not support synonym - RangeVarCallbackForDropRelation, - (void*)&state); - if (OidIsValid(state.heapOid) && u_sess->attr.attr_sql.enable_parallel_ddl && unlock) { - UnlockRelationOid(state.heapOid, AccessShareLock); - } - - return relid; - } - } - - if (stmt->relkind == OBJECT_TABLE) { - ListCell* lcmd = NULL; - - foreach (lcmd, stmt->cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); - if (AT_TruncatePartition == cmd->subtype || AT_TruncateSubPartition == cmd->subtype) { - return RangeVarGetRelidExtended(stmt->relation, - lockmode, - stmt->missing_ok, - false, - true, - false, // not support synonym - RangeVarCallbackForAlterRelation, - (void*)stmt); - } - } - } - - /* TABLE OF TYPE and package type don't support alter */ - if (stmt->relkind == OBJECT_TYPE) { - checkTypeIsTableOfOrPackage(stmt); - } - - // all other ops on index and heap, goes on - return RangeVarGetRelidExtended(stmt->relation, - lockmode, - stmt->missing_ok, - false, - false, - false, // not support synonym - RangeVarCallbackForAlterRelation, - (void*)stmt); -} - -/* enum of alter-table-instantly - * DEFAULT_NULL means no operation to pg_attribute. - * DEFAULT_NOT_NULL_CONST means updating the attinitdefval of pg_attribute. - * DEFAULT_OTHER means rewriting the all tuples. - */ -typedef enum { - DEFAULT_NULL, /* attribute that has no default value or has null default value */ - DEFAULT_NOT_NULL_CONST, /* attribute that has const-expression default value */ - DEFAULT_OTHER /* other value */ -} AT_INSTANT_DEFAULT_VALUE; - -/* - * Estimate whether alter-table-instantly is effective. - * The follow does not adapt to alter-table-instantly: - * 1. unsupported data type - refer to supportType - * 2. 
the default value expression include volatile function - * 3. the memory used by default value is more than 128 bytes. - * 4. the default value is actually null - */ -static AT_INSTANT_DEFAULT_VALUE shouldUpdateAllTuples( - Expr* defaultExpr, Oid typeOid, int attLen, bool attByVal, bytea** defaultVal) -{ - bool isNull = false; - int i; - bool tooLong = false; - bytea* result = NULL; - errno_t rc; - - /* it's difficult to handle all the datatypes, espicially complex datatypes. - * so we just handle those datatypes which are normal and used most time. - */ - const Oid supportType[] = {BOOLOID, - BYTEAOID, - CHAROID, - INT8OID, - INT2OID, - INT4OID, - TEXTOID, - FLOAT4OID, - FLOAT8OID, - ABSTIMEOID, - RELTIMEOID, - TINTERVALOID, - BPCHAROID, - VARCHAROID, - NUMERICOID, - DATEOID, - TIMEOID, - TIMESTAMPOID, - TIMESTAMPTZOID, - INTERVALOID, - TIMETZOID, - INT1OID, - SMALLDATETIMEOID}; - - /* During inplace upgrade, we may allow extra column types. */ - const Oid extraSupportTypeInUpgrade[] = {OIDOID, NAMEOID, ACLITEMARRAYOID}; - - /* check data type, if the data type is not supported, rewrite table */ - bool support = false; - for (i = 0; i < int(sizeof(supportType) / sizeof(Oid)); ++i) { - if (supportType[i] == typeOid) { - support = true; - break; - } - } - - /* check extra supported type during upgrade if needed */ - if (!support && u_sess->attr.attr_common.IsInplaceUpgrade) { - for (i = 0; i < int(sizeof(extraSupportTypeInUpgrade) / sizeof(Oid)); ++i) { - if (extraSupportTypeInUpgrade[i] == typeOid) { - support = true; - break; - } - } - } - - if (!support) { - return DEFAULT_OTHER; - } - - /* alter-table-instantly does not support volatile default value. 
*/ - if (contain_volatile_functions((Node*)defaultExpr)) - return DEFAULT_OTHER; - - EState* estate = CreateExecutorState(); - ExprState* exprstate = ExecInitExpr(expression_planner(defaultExpr), NULL); - ExprContext* econtext = GetPerTupleExprContext(estate); - - MemoryContext newcxt = GetPerTupleMemoryContext(estate); - MemoryContext oldcxt = MemoryContextSwitchTo(newcxt); - Datum value = ExecEvalExpr(exprstate, econtext, &isNull); - (void)MemoryContextSwitchTo(oldcxt); - - if (!isNull) { - if (attByVal) { - result = (bytea*)palloc(attLen + VARHDRSZ); - SET_VARSIZE(result, attLen + VARHDRSZ); - store_att_byval(VARDATA(result), value, attLen); - } else { - int len = att_addlength_datum(0, attLen, DatumGetPointer(value)); - if (len >= ATT_DEFAULT_LEN) { - /* if the length of default value > 128, need rewrite table. - * this limitation ensure attinitdefval of relcache do not consume - * too many memory space. - */ - tooLong = true; - } else { - result = (bytea*)palloc(len + VARHDRSZ); - SET_VARSIZE(result, len + VARHDRSZ); - Assert((char*)result + VARHDRSZ == VARDATA(result)); - rc = memcpy_s((char*)result + VARHDRSZ, len, DatumGetPointer(value), len); - securec_check(rc, "", ""); - } - } - } - - FreeExecutorState(estate); - - if (tooLong) { - return DEFAULT_OTHER; - } - - *defaultVal = result; - - return isNull ? DEFAULT_NULL : DEFAULT_NOT_NULL_CONST; -} - -/* updateInitDefVal - * - * Update the attinitdefval field of pg_attribute for altering table instantly. 
- */ -static void updateInitDefVal(bytea* value, Relation rel, int16 attNum) -{ - Relation attRelation; - HeapTuple tup; - HeapTuple newTuple; - Datum replVals[Natts_pg_attribute]; - bool replNulls[Natts_pg_attribute]; - bool replRepls[Natts_pg_attribute]; - errno_t rc; - - attRelation = heap_open(AttributeRelationId, RowExclusiveLock); - - tup = SearchSysCache2(ATTNUM, ObjectIdGetDatum(RelationGetRelid(rel)), Int16GetDatum(attNum)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %d of relation %u", attNum, RelationGetRelid(rel)))); - } - - /* Build new tuple. */ - rc = memset_s(replNulls, sizeof(replNulls), false, sizeof(replNulls)); - securec_check(rc, "\0", "\0"); - - rc = memset_s(replRepls, sizeof(replRepls), false, sizeof(replRepls)); - securec_check(rc, "\0", "\0"); - - replVals[Anum_pg_attribute_attinitdefval - 1] = PointerGetDatum(value); - replRepls[Anum_pg_attribute_attinitdefval - 1] = true; - newTuple = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(attRelation), replVals, replNulls, replRepls); - ReleaseSysCache(tup); - - /* Update system catalog. */ - simple_heap_update(attRelation, &newTuple->t_self, newTuple); - CatalogUpdateIndexes(attRelation, newTuple); - tableam_tops_free_tuple(newTuple); - - heap_close(attRelation, RowExclusiveLock); - - /* Make the attribute's catalog entry visible */ - CommandCounterIncrement(); -} - -/* - * Clear all attrinitdefvals of relation with relid. It is called when rewrite - * table is finished and all tuple attrnum are equal to attrnum of tupledesc. 
- */ -void clearAttrInitDefVal(Oid relid) -{ - Datum replVals[Natts_pg_attribute]; - bool replNulls[Natts_pg_attribute]; - bool replRepls[Natts_pg_attribute]; - errno_t rc; - - Relation rel = heap_open(relid, AccessExclusiveLock); - - if (rel->rd_att->initdefvals != NULL) { - rc = memset_s(replNulls, sizeof(replNulls), false, sizeof(replNulls)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replRepls, sizeof(replRepls), false, sizeof(replRepls)); - securec_check(rc, "\0", "\0"); - - /* set the attribute *attinitdefval* null to clear its value */ - replVals[Anum_pg_attribute_attinitdefval - 1] = (Datum)0; - replRepls[Anum_pg_attribute_attinitdefval - 1] = true; - replNulls[Anum_pg_attribute_attinitdefval - 1] = true; - - int natts = rel->rd_att->natts; - HeapTuple* tuples = (HeapTuple*)palloc0(natts * sizeof(HeapTuple)); - Relation attRelation = heap_open(AttributeRelationId, RowExclusiveLock); - - /* Clear all attrinitdefvals of relation */ - for (int attno = 0; attno < natts; ++attno) { - if (rel->rd_att->initdefvals[attno].isNull) - continue; - - /* step 1: Build new tuple. */ - HeapTuple tup = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attno + 1)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %d of relation %u", attno + 1, relid))); - } - - tuples[attno] = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(attRelation), replVals, replNulls, replRepls); - ReleaseSysCache(tup); - - /* step 2: Update system catalog. 
*/ - simple_heap_update(attRelation, &tuples[attno]->t_self, tuples[attno]); - CatalogUpdateIndexes(attRelation, tuples[attno]); - - /* free newtuple */ - tableam_tops_free_tuple(tuples[attno]); - tuples[attno] = NULL; - } - - pfree_ext(tuples); - heap_close(attRelation, RowExclusiveLock); - } - - heap_close(rel, AccessExclusiveLock); - - /* Make the attribute's catalog entry visible */ - CommandCounterIncrement(); -} - -/* - * AlterTable - * Execute ALTER TABLE, which can be a list of subcommands - * - * ALTER TABLE is performed in three phases: - * 1. Examine subcommands and perform pre-transformation checking. - * 2. Update system catalogs. - * 3. Scan table(s) to check new constraints, and optionally recopy - * the data into new table(s). - * Phase 3 is not performed unless one or more of the subcommands requires - * it. The intention of this design is to allow multiple independent - * updates of the table schema to be performed with only one pass over the - * data. - * - * ATPrepCmd performs phase 1. A "work queue" entry is created for - * each table to be affected (there may be multiple affected tables if the - * commands traverse a table inheritance hierarchy). Also we do preliminary - * validation of the subcommands, including parse transformation of those - * expressions that need to be evaluated with respect to the old table - * schema. - * - * ATRewriteCatalogs performs phase 2 for each affected table. (Note that - * phases 2 and 3 normally do no explicit recursion, since phase 1 already - * did it --- although some subcommands have to recurse in phase 2 instead.) - * Certain subcommands need to be performed before others to avoid - * unnecessary conflicts; for example, DROP COLUMN should come before - * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple - * lists, one for each logical "pass" of phase 2. - * - * ATRewriteTables performs phase 3 for those tables that need it. 
- * - * Thanks to the magic of MVCC, an error anywhere along the way rolls back - * the whole operation; we don't have to do anything special to clean up. - * - * The caller must lock the relation, with an appropriate lock level - * for the subcommands requested. Any subcommand that needs to rewrite - * tuples in the table forces the whole command to be executed with - * AccessExclusiveLock (actually, that is currently required always, but - * we hope to relax it at some point). We pass the lock level down - * so that we can apply it recursively to inherited tables. Note that the - * lock level we want as we recurse might well be higher than required for - * that specific subcommand. So we pass down the overall lock requirement, - * rather than reassess it at lower levels. - * - */ -#ifdef PGXC -/* - * In Postgres-XC, an extension is added to ALTER TABLE for modification - * of the data distribution. Depending on the old and new distribution type - * of the relation redistributed, a list of redistribution subcommands is built. - * Data redistribution cannot be done in parallel of operations that need - * the table to be rewritten like column addition/deletion. - */ -#endif - -void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) -{ - Relation rel; - - /* Caller is required to provide an adequate lock. */ - rel = relation_open(relid, lockmode); - - if ((stmt->relkind == OBJECT_TABLE) && (rel->rd_rel->relkind == RELKIND_MATVIEW)) { - stmt->relkind = OBJECT_MATVIEW; - } - - /* We allow to alter global temp table only this session use it */ - CheckGttTableInUse(rel); - - CheckTableNotInUse(rel, "ALTER TABLE"); - -#ifdef ENABLE_MULTIPLE_NODES - PreventDDLIfTsdbDisabled(relid); -#endif /* ENABLE_MULTIPLE_NODES */ - - /* - * cmd list for 'ALTER FOREIGN TABLE ADD NODE'. - * cmd list for 'ALTER TABLE DFSTbl ADD NODE'. 
- */ - List* addNodeCmds = NIL; - - /* Unsupport ALTER statement for Foreign table */ - if (!stmt->fromCreate && (stmt->relkind == OBJECT_FOREIGN_TABLE || stmt->relkind == OBJECT_STREAM)) { - ListCell* lc = NULL; - - /* @hdfs - * Check whether support alter foreign table. Many FDW types for foreign tables exists, - * Each type foreign table has own mchanism, we use function pointor to realize. For example, - * wheather support alter owner, support alter colomn type, etc. - */ - FdwRoutine* fdwroutine = GetFdwRoutineByRelId(relid, true); - - if (fdwroutine != NULL && NULL != fdwroutine->ValidateTableDef) { - fdwroutine->ValidateTableDef((Node*)stmt); - } - - foreach (lc, stmt->cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lc); - - if (AT_AddNodeList == cmd->subtype || AT_DeleteNodeList == cmd->subtype || AT_SubCluster == cmd->subtype || - AT_UpdateSliceLike == cmd->subtype) { - /* append 'ALTER FOREIGN TABLE ADD NODE' cmd to ftAddNodeCmds */ - addNodeCmds = lappend(addNodeCmds, cmd); - } - } - } - - if (!stmt->fromCreate && (((uint32)RelationGetInternalMask(rel)) & INTERNAL_MASK_DALTER)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("internal relation doesn't allow ALTER")))); - } - - // Unsupport ALTER statement for column store - if (!stmt->fromCreate) { - ListCell* lc = NULL; - foreach (lc, stmt->cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lc); - if (cmd->subtype == AT_SetAutoIncrement) { - if (rel->rd_rel->relkind != RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("ALTER TABLE AUTO_INCREMENT only supports ordinary table.")))); - } - continue; - } - - if (AT_AddOids == cmd->subtype) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("ALTER TABLE ... 
SET WITH OIDS is not yet supported.")))); - } - - if (AT_DropOids == cmd->subtype) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("ALTER TABLE ... SET WITHOUT OIDS is not yet supported.")))); - } - - /* We only check basetable when not in redistribution */ - if (rel->rd_rel->relkind == RELKIND_RELATION && !u_sess->attr.attr_sql.enable_cluster_resize) { - /* Forbid some ALTER statements for Table that owns Matview */ - if ((InvalidOid != rel->rd_mlogoid) && !BASETABLE_SUPPORT_AT_CMD(cmd->subtype)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("table owning matview doesn't support this ALTER yet.")))); - } - - /* Unsupport ALTER statement for mlog/matviewmap table */ - if ((ISMLOG(rel->rd_rel->relname.data) || ISMATMAP(rel->rd_rel->relname.data)) && - !MLOG_MAP_SUPPORT_AT_CMD(cmd->subtype)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("%s table doesn't support this ALTER yet.", - ISMLOG(rel->rd_rel->relname.data) ? 
"mlog" : "matviewmap")))); - } - } - - /* Forbid some ALTER statements for Matview */ - if ((stmt->relkind == OBJECT_MATVIEW) && !MATVIEW_SUPPORT_AT_CMD(cmd->subtype)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER MATERIALIZED VIEW is not yet supported."))); - } - - if (RelationIsSubPartitioned(rel) && cmd->subtype == AT_ClusterOn) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster a subpartition table"))); - } - - if (RelationIsCUFormat(rel) && !CStoreSupportATCmd(cmd->subtype)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column-store relation doesn't support this ALTER yet"))); - } - - if (RelationIsTsStore(rel)) { - at_timeseries_check(rel, cmd); - } - - // We open up SetRelOptions for HDFS during online expansion so gs_redis could - // set append_mode=read_only. Also we need to open up in CheckRedistributeOption. - if (RelationIsPAXFormat(rel) && - !(DFS_SUPPORT_AT_CMD(cmd->subtype) || - (u_sess->attr.attr_sql.enable_cluster_resize && AT_SetRelOptions == cmd->subtype))) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("It is not supported to alter a DFS table."))); - } - - if (AT_SetTableSpace == cmd->subtype) { - char* tblspcName = cmd->name; - Oid tblspcOid = get_tablespace_oid(tblspcName, false); - if (IsSpecifiedTblspc(tblspcOid, FILESYSTEM_HDFS)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("It is not supported to alter DFS tablespace."))); - } - } - } - } - - // Next version remove hack patch for 'ALTER FOREIGN TABLE ... 
ADD NODE' - if (stmt->cmds != NIL) { - /* process 'ALTER TABLE' cmd */ - ATController(stmt, rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), lockmode); - } else { - /* if do not call ATController, close the relation in here, but keep lock until commit */ - relation_close(rel, NoLock); - } - - if (addNodeCmds != NIL) { - /* process 'ALTER FOREIGN TABLE ... ADD NODE' cmd */ - DefElem* def = GetForeignTableOptionByName(relid, optErrorRel); - if (def != NULL) { - char* relname = strVal(def->arg); - Oid errtableid = get_relname_relid(relname, get_rel_namespace(relid)); - - /* open error table releation, closed in ATController */ - Relation errtablerel = relation_open(errtableid, lockmode); - - ATController(stmt, errtablerel, addNodeCmds, interpretInhOption(stmt->relation->inhOpt), lockmode); - } - list_free_ext(addNodeCmds); - } -} - -/* - * AlterTableInternal - * - * ALTER TABLE with target specified by OID - * - * We do not reject if the relation is already open, because it's quite - * likely that one or more layers of caller have it open. That means it - * is unsafe to use this entry point for alterations that could break - * existing query plans. On the assumption it's not used for such, we - * don't have to reject pending AFTER triggers, either. 
- */ -void AlterTableInternal(Oid relid, List* cmds, bool recurse) -{ - Relation rel; - LOCKMODE lockmode = AlterTableGetLockLevel(cmds); - - rel = relation_open(relid, lockmode); - - EventTriggerAlterTableRelid(relid); - ATController(NULL, rel, cmds, recurse, lockmode); -} - -static LOCKMODE set_lockmode(LOCKMODE mode, LOCKMODE cmd_mode) -{ - if (cmd_mode > mode) { - mode = cmd_mode; - } - return mode; -} - -#ifndef ENABLE_MULTIPLE_NODES -static LOCKMODE GetPartitionLockLevel(AlterTableType subType) -{ - if (PARTITION_DDL_CMD(subType)) { - return ShareUpdateExclusiveLock; - } else { - return AccessExclusiveLock; - } -} -#endif - -/* - * AlterTableGetLockLevel - * - * Sets the overall lock level required for the supplied list of subcommands. - * Policy for doing this set according to needs of AlterTable(), see - * comments there for overall explanation. - * - * Function is called before and after parsing, so it must give same - * answer each time it is called. Some subcommands are transformed - * into other subcommand types, so the transform must never be made to a - * lower lock level than previously assigned. All transforms are noted below. - * - * Since this is called before we lock the table we cannot use table metadata - * to influence the type of lock we acquire. - * - * There should be no lockmodes hardcoded into the subcommand functions. All - * lockmode decisions for ALTER TABLE are made here only. The one exception is - * ALTER TABLE RENAME which is treated as a different statement type T_RenameStmt - * and does not travel through this section of code and cannot be combined with - * any of the subcommands given here. - */ -LOCKMODE AlterTableGetLockLevel(List* cmds) -{ - /* - * Late in 9.1 dev cycle a number of issues were uncovered with access to - * catalog relations, leading to the decision to re-enforce all DDL at - * AccessExclusiveLock level by default. 
- * - * The issues are that there is a pervasive assumption in the code that - * the catalogs will not be read unless an AccessExclusiveLock is held. If - * that rule is relaxed, we must protect against a number of potential - * effects - infrequent, but proven possible with test cases where - * multiple DDL operations occur in a stream against frequently accessed - * tables. - * - * 1. Catalog tables are read using SnapshotNow, which has a race bug that - * allows a scan to return no valid rows even when one is present in the - * case of a commit of a concurrent update of the catalog table. - * SnapshotNow also ignores transactions in progress, so takes the latest - * committed version without waiting for the latest changes. - * - * 2. Relcache needs to be internally consistent, so unless we lock the - * definition during reads we have no way to guarantee that. - * - * 3. Catcache access isn't coordinated at all so refreshes can occur at - * any time. - */ - ListCell* lcmd = NULL; - - /* default lock mode of DDL is the highest mode 8, even if commands list is empty */ - LOCKMODE lockmode = AccessExclusiveLock; - - if (cmds && list_length(cmds) > 0) { - /* clear the default lock mode, so it's safe to compare with other lock modes. */ - lockmode = NoLock; - foreach (lcmd, cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); - LOCKMODE cmd_lockmode = u_sess->attr.attr_sql.enable_cluster_resize && cmd->subtype == AT_SetRelOptions ? 
- ExclusiveLock : AccessExclusiveLock; -#ifndef ENABLE_MULTIPLE_NODES - cmd_lockmode = GetPartitionLockLevel(cmd->subtype); -#endif - /* if the partitionno is set first time in upgrade mode, we set lockmode to ShareUpdateExclusiveLock */ - if (cmd->subtype == AT_ResetPartitionno) { - if (list_length(cmds) != 1) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("RESET PARTITIONNO cannot be performed during multiple subcommands"))); - } - if (t_thrd.proc->workingVersionNum >= PARTITION_ENHANCE_VERSION_NUM) { - cmd_lockmode = AccessExclusiveLock; - } else { - cmd_lockmode = ShareUpdateExclusiveLock; - } - } - /* update with the higher lock mode */ - lockmode = set_lockmode(lockmode, cmd_lockmode); - } - } - - return lockmode; -} - -static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode) -{ - List* wqueue = NIL; - ListCell* lcmd = NULL; -#ifdef PGXC - RedistribState* redistribState = NULL; - bool doRedistribute = false; -#endif - - /* Phase 1: preliminary examination of commands, create work queue */ - foreach (lcmd, cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); - -#ifdef PGXC - /* Check restrictions of ALTER TABLE in cluster */ - ATCheckCmd(rel, cmd); -#endif - ATCheckLedgerTableCmd(rel, cmd); - ATPrepCmd(&wqueue, rel, cmd, recurse, false, lockmode); - } - -#ifdef PGXC - /* Only check that on local Coordinator */ - if (IS_PGXC_COORDINATOR) { - ListCell* ltab = NULL; - - /* - * Redistribution is only applied to the parent table and not subsequent - * children. It is also not applied in recursion. This needs to be done - * once all the commands have been treated. - */ - foreach (ltab, wqueue) { - AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab); - - if (RelationGetRelid(rel) == tab->relid && list_length(tab->subcmds[AT_PASS_DISTRIB]) > 0) { - /* - * Check if there are any commands incompatible - * with redistribution. For the time being no other commands - * are authorized. 
- */ - doRedistribute = true; - if (!IsConnFromCoord()) { - if (list_length(tab->subcmds[AT_PASS_ADD_COL]) > 0 || list_length(tab->subcmds[AT_PASS_DROP]) > 0 || - list_length(tab->subcmds[AT_PASS_ALTER_TYPE]) > 0 || - list_length(tab->subcmds[AT_PASS_OLD_CONSTR]) > 0 || - list_length(tab->subcmds[AT_PASS_COL_ATTRS]) > 0 || - list_length(tab->subcmds[AT_PASS_ADD_INDEX]) > 0 || - list_length(tab->subcmds[AT_PASS_ADD_CONSTR]) > 0 || - list_length(tab->subcmds[AT_PASS_MISC]) > 0) - ereport(ERROR, - (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), - errmsg("Incompatible operation with data redistribution"))); - - /* Scan redistribution commands and improve operation */ - redistribState = BuildRedistribCommands(RelationGetRelid(rel), tab->subcmds[AT_PASS_DISTRIB]); - } - - break; - } - } - } -#endif - - /* Close the relation, but keep lock until commit */ - relation_close(rel, NoLock); - - /* Phase 2: update system catalogs */ - ATRewriteCatalogs(&wqueue, lockmode); - -#ifdef PGXC - /* Invalidate cache for redistributed relation */ - if (doRedistribute) { - Relation rel2 = relation_open(RelationGetRelid(rel), NoLock); - - /* Invalidate all entries related to this relation */ - CacheInvalidateRelcache(rel2); - - /* Make sure locator info is rebuilt */ - RelationCacheInvalidateEntry(RelationGetRelid(rel)); - relation_close(rel2, NoLock); - } - - if (redistribState != NULL) - FreeRedistribState(redistribState); -#endif - - /* Phase 3: scan/rewrite tables as needed */ - ATRewriteTables(parsetree, &wqueue, lockmode); -} - -/* - * ATPrepCmd - * - * Traffic cop for ALTER TABLE Phase 1 operations, including simple - * recursion and permission checks. - * - * Caller must have acquired appropriate lock type on relation already. - * This lock should be held until commit. 
- */ -static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, bool recursing, LOCKMODE lockmode, - bool isDeltaTable) -{ - AlteredTableInfo* tab = NULL; - int pass; - - /* Find or create work queue entry for this table */ - tab = ATGetQueueEntry(wqueue, rel, isDeltaTable); - - /* - * Copy the original subcommand for each table. This avoids conflicts - * when different child tables need to make different parse - * transformations (for example, the same column may have different column - * numbers in different children). - */ - cmd = (AlterTableCmd*)copyObject(cmd); - /* - * Do permissions checking, recursion to child tables if needed, and any - * additional phase-1 processing needed. - */ - switch (cmd->subtype) { - case AT_AddColumn: /* ADD COLUMN */ - ATSimplePermissions(rel, ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE | ATT_SEQUENCE); - ATPrepAddColumn(wqueue, tab, rel, recurse, recursing, cmd, lockmode); - /* Recursion occurs during execution phase */ - pass = AT_PASS_ADD_COL; - break; - case AT_AddPartition: /* ADD PARTITION */ - ATSimplePermissions(rel, ATT_TABLE); - ATPrepAddPartition(rel); - /* Recursion occurs during execution phase */ - pass = AT_PASS_ADD_PARTITION; - break; - case AT_AddSubPartition: /* ADD SUBPARTITION */ - ATSimplePermissions(rel, ATT_TABLE); - ATPrepAddSubPartition(rel); - /* ADD SUBPARTITION obeys the same recursion order with ADD PARTITION */ - pass = AT_PASS_ADD_PARTITION; - break; - case AT_AddColumnToView: /* add column via CREATE OR REPLACE - * VIEW */ - ATSimplePermissions(rel, ATT_VIEW); - ATPrepAddColumn(wqueue, NULL, rel, recurse, recursing, cmd, lockmode); - /* Recursion occurs during execution phase */ - pass = AT_PASS_ADD_COL; - break; - case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ - - /* - * We allow defaults on views so that INSERT into a view can have - * default-ish behavior. This works because the rewriter - * substitutes default values into INSERTs before it expands - * rules. 
- */ - ATSimplePermissions(rel, ATT_TABLE | ATT_VIEW); - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - ATPrepCheckDefault(cmd->def); - /* No command-specific prep needed */ - pass = cmd->def ? AT_PASS_ADD_CONSTR : AT_PASS_DROP; - break; - case AT_DropNotNull: /* ALTER COLUMN DROP NOT NULL */ - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - /* No command-specific prep needed */ - pass = AT_PASS_DROP; - ATCheckNotNullConstr(cmd, tab); - break; - case AT_SetNotNull: /* ALTER COLUMN SET NOT NULL */ - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - /* No command-specific prep needed */ - pass = AT_PASS_ADD_CONSTR; - ATCheckNotNullConstr(cmd, tab); - break; - case AT_SetStatistics: /* ALTER COLUMN SET STATISTICS */ - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - /* Performs own permission checks */ - ATPrepSetStatistics(rel); - pass = AT_PASS_MISC; - break; - case AT_AddStatistics: /* ADD STATISTICS */ - case AT_DeleteStatistics: /* DELETE STATISTICS */ - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - /* Performs own permission checks */ - ATPrepSetStatistics(rel); - es_check_alter_table_statistics(rel, cmd); - pass = AT_PASS_MISC; - break; - case AT_SetOptions: /* ALTER COLUMN SET ( options ) */ - case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */ - ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW | ATT_INDEX | ATT_FOREIGN_TABLE); - /* This command never recurses */ - pass = AT_PASS_MISC; - break; - case AT_SetStorage: /* ALTER COLUMN SET STORAGE */ - ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW); - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_DropColumn: /* DROP COLUMN */ - ATSimplePermissions(rel, - ATT_TABLE | ATT_COMPOSITE_TYPE | 
ATT_FOREIGN_TABLE | - (u_sess->attr.attr_common.IsInplaceUpgrade ? ATT_VIEW : ATT_NULL)); - ATPrepDropColumn(wqueue, rel, recurse, recursing, cmd, lockmode); - /* Recursion occurs during execution phase */ - pass = AT_PASS_DROP; - break; - case AT_DropPartition: /* DROP PARTITION */ - ATSimplePermissions(rel, ATT_TABLE); - ATPrepDropPartition(rel); - /* Recursion occurs during execution phase */ - pass = AT_PASS_DROP; - break; - case AT_DropSubPartition: /* DROP SUBPARTITION */ - ATSimplePermissions(rel, ATT_TABLE); - ATPrepDropSubPartition(rel); - /* Recursion occurs during execution phase */ - pass = AT_PASS_DROP; - break; - case AT_UnusableIndexPartition: /* UNUSEABLE INDEX PARTITION */ - ATSimplePermissions(rel, ATT_INDEX); - ATPrepUnusableIndexPartition(rel); - /* Recursion occurs during execution phase */ - pass = AT_PASS_MISC; - break; - case AT_UnusableAllIndexOnPartition: /* UNUSEABLE ALL INDEX ON PARTITION */ - ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX); - ATPrepUnusableAllIndexOnPartition(rel); - /* Recursion occurs during execution phase */ - pass = AT_PASS_MISC; - break; - case AT_AddIndex: /* ADD INDEX */ - ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW | ATT_FOREIGN_TABLE); - /* This command never recurses */ - /* No command-specific prep needed */ - pass = AT_PASS_ADD_INDEX; - break; - case AT_AddConstraint: /* ADD CONSTRAINT */ - ATSimplePermissions(rel, ATT_TABLE); - /* Recursion occurs during execution phase */ - /* No command-specific prep needed except saving recurse flag */ - if (recurse) - cmd->subtype = AT_AddConstraintRecurse; - pass = AT_PASS_ADD_CONSTR; - break; - case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ - ATSimplePermissions(rel, ATT_TABLE); - /* This command never recurses */ - /* No command-specific prep needed */ - pass = AT_PASS_ADD_CONSTR; - break; - case AT_DropConstraint: /* DROP CONSTRAINT */ - /* @hdfs - * ATSimplePermissions's second parameter is change from ATT_TABLE to - * 
ATT_TABLE|ATT_FOREIGN_TABLE to suppert droping HDFS foreign table. - */ - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); - /* Recursion occurs during execution phase */ - /* No command-specific prep needed except saving recurse flag */ - if (recurse) - cmd->subtype = AT_DropConstraintRecurse; - pass = AT_PASS_DROP; - break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ - ATSimplePermissions(rel, ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE); - /* Performs own recursion */ - ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode); - pass = AT_PASS_ALTER_TYPE; - ATCheckDuplicateColumn(cmd, tab->subcmds[pass]); - break; - case AT_AlterColumnGenericOptions: - ATSimplePermissions(rel, ATT_FOREIGN_TABLE); - /* This command never recurses */ - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_ChangeOwner: /* ALTER OWNER */ - /* This command never recurses */ - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_ClusterOn: /* CLUSTER ON */ - case AT_DropCluster: /* SET WITHOUT CLUSTER */ - ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW); - /* These commands never recurse */ - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_AddOids: /* SET WITH OIDS */ - - /* - * partitioned table can not be setted with or without oids - */ - if (RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot set with oids on partitioned table"))); - } - - ATSimplePermissions(rel, ATT_TABLE); - if (!rel->rd_rel->relhasoids || recursing) - ATPrepAddOids(wqueue, rel, recurse, cmd, lockmode); - /* Recursion occurs during execution phase */ - pass = AT_PASS_ADD_COL; - break; - case AT_DropOids: /* SET WITHOUT OIDS */ - - /* - * partitioned table can not be setted with or without oids - */ - if (RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot set without oids on 
partitioned table"))); - } - - ATSimplePermissions(rel, ATT_TABLE); - /* Performs own recursion */ - if (rel->rd_rel->relhasoids) { - AlterTableCmd* dropCmd = makeNode(AlterTableCmd); - - dropCmd->subtype = AT_DropColumn; - dropCmd->name = pstrdup("oid"); - dropCmd->behavior = cmd->behavior; - ATPrepCmd(wqueue, rel, dropCmd, recurse, false, lockmode); - } - pass = AT_PASS_DROP; - break; - case AT_SetTableSpace: /* SET TABLESPACE */ - case AT_SetPartitionTableSpace: - ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX | ATT_MATVIEW); - /* This command never recurses */ - ATPrepSetTableSpace(tab, rel, cmd->name, lockmode); - pass = AT_PASS_MISC; /* doesn't actually matter */ - break; - case AT_UnusableIndex: - case AT_SetRelOptions: /* SET (...) */ - case AT_ResetRelOptions: /* RESET (...) */ - case AT_ReplaceRelOptions: /* reset them all, then set just these */ - case AT_InvisibleIndex: - case AT_VisibleIndex: - ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX | ATT_VIEW); - /* This command never recurses */ - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_AddInherit: /* INHERIT */ - ATSimplePermissions(rel, ATT_TABLE); - /* This command never recurses */ - ATPrepAddInherit(rel); - pass = AT_PASS_MISC; - break; - case AT_ValidateConstraint: /* VALIDATE CONSTRAINT */ - ATSimplePermissions(rel, ATT_TABLE); - /* Recursion occurs during execution phase */ - /* No command-specific prep needed except saving recurse flag */ - if (recurse) - cmd->subtype = AT_ValidateConstraintRecurse; - pass = AT_PASS_MISC; - break; - case AT_ReplicaIdentity: /* REPLICA IDENTITY ... 
*/ - ATSimplePermissions(rel, ATT_TABLE); - pass = AT_PASS_MISC; - /* This command never recurses */ - /* No command-specific prep needed */ - break; - case AT_EnableTrig: /* ENABLE TRIGGER variants */ - case AT_EnableAlwaysTrig: - case AT_EnableReplicaTrig: - case AT_EnableTrigAll: - case AT_EnableTrigUser: - case AT_DisableTrig: /* DISABLE TRIGGER variants */ - case AT_DisableTrigAll: - case AT_DisableTrigUser: - case AT_EnableRule: /* ENABLE/DISABLE RULE variants */ - case AT_EnableAlwaysRule: - case AT_EnableReplicaRule: - case AT_DisableRule: - case AT_EnableRls: /* ENABLE/DISABLE ROW LEVEL SECURITY */ - case AT_DisableRls: - case AT_ForceRls: /* FORCE/NO-FORCE ROW LEVEL SECURITY */ - case AT_NoForceRls: - case AT_EncryptionKeyRotation: - case AT_DropInherit: /* NO INHERIT */ - case AT_AddOf: /* OF */ - case AT_DropOf: /* NOT OF */ - case AT_SetAutoIncrement: - case AT_SetCharsetCollate: - ATSimplePermissions(rel, ATT_TABLE); - /* These commands never recurse */ - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_ConvertCharset: - ATSimplePermissions(rel, ATT_TABLE); - sqlcmd_alter_prep_convert_charset(tab, rel, cmd, lockmode); - pass = AT_PASS_MISC; - break; - case AT_GenericOptions: - ATSimplePermissions(rel, ATT_FOREIGN_TABLE); - /* No command-specific prep needed */ - pass = AT_PASS_MISC; - break; - case AT_SET_COMPRESS: - ATSimplePermissions(rel, ATT_TABLE); - pass = AT_PASS_MISC; - break; - case AT_EnableRowMoveMent: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepEnableRowMovement(rel); - pass = AT_PASS_MISC; - break; - case AT_DisableRowMoveMent: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepDisableRowMovement(rel); - pass = AT_PASS_MISC; - break; - case AT_TruncatePartition: - ATPrepTruncatePartition(rel); - pass = AT_PASS_MISC; - break; - case AT_TruncateSubPartition: - ATPrepTruncateSubPartition(rel); - pass = AT_PASS_MISC; - break; - case AT_ExchangePartition: - ATSimplePermissions(rel, ATT_TABLE); - 
ATPrepExchangePartition(rel); - pass = AT_PASS_MISC; - break; - case AT_MergePartition: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepMergePartition(rel); - pass = AT_PASS_MISC; - break; - case AT_SplitPartition: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepSplitPartition(rel); - pass = AT_PASS_MISC; - break; - case AT_AddIntoCBI: - ATSimplePermissions(rel, ATT_INDEX); - pass = AT_PASS_MISC; - break; - case AT_SplitSubPartition: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepSplitSubPartition(rel); - pass = AT_PASS_MISC; - break; - case AT_ResetPartitionno: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepResetPartitionno(rel); - pass = AT_PASS_MISC; - break; - case AT_ModifyColumn: - ATSimplePermissions(rel, ATT_TABLE); - ATPrepAlterModifyColumn(wqueue, tab, rel, recurse, recursing, cmd, lockmode); - pass = AT_PASS_ALTER_TYPE; - ATAlterCheckModifiyColumnRepeatedly(cmd, tab->subcmds[pass]); - break; -#ifdef PGXC - case AT_DistributeBy: - case AT_SubCluster: - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE | ATT_MATVIEW); - /* No command-specific prep needed */ - pass = AT_PASS_DISTRIB; - break; - - /* @hdfs - * The HDFS foreign table support 'ALTER FOREIGN TABLE ADD NODE/DELETE NODE' cmd. 
- */ - case AT_AddNodeList: - case AT_DeleteNodeList: - case AT_UpdateSliceLike: - ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); - /* No command-specific prep needed */ - pass = AT_PASS_DISTRIB; - break; - case AT_COMMENTS: - pass = AT_COMMENT; - break; -#endif - default: /* oops */ - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized alter table type: %d", (int)cmd->subtype))); - pass = 0; /* keep compiler quiet */ - break; - } - - /* Add the subcommand to the appropriate list for phase 2 */ - tab->subcmds[pass] = lappend(tab->subcmds[pass], cmd); -} - -static bool ATCheckLedgerTableCmd(Relation rel, AlterTableCmd* cmd) -{ - switch (cmd->subtype) { - case AT_AddColumn: /* ADD COLUMN */ - case AT_DropColumn: /* DROP COLUMN */ - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ - case AT_ModifyColumn: /* MODIFY/CHANGE COLUMN */ - case AT_ExchangePartition: /* EXCHANGE PARTITION */ - case AT_DropPartition: /* DROP PARTITION */ - case AT_DropSubPartition: /* DROP PARTITION */ - case AT_TruncatePartition: /* TRUNCATE PARTITION */ - case AT_TruncateSubPartition: /* TRUNCATE PARTITION */ - /* Blockchain related tables can't ALTER */ - if (rel->rd_isblockchain) { - ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("Unsupport to ALTER the structure of blockchain related table [%s].", - RelationGetRelationName(rel)))); - } - break; - default: /* other option will pass */ - break; - } - return true; -} - -/* - * ATCheckDuplicateColumn: Check AT if exists duplicate column name - */ -static void ATCheckDuplicateColumn(const AlterTableCmd* cmd, const List* tabCmds) -{ - ListCell* tcmd = NULL; - foreach (tcmd, tabCmds) { - AlterTableCmd* acmd = (AlterTableCmd*)lfirst(tcmd); - if (strcmp(acmd->name, cmd->name) == 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type of column \"%s\" twice", cmd->name))); - } - } -} - -/* - * ATCheckNotNullConstr: Check AT set/drop not null if exists 
duplicate column name - */ -static void ATCheckNotNullConstr(const AlterTableCmd* cmd, const AlteredTableInfo* tab) -{ - ATCheckDuplicateColumn(cmd, tab->subcmds[AT_PASS_ADD_CONSTR]); - ATCheckDuplicateColumn(cmd, tab->subcmds[AT_PASS_DROP]); -} - -static Node* GetGeneratedAdbin(Relation rel, AttrNumber myattnum) -{ - Oid atttype = rel->rd_att->attrs[myattnum - 1].atttypid; - int32 atttypmod = rel->rd_att->attrs[myattnum - 1].atttypmod; - ScanKeyData key[2]; - HeapTuple def_tuple; - Relation def_rel; - SysScanDesc scan; - Oid exprtype; - Node *expr = NULL; - - def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - ScanKeyInit(&key[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(myattnum)); - - scan = systable_beginscan(def_rel, AttrDefaultIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(def_tuple = systable_getnext(scan))) { - bool is_null = false; - Datum adbin_datum; - char *adbin_string = NULL; - - adbin_datum = fastgetattr(def_tuple, Anum_pg_attrdef_adbin, def_rel->rd_att, &is_null); - AssertEreport(!is_null, MOD_OPT, ""); - adbin_string = TextDatumGetCString(adbin_datum); - expr = (Node *)stringToNode_skip_extern_fields(adbin_string); - - exprtype = exprType(expr); - - expr = coerce_to_target_type(NULL, /* no UNKNOWN params here */ - expr, - exprtype, - atttype, - atttypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); - - /* - * If there is nextval FuncExpr, we should lock the quoted sequence to avoid deadlock, this has beed done in - * transformFuncExpr. See sqlcmd_lock_nextval_on_cn for more details. 
- */ - (void)lockNextvalWalker(expr, NULL); - - pfree_ext(adbin_string); - } - systable_endscan(scan); - heap_close(def_rel, RowExclusiveLock); - - return expr; -} - -static void UpdateGeneratedExpr(AlteredTableInfo* tab) -{ - ListCell* l = NULL; - foreach(l, tab->newvals) { - NewColumnValue* ex = (NewColumnValue*)lfirst(l); - Relation rel; - AttrNumber attnum; - - if (!ex->is_generated) { - continue; - } - - rel = relation_open(tab->relid, NoLock); - - attnum = get_attnum(RelationGetRelid(rel), ex->col_name); - if (attnum <= InvalidAttrNumber) { /* shouldn't happen */ - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", ex->col_name, RelationGetRelationName(rel)))); - } - - Expr *defval = (Expr *)GetGeneratedAdbin(rel, attnum); - ex->expr = expression_planner(defval); - ex->generate_attnum = attnum; - - relation_close(rel, NoLock); - } -} - -/* - * ATRewriteCatalogs - * - * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are - * dispatched in a "safe" execution order (designed to avoid unnecessary - * conflicts). - */ -static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode) -{ - int pass; - ListCell* ltab = NULL; - - /* - * We process all the tables "in parallel", one pass at a time. This is - * needed because we may have to propagate work from one table to another - * (specifically, ALTER TYPE on a foreign key's PK has to dispatch the - * re-adding of the foreign key constraint to the other table). Work can - * only be propagated into later passes, however. 
- */ - for (pass = 0; pass < AT_NUM_PASSES; pass++) { - /* Go through each table that needs to be processed */ - foreach (ltab, *wqueue) { - AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab); - List* subcmds = tab->subcmds[pass]; - Relation rel; - ListCell* lcmd = NULL; - - if (subcmds == NIL) - continue; - - /* - * Appropriate lock was obtained by phase 1, needn't get it again - */ - rel = relation_open(tab->relid, NoLock); - - foreach (lcmd, subcmds) - ATExecCmd(wqueue, tab, rel, (AlterTableCmd*)lfirst(lcmd), lockmode); - - /* - * After the ALTER TYPE pass, do cleanup work (this is not done in - * ATExecAlterColumnType since it should be done only once if - * multiple columns of a table are altered). - */ - if (pass == AT_PASS_ALTER_TYPE && !tab->isDeltaTable) - ATPostAlterTypeCleanup(wqueue, tab, lockmode); - - relation_close(rel, NoLock); - } - } - - /* Check to see if a toast table must be added. */ - foreach (ltab, *wqueue) { - AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab); - - if (tab->is_first_after) { - UpdateGeneratedExpr(tab); - } - - if (get_rel_persistence(tab->relid) == RELPERSISTENCE_GLOBAL_TEMP) { - gtt_create_storage_files(tab->relid); - } - /* u_sess->attr.attr_sql.enable_cluster_resize = true, alter operation don't handle toast */ - if ((tab->relkind == RELKIND_RELATION || tab->relkind == RELKIND_MATVIEW) && - !u_sess->attr.attr_sql.enable_cluster_resize) { - Relation rel = relation_open(tab->relid, NoLock); - Datum toast_reloptions = (Datum)0; - - if (rel->rd_options != NULL && RelationIsTableAccessMethodUStoreType(rel->rd_options)) { - List* optsList = NIL; - DefElem* def = makeDefElem(pstrdup("storage_type"), - (Node*)makeString((char*)(TABLE_ACCESS_METHOD_USTORE))); - optsList = lappend(optsList, def); - toast_reloptions = transformRelOptions((Datum)0, optsList, NULL, NULL, false, false); - } - AlterTableCreateToastTable(tab->relid, toast_reloptions); - relation_close(rel, NoLock); - } - /* check auto_increment indexes after 
rewrite catalogs */ - if (tab->relkind == RELKIND_RELATION) { - CheckRelAutoIncrementIndex(tab->relid, NoLock); - } - /* recreate every table triggers */ - foreach_cell(def_item, tab->changedTriggerDefs) { - char* cmd_str = (char*)lfirst(def_item); - List* raw_parsetree_list = raw_parser(cmd_str); - Node* stmt = (Node*)linitial(raw_parsetree_list); - Assert(IsA(stmt, CreateTrigStmt)); - (void)CreateTrigger( - (CreateTrigStmt*)stmt, cmd_str, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false); - } - } -} - -static void ATExecSetAutoIncrement(Relation rel, Value* value) -{ - int128 autoinc; - if (!RelHasAutoInc(rel)) { - return; - } - if (IsA(value, Integer)) { - autoinc = (int128)intVal(value); - } else { /* T_Float */ - autoinc = DatumGetInt128(DirectFunctionCall1(int16in, CStringGetDatum(strVal(value)))); - } - - if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - tmptable_autoinc_setval(rel->rd_rel->relfilenode, TempRelAutoInc(rel), autoinc, false); - } else { - autoinc_setval(RelAutoIncSeqOid(rel), autoinc, false); - } -} - -static void sqlcmd_alter_exec_set_charsetcollate(Relation rel, CharsetCollateOptions* cc, LOCKMODE lockmode) -{ - List* new_reloption = NULL; - - (void)fill_relation_collation(cc->collate, cc->charset, &new_reloption); - ATExecSetRelOptions(rel, new_reloption, AT_SetRelOptions, lockmode); -} - -static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, - LOCKMODE lockmode) -{ - CharsetCollateOptions* cc = (CharsetCollateOptions*)cmd->def; - Node* transform; - Oid targetcollid = InvalidOid; - Oid targettypid = InvalidOid; - ParseState* pstate = make_parsestate(NULL); - CatCList *catlist = NULL; - - if (tab->relkind != RELKIND_RELATION) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" should be a normal table", RelationGetRelationName(rel)))); - - if (cc->charset == PG_INVALID_ENCODING) { - cc->charset = 
get_charset_by_collation(get_nsp_default_collation(RelationGetNamespace(rel))); - if (cc->charset == PG_INVALID_ENCODING) { - cc->charset = GetDatabaseEncoding(); - } - } - targetcollid = transform_default_collation(cc->collate, cc->charset); - - catlist = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(rel->rd_id)); - for (int i = 0; i < catlist->n_members; i++) { - HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); - Form_pg_attribute attTup = (Form_pg_attribute)GETSTRUCT(tuple); - int attnum = attTup->attnum; - if (attnum <= 0 || attTup->attisdropped || !type_is_collatable(attTup->atttypid) || - get_charset_by_collation(attTup->attcollation) == cc->charset) - continue; - - transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); - /* When the charset is converted to the binary, the string type is converted to the corresponding binary type */ - targettypid = binary_need_transform_typeid(attTup->atttypid, &targetcollid); - if (targettypid != attTup->atttypid) { - transform = coerce_to_target_type(pstate, - transform, - exprType(transform), - targettypid, - attTup->atttypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); - if (transform == NULL) - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" cannot be cast automatically to type %s", NameStr(attTup->attname), - format_type_be(targettypid)))); - } - - transform = coerce_to_target_charset(transform, cc->charset, targettypid, attTup->atttypmod, targetcollid); - - exprSetCollation(transform, targetcollid); - - /* Fix collations after all else */ - assign_expr_collations(pstate, transform); - - /* Plan the expr now so we can accurately assess the need to rewrite. */ - transform = (Node*)expression_planner((Expr*)transform); - - /* - * Add a work queue item to make ATRewriteTable update the column - * contents. 
- */ - NewColumnValue* newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue)); - newval->attnum = attnum; - newval->expr = (Expr*)transform; - newval->is_generated = false; - newval->is_autoinc = false; - - tab->newvals = lappend(tab->newvals, newval); - tab->rewrite = true; - } - - ReleaseSysCacheList(catlist); -} - -static void sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation rel, CharsetCollateOptions* cc, - LOCKMODE lockmode) -{ - List* new_reloption = NULL; - ListCell* lc = NULL; - HeapTuple heapTup; - Form_pg_attribute attTup; - Relation attrelation; - - int target_charset = cc->charset; - Oid target_coll_oid = fill_relation_collation(cc->collate, target_charset, &new_reloption); - - ATExecSetRelOptions(rel, new_reloption, AT_SetRelOptions, lockmode); - - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - foreach(lc, tab->newvals) { - NewColumnValue* newval = (NewColumnValue*)lfirst(lc); - heapTup = SearchSysCacheCopy2(ATTNUM, RelationGetRelid(rel), newval->attnum); - attTup = (Form_pg_attribute)GETSTRUCT(heapTup); - attTup->attcollation = target_coll_oid; - attTup->atttypid = binary_need_transform_typeid(attTup->atttypid, &target_coll_oid); - - simple_heap_update(attrelation, &heapTup->t_self, heapTup); - CatalogUpdateIndexes(attrelation, heapTup); - add_column_collation_dependency(RelationGetRelid(rel), newval->attnum, target_coll_oid); - - tableam_tops_free_tuple(heapTup); - } - - heap_close(attrelation, RowExclusiveLock); -} - -static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef) -{ - List *columnOptions = columnDef->columnOptions; - ListCell *ColumnOption = NULL; - foreach (ColumnOption, columnOptions) { - void *pointer = lfirst(ColumnOption); - if (IsA(pointer, CommentStmt)) { - CommentStmt *commentStmt = (CommentStmt *)pointer; - CreateComments(relOid, RelationRelationId, get_attnum(relOid, columnDef->colname), - commentStmt->comment); - break; - } - } -} - -/* - * ATExecCmd: dispatch a subcommand to 
appropriate execution routine - */ -static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - ObjectAddress address = InvalidObjectAddress; - elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype); - - if (PARTITION_DDL_CMD(cmd->subtype) && RELATION_IS_PARTITIONED(rel)) { - int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id)); - if (!PARTITIONNO_IS_VALID(partitionno)) { - RelationResetPartitionno(rel->rd_id, ShareUpdateExclusiveLock); - } - } - - switch (cmd->subtype) { - case AT_AddColumn: /* ADD COLUMN */ - case AT_AddColumnToView: /* add column via CREATE OR REPLACE - * VIEW */ - address = ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, false, false, - cmd->is_first, cmd->after_name, lockmode); - break; - case AT_AddColumnRecurse: - address = ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, false, true, false, - cmd->is_first, cmd->after_name, lockmode); - break; - case AT_AddPartition: /* add partition */ - ATExecAddPartition(rel, (AddPartitionState*)cmd->def); - break; - case AT_AddSubPartition: /* add subpartition */ - ATExecAddSubPartition(rel, (AddSubPartitionState*)cmd->def); - break; - case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ - address = ATExecColumnDefault(rel, cmd->name, cmd->def, lockmode); - break; - case AT_DropNotNull: /* ALTER COLUMN DROP NOT NULL */ - address = ATExecDropNotNull(rel, cmd->name, lockmode); - break; - case AT_SetNotNull: /* ALTER COLUMN SET NOT NULL */ - address = ATExecSetNotNull(tab, rel, cmd->name, lockmode); - break; - case AT_SetStatistics: /* ALTER COLUMN SET STATISTICS */ - address = ATExecSetStatistics(rel, cmd->name, cmd->def, cmd->additional_property, lockmode); - break; - case AT_AddStatistics: /* ADD STATISTICS */ - ATExecAddStatistics(rel, cmd->def, lockmode); - break; - case AT_DeleteStatistics: /* DELETE STATISTICS */ - ATExecDeleteStatistics(rel, cmd->def, lockmode); - break; - case 
AT_SetOptions: /* ALTER COLUMN SET ( options ) */ - address = ATExecSetOptions(rel, cmd->name, cmd->def, false, lockmode); - break; - case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */ - address = ATExecSetOptions(rel, cmd->name, cmd->def, true, lockmode); - break; - case AT_SetStorage: /* ALTER COLUMN SET STORAGE */ - address = ATExecSetStorage(rel, cmd->name, cmd->def, lockmode); - break; - case AT_DropColumn: /* DROP COLUMN */ - address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, false, false, cmd->missing_ok, lockmode); - break; - case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ - address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, true, false, cmd->missing_ok, lockmode); - break; - case AT_DropPartition: /* drop partition */ - ATExecDropPartition(rel, cmd); - break; - case AT_DropSubPartition: /* drop subpartition */ - ATExecDropSubPartition(rel, cmd); - break; - case AT_UnusableIndexPartition: /* unusable index partition */ - ATExecUnusableIndexPartition(rel, cmd->name); - break; - case AT_UnusableAllIndexOnPartition: /* unusable all index on partition */ - ATExecUnusableAllIndexOnPartition(rel, cmd->name); - break; - case AT_UnusableIndex: - ATExecUnusableIndex(rel); - break; - case AT_InvisibleIndex: - ATExecVisibleIndex(rel, cmd->name, false); - break; - case AT_VisibleIndex: - ATExecVisibleIndex(rel, cmd->name, true); - break; - case AT_AddIndex: /* ADD INDEX */ - address = ATExecAddIndex(tab, rel, (IndexStmt*)cmd->def, false, lockmode); - break; - case AT_ReAddIndex: /* ADD INDEX */ - address = ATExecAddIndex(tab, rel, (IndexStmt*)cmd->def, true, lockmode); - break; - case AT_AddConstraint: /* ADD CONSTRAINT */ - address = ATExecAddConstraint(wqueue, tab, rel, (Constraint*)cmd->def, false, false, lockmode); - break; - case AT_AddConstraintRecurse: /* ADD CONSTRAINT with recursion */ - address = ATExecAddConstraint(wqueue, tab, rel, (Constraint*)cmd->def, true, false, lockmode); - break; - case 
AT_ReAddConstraint: /* Re-add pre-existing check constraint */ - address = ATExecAddConstraint(wqueue, tab, rel, (Constraint*)cmd->def, false, true, lockmode); - break; - case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ - address = ATExecAddIndexConstraint(tab, rel, (IndexStmt*)cmd->def, lockmode); - break; - case AT_ValidateConstraint: /* VALIDATE CONSTRAINT */ - address = ATExecValidateConstraint(rel, cmd->name, false, false, lockmode); - break; - case AT_ValidateConstraintRecurse: /* VALIDATE CONSTRAINT with - * recursion */ - address = ATExecValidateConstraint(rel, cmd->name, true, false, lockmode); - break; - case AT_DropConstraint: /* DROP CONSTRAINT */ - ATExecDropConstraint(rel, cmd->name, cmd->behavior, false, false, cmd->missing_ok, lockmode); - break; - case AT_DropConstraintRecurse: /* DROP CONSTRAINT with recursion */ - ATExecDropConstraint(rel, cmd->name, cmd->behavior, true, false, cmd->missing_ok, lockmode); - break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ - address = ATExecAlterColumnType(tab, rel, cmd, lockmode); - break; - case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */ - address = ATExecAlterColumnGenericOptions(rel, cmd->name, (List*)cmd->def, lockmode); - break; - case AT_ChangeOwner: /* ALTER OWNER */ - ATExecChangeOwner(RelationGetRelid(rel), get_role_oid(cmd->name, false), false, lockmode); - break; - case AT_ClusterOn: /* CLUSTER ON */ - address = ATExecClusterOn(rel, cmd->name, lockmode); - break; - case AT_DropCluster: /* SET WITHOUT CLUSTER */ - ATExecDropCluster(rel, lockmode); - break; - case AT_AddOids: /* SET WITH OIDS */ - /* Use the ADD COLUMN code, unless prep decided to do nothing */ - if (cmd->def != NULL) - address = ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, false, false, false, NULL, lockmode); - break; - case AT_AddOidsRecurse: /* SET WITH OIDS */ - /* Use the ADD COLUMN code, unless prep decided to do nothing */ - if (cmd->def != NULL) - address = 
ATExecAddColumn(wqueue, tab, rel, (ColumnDef*)cmd->def, true, true, false, false, NULL, lockmode); - break; - case AT_DropOids: /* SET WITHOUT OIDS */ - - /* - * Nothing to do here; we'll have generated a DropColumn - * subcommand to do the real work - */ - break; - case AT_SetTableSpace: /* SET TABLESPACE */ - - /* - * Nothing to do here; Phase 3 does the work - */ - break; - case AT_SetPartitionTableSpace: - ATExecSetTableSpaceForPartitionP2(tab, rel, cmd->def); - break; - case AT_SetRelOptions: /* SET (...) */ - case AT_ResetRelOptions: /* RESET (...) */ - case AT_ReplaceRelOptions: /* replace entire option list */ - ATExecSetRelOptions(rel, (List*)cmd->def, cmd->subtype, lockmode, false, tab); - break; - case AT_EnableTrig: /* ENABLE TRIGGER name */ - ATExecEnableDisableTrigger(rel, cmd->name, TRIGGER_FIRES_ON_ORIGIN, false, lockmode); - break; - case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */ - ATExecEnableDisableTrigger(rel, cmd->name, TRIGGER_FIRES_ALWAYS, false, lockmode); - break; - case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */ - ATExecEnableDisableTrigger(rel, cmd->name, TRIGGER_FIRES_ON_REPLICA, false, lockmode); - break; - case AT_DisableTrig: /* DISABLE TRIGGER name */ - ATExecEnableDisableTrigger(rel, cmd->name, TRIGGER_DISABLED, false, lockmode); - break; - case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */ - ATExecEnableDisableTrigger(rel, NULL, TRIGGER_FIRES_ON_ORIGIN, false, lockmode); - break; - case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */ - ATExecEnableDisableTrigger(rel, NULL, TRIGGER_DISABLED, false, lockmode); - break; - case AT_EnableTrigUser: /* ENABLE TRIGGER USER */ - ATExecEnableDisableTrigger(rel, NULL, TRIGGER_FIRES_ON_ORIGIN, true, lockmode); - break; - case AT_DisableTrigUser: /* DISABLE TRIGGER USER */ - ATExecEnableDisableTrigger(rel, NULL, TRIGGER_DISABLED, true, lockmode); - break; - - case AT_EnableRule: /* ENABLE RULE name */ - ATExecEnableDisableRule(rel, cmd->name, RULE_FIRES_ON_ORIGIN, lockmode); 
- break; - case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */ - ATExecEnableDisableRule(rel, cmd->name, RULE_FIRES_ALWAYS, lockmode); - break; - case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */ - ATExecEnableDisableRule(rel, cmd->name, RULE_FIRES_ON_REPLICA, lockmode); - break; - case AT_DisableRule: /* DISABLE RULE name */ - ATExecEnableDisableRule(rel, cmd->name, RULE_DISABLED, lockmode); - break; - case AT_EnableRls: /* ENABLE ROW LEVEL SECURITY */ - ATExecEnableDisableRls(rel, RELATION_RLS_ENABLE, lockmode); - break; - case AT_DisableRls: /* DISABLE ROW LEVEL SECURITY */ - ATExecEnableDisableRls(rel, RELATION_RLS_DISABLE, lockmode); - break; - case AT_ForceRls: /* FORCE ROW LEVEL SECURITY */ - ATExecEnableDisableRls(rel, RELATION_RLS_FORCE_ENABLE, lockmode); - break; - case AT_NoForceRls: /* NO FORCE ROW LEVEL SECURITY */ - ATExecEnableDisableRls(rel, RELATION_RLS_FORCE_DISABLE, lockmode); - break; - case AT_EncryptionKeyRotation: - ATExecEncryptionKeyRotation(rel, lockmode); - break; - case AT_AddInherit: - address = ATExecAddInherit(rel, (RangeVar*)cmd->def, lockmode); - break; - case AT_DropInherit: - address = ATExecDropInherit(rel, (RangeVar*)cmd->def, lockmode); - break; - case AT_AddOf: - address = ATExecAddOf(rel, (TypeName*)cmd->def, lockmode); - break; - case AT_DropOf: - ATExecDropOf(rel, lockmode); - break; - case AT_ReplicaIdentity: - ATExecReplicaIdentity(rel, (ReplicaIdentityStmt*)cmd->def, lockmode); - break; - case AT_GenericOptions: - ATExecGenericOptions(rel, (List*)cmd->def); - break; - case AT_SET_COMPRESS: - ATExecSetCompress(rel, cmd->name); - break; - case AT_EnableRowMoveMent: - ATExecModifyRowMovement(rel, true); - break; - case AT_DisableRowMoveMent: - ATExecModifyRowMovement(rel, false); - break; - case AT_TruncatePartition: - ATExecTruncatePartition(rel, cmd); - break; - case AT_TruncateSubPartition: - ATExecTruncateSubPartition(rel, cmd); - break; - case AT_ExchangePartition: - ATExecExchangePartition(rel, cmd); - 
break; - case AT_MergePartition: - ATExecMergePartition(rel, cmd); - break; - case AT_SplitPartition: - ATExecSplitPartition(rel, cmd); - break; - case AT_AddIntoCBI: - ATExecAddTblIntoCBI(rel, (AddTableIntoCBIState *)cmd->def); - break; - case AT_SplitSubPartition: - ATExecSplitSubPartition(rel, cmd); - break; - case AT_SetAutoIncrement: - ATExecSetAutoIncrement(rel, (Value*)cmd->def); - break; - case AT_ResetPartitionno: - ATExecResetPartitionno(rel); - break; - case AT_ModifyColumn: - ATExecAlterModifyColumn(tab, rel, cmd); - break; - case AT_SetCharsetCollate: - sqlcmd_alter_exec_set_charsetcollate(rel, (CharsetCollateOptions*)cmd->def, lockmode); - break; - case AT_ConvertCharset: /* CONVERT TO CHARACTER SET */ - sqlcmd_alter_exec_convert_charset(tab, rel, (CharsetCollateOptions*)cmd->def, lockmode); - break; - -#ifdef PGXC - case AT_DistributeBy: - AtExecDistributeBy(rel, (DistributeBy*)cmd->def); - break; - case AT_SubCluster: - AtExecSubCluster(rel, (PGXCSubCluster*)cmd->def); - break; - case AT_AddNodeList: - AtExecAddNode(rel, (List*)cmd->def); - break; - case AT_DeleteNodeList: - AtExecDeleteNode(rel, (List*)cmd->def); - break; - case AT_UpdateSliceLike: - AtExecUpdateSliceLike(rel, cmd->exchange_with_rel); - break; -#endif - case AT_COMMENTS: - /* alter table comment */ - CreateComments(rel->rd_id, RelationRelationId, 0, cmd->name); - break; - default: /* oops */ - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized alter table type: %d", (int)cmd->subtype))); - break; - } - - /* Recode time of alter relation. */ - PgObjectType objectType = GetPgObjectTypePgClass(tab->relkind); - if (objectType != OBJECT_TYPE_INVALID) { - UpdatePgObjectMtime(tab->relid, objectType); - } - - /* - * Report the subcommand to interested event triggers. - */ - EventTriggerCollectAlterTableSubcmd((Node *) cmd, address); - - - /* take ExclusiveLock to avoid PARTITION DDL COMMIT until we finish the InitPlan. 
Oid info will be masked here, and - * be locked in CommitTransaction. Distribute mode doesn't support partition DDL/DML parallel work, no need this - * action */ -#ifndef ENABLE_MULTIPLE_NODES - if (PARTITION_DDL_CMD(cmd->subtype)) { - AddPartitionDDLInfo(RelationGetRelid(rel)); - } -#endif - - /* - * Bump the command counter to ensure the next subcommand in the sequence - * can see the changes so far - */ - CommandCounterIncrement(); -} - -typedef void (*ExecRewriteFuncPtr)(AlteredTableInfo*, Oid, LOCKMODE); -typedef void (*ExecOnlyTestFuncPtr)(AlteredTableInfo*); -typedef void (*ExecChangeTabspcFuncPtr)(AlteredTableInfo*, LOCKMODE); - -#define IDX_ROW_TBL 0 -#define IDX_COL_TBL 1 -#define IDX_ORDINARY_TBL 0 -#define IDX_PARTITIONED_TBL 1 - -ExecRewriteFuncPtr ExecRewriteFuncPtrArray[2][2] = { - {ExecRewriteRowTable, ExecRewriteRowPartitionedTable}, {ExecRewriteCStoreTable, ExecRewriteCStorePartitionedTable}}; -ExecOnlyTestFuncPtr ExecOnlyTestFuncPtrArray[2][2] = {{ExecOnlyTestRowTable, ExecOnlyTestRowPartitionedTable}, - {ExecOnlyTestCStoreTable, ExecOnlyTestCStorePartitionedTable}}; -ExecChangeTabspcFuncPtr ExecChangeTabspcFuncPtrArray[2][2] = { - {ExecChangeTableSpaceForRowTable, ExecChangeTableSpaceForRowPartition}, - {ExecChangeTableSpaceForCStoreTable, ExecChangeTableSpaceForCStorePartition}}; - -/* - * @Description: check this relation whether it's a temp table in current session - * @Param[IN] topRelId: top relation OID - * @See also: RELATION_IS_OTHER_TEMP() - */ -static void CheckTopRelationIsInMyTempSession(Oid topRelId) -{ - /* first reset it */ - u_sess->cmd_cxt.topRelatationIsInMyTempSession = false; - - Relation topRel = relation_open(topRelId, NoLock); - u_sess->cmd_cxt.topRelatationIsInMyTempSession = - /* check top realtion persistent */ - topRel->rd_rel->relpersistence == RELPERSISTENCE_TEMP && - /* check top relation namespace */ - (u_sess->catalog_cxt.myTempNamespace == topRel->rd_rel->relnamespace || - u_sess->catalog_cxt.myTempToastNamespace 
== topRel->rd_rel->relnamespace); - - relation_close(topRel, NoLock); -} - -/* - * ATRewriteTables: ALTER TABLE phase 3 - */ -static void ATRewriteTables(AlterTableStmt *parsetree, List** wqueue, LOCKMODE lockmode) -{ - ListCell* ltab = NULL; - - /* Go through each table that needs to be checked or rewritten */ - foreach (ltab, *wqueue) { - AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab); - int rel_format_idx = IDX_ROW_TBL; - int idxPartitionedOrNot = IDX_ORDINARY_TBL; - -#ifdef PGXC - /* Forbid table rewrite operations with online data redistribution */ - if (tab->rewrite > 0 && list_length(tab->subcmds[AT_PASS_DISTRIB]) > 0 && IS_PGXC_COORDINATOR && !IsConnFromCoord()) - ereport(ERROR, - (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), errmsg("Incompatible operation with data redistribution"))); -#endif - - /* Foreign tables have no storage. */ - if (tab->relkind == RELKIND_FOREIGN_TABLE || tab->relkind == RELKIND_STREAM) - continue; - - if (tab->relkind == RELKIND_RELATION) { - Relation temprel = heap_open(tab->relid, NoLock); - rel_format_idx = - RelationIsCUFormat(temprel) ? IDX_COL_TBL : IDX_ROW_TBL; - idxPartitionedOrNot = RELATION_IS_PARTITIONED(temprel) ? IDX_PARTITIONED_TBL : IDX_ORDINARY_TBL; - heap_close(temprel, NoLock); - } else if (tab->relkind == RELKIND_INDEX || tab->relkind == RELKIND_GLOBAL_INDEX) { - Relation temprel = index_open(tab->relid, NoLock); - rel_format_idx = IDX_ROW_TBL; /* row relation */ - idxPartitionedOrNot = RelationIsPartitioned(temprel) ? IDX_PARTITIONED_TBL : IDX_ORDINARY_TBL; - index_close(temprel, NoLock); - } - - /* - * If we change column data types or add/remove OIDs, the operation - * has to be propagated to tables that use this table's rowtype as a - * column type. tab->newvals will also be non-NULL in the case where - * we're adding a column with a default. We choose to forbid that - * case as well, since composite types might eventually support - * defaults. 
- * - * (Eventually we'll probably need to check for composite type - * dependencies even when we're just scanning the table without a - * rewrite, but at the moment a composite type does not enforce any - * constraints, so it's not necessary/appropriate to enforce them just - * during ALTER.) - */ - if (tab->newvals != NIL || tab->rewrite > 0) { - Relation rel; - - rel = heap_open(tab->relid, NoLock); - find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL); - heap_close(rel, NoLock); - } - - /* - * We only need to rewrite the table if at least one column needs to - * be recomputed, or we are adding/removing the OID column. - */ - if (tab->rewrite > 0) { - /* Build a temporary relation and copy data */ - Relation OldHeap; - Oid NewTableSpace; - - OldHeap = heap_open(tab->relid, NoLock); - /* - * Fire off an Event Trigger now, before actually rewriting the - * table. - * - * We don't support Event Trigger for nested commands anywhere, - * here included, and parsetree is given NULL when coming from - * AlterTableInternal. - * - * And fire it only once. - */ - if (parsetree) - EventTriggerTableRewrite((Node *)parsetree, - tab->relid, - tab->rewrite); - - /* - * We don't support rewriting of system catalogs; there are too - * many corner cases and too little benefit. In particular this - * is certainly not going to work for mapped catalogs. - */ - if (IsSystemRelation(OldHeap)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot rewrite system relation \"%s\"", RelationGetRelationName(OldHeap)))); - if (RelationIsUsedAsCatalogTable(OldHeap)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg( - "cannot rewrite table \"%s\" used as a catalog table", RelationGetRelationName(OldHeap)))); - /* - * Don't allow rewrite on temp tables of other backends ... their - * local buffer manager is not going to cope. 
- */ - if (RELATION_IS_OTHER_TEMP(OldHeap)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot rewrite temporary tables of other sessions"))); - - /* - * Select destination tablespace (same as original unless user - * requested a change) - */ - if (tab->newTableSpace) - NewTableSpace = tab->newTableSpace; - else - NewTableSpace = OldHeap->rd_rel->reltablespace; - - heap_close(OldHeap, NoLock); - - ExecRewriteFuncPtrArray[rel_format_idx][idxPartitionedOrNot](tab, NewTableSpace, lockmode); - } else { - /* - * Test the current data within the table against new constraints - * generated by ALTER TABLE commands, but don't rebuild data. - */ - if (tab->constraints != NIL || tab->new_notnull) { - ExecOnlyTestFuncPtrArray[rel_format_idx][idxPartitionedOrNot](tab); - } - - /* - * If we had SET TABLESPACE but no reason to reconstruct tuples, - * just do a block-by-block copy. - */ - if (tab->newTableSpace) { - CheckTopRelationIsInMyTempSession(tab->relid); - if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && IsTransactionBlock() && rel_format_idx == 0 && - tab->relkind == RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("alter row table tablespace cannot run inside a transaction block"))); - } - ExecChangeTabspcFuncPtrArray[rel_format_idx][idxPartitionedOrNot](tab, lockmode); - } - } - } - -#ifdef PGXC - /* - * In PGXC, do not check the FK constraints on the Coordinator, and just return - * That is because a SELECT is generated whose plan will try and use - * the Datanodes. We (currently) do not want to do that on the Coordinator, - * when the command is passed down to the Datanodes it will - * peform the check locally. - * This issue was introduced when we added multi-step handling, - * it caused foreign key constraints to fail. - * issue for pg_catalog or any other cases? 
- */ - if (IS_PGXC_COORDINATOR) - return; -#endif - /* - * Foreign key constraints are checked in a final pass, since (a) it's - * generally best to examine each one separately, and (b) it's at least - * theoretically possible that we have changed both relations of the - * foreign key, and we'd better have finished both rewrites before we try - * to read the tables. - */ - foreach (ltab, *wqueue) { - AlteredTableInfo* tab = (AlteredTableInfo*)lfirst(ltab); - Relation rel = NULL; - ListCell* lcon = NULL; - - foreach (lcon, tab->constraints) { - NewConstraint* con = (NewConstraint*)lfirst(lcon); - - if (con->contype == CONSTR_FOREIGN) { - Constraint* fkconstraint = (Constraint*)con->qual; - Relation refrel; - - if (rel == NULL) { - /* Long since locked, no need for another */ - rel = heap_open(tab->relid, NoLock); - } - - refrel = heap_open(con->refrelid, RowShareLock); - - validateForeignKeyConstraint(fkconstraint->conname, rel, refrel, con->refindid, con->conid); - - /* - * No need to mark the constraint row as validated, we did - * that when we inserted the row earlier. 
- */ - heap_close(refrel, NoLock); - } - } - - if (rel) - heap_close(rel, NoLock); - } -} - -static UHeapTuple BackUpScanCuTup(UHeapTuple scanCuTup) -{ - /* use top transaction memcxt, abort transaction will free the memory if any error occur */ - MemoryContext oldCxt = MemoryContextSwitchTo(u_sess->top_transaction_mem_cxt); - UHeapTuple backUpTup = UHeapCopyTuple(scanCuTup); - MemoryContextSwitchTo(oldCxt); - return backUpTup; -} - -static UHeapTuple RestoreScanCuTup(UHeapTuple backUpTup) -{ - UHeapTuple scanCuTup = UHeapCopyTuple(backUpTup); - return scanCuTup; -} - -template -static T EvaluateGenExpr(AlteredTableInfo* tab, T tuple, - TupleDesc newTupDesc, ExprContext* econtext, - Datum* values, bool* isnull) -{ - bool hasGenCol = false; - ListCell* l = NULL; - T tup = tuple; - - if (tab->rewrite > 0) { - foreach(l, tab->newvals) - { - NewColumnValue *ex = (NewColumnValue*)lfirst(l); - - if (!ex->is_generated) - continue; - - hasGenCol = true; - - if (tab->is_first_after) { - values[ex->generate_attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, - &isnull[ex->generate_attnum - 1], NULL); - } else { - values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL); - } - } - - if (hasGenCol) { - /* - * Form the new tuple. Note that we don't explicitly pfree it, - * since the per-tuple memory context will be reset shortly. - */ - if (amtype == TAM_HEAP) { - tup = (T)heap_form_tuple(newTupDesc, values, isnull); - } else { - tup = (T)tableam_tops_form_tuple(newTupDesc, values, isnull, TableAmUstore); - } - } - } - return tup; -} - -/* - * update values and isnull after modify column to a new loaction. - * newattnum > 0 denotes modify with first or after column or add generated column. 
- */ -static void UpdateValueModifyFirstAfter(NewColumnValue *ex, Datum* values, bool* isnull) -{ - if (ex->newattnum > 0 && !ex->is_addloc) { - Datum valuesTemp = values[ex->attnum - 1]; - bool isnullTemp = isnull[ex->attnum - 1]; - if (ex->newattnum > ex->attnum) { - for (int i = ex->attnum; i <= ex->newattnum - 1; i++) { - values[i - 1] = values[i]; - isnull[i - 1] = isnull[i]; - } - } else { - for (int i = ex->attnum - 1; i >= ex->newattnum; i--) { - values[i] = values[i - 1]; - isnull[i] = isnull[i - 1]; - } - } - values[ex->newattnum - 1] = valuesTemp; - isnull[ex->newattnum - 1] = isnullTemp; - } -} - -static void UpdateGeneratedColumnIsnull(AlteredTableInfo* tab, bool* isnull, bool has_generated) -{ - ListCell* l = NULL; - - if (!has_generated) { - return; - } - - foreach (l, tab->newvals) { - NewColumnValue *ex = (NewColumnValue*)lfirst(l); - - if (!ex->is_generated) { - continue; - } - - isnull[ex->generate_attnum - 1] = true; - } -} - -/* - * change ATRewriteTable() input: oid->rel - */ -/* - * ATRewriteTable: scan or rewrite one table - * - * oldrel is NULL if we don't need to rewrite - */ -static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relation newrel) -{ - TupleDesc oldTupDesc; - TupleDesc newTupDesc; - bool needscan = false; - List* notnull_attrs = NIL; - int i; - ListCell* l = NULL; - EState* estate = NULL; - CommandId mycid; - BulkInsertState bistate; - uint32 hi_options; - - oldTupDesc = tab->oldDesc; - newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ - - /* - * Prepare a BulkInsertState and options for heap_insert. Because we're - * building a new heap, we can skip WAL-logging and fsync it to disk at - * the end instead (unless WAL-logging is required for archiving or - * streaming replication). The FSM is empty too, so don't bother using it. 
- */ - if (newrel) { - mycid = GetCurrentCommandId(true); - bistate = GetBulkInsertState(); - - hi_options = TABLE_INSERT_SKIP_FSM; - if (!XLogIsNeeded()) - hi_options |= TABLE_INSERT_SKIP_WAL; - } else { - /* keep compiler quiet about using these uninitialized */ - mycid = 0; - bistate = NULL; - hi_options = 0; - } - - /* - * Generate the constraint and default execution states - */ - estate = CreateExecutorState(); - - /* Build the needed expression execution states */ - foreach (l, tab->constraints) { - NewConstraint* con = (NewConstraint*)lfirst(l); - - switch (con->contype) { - case CONSTR_CHECK: - needscan = true; - if (estate->es_is_flt_frame){ - con->qualstate = (List*)ExecPrepareExprList((List*)con->qual, estate); - } else { - con->qualstate = (List*)ExecPrepareExpr((Expr*)con->qual, estate); - } - break; - case CONSTR_FOREIGN: - /* Nothing to do here */ - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized constraint type: %d", (int)con->contype))); - } break; - } - } - - foreach (l, tab->newvals) { - NewColumnValue* ex = (NewColumnValue*)lfirst(l); - - /* expr already planned */ - ex->exprstate = ExecInitExpr((Expr*)ex->expr, NULL); - } - - notnull_attrs = NIL; - if (newrel || tab->new_notnull) { - /* - * If we are rebuilding the tuples OR if we added any new NOT NULL - * constraints, check all not-null constraints. This is a bit of - * overkill but it minimizes risk of bugs, and heap_attisnull is a - * pretty cheap test anyway. 
- */ - for (i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i].attnotnull && !newTupDesc->attrs[i].attisdropped) - notnull_attrs = lappend_int(notnull_attrs, i); - } - if (notnull_attrs != NULL) - needscan = true; - } - - if (newrel || needscan) { - ExprContext* econtext = NULL; - Datum* values = NULL; - bool* isnull = NULL; - bool isUstore = false; - TupleTableSlot* oldslot = NULL; - TupleTableSlot* newslot = NULL; - TableScanDesc scan; - HeapTuple tuple; - UHeapTuple utuple; - MemoryContext oldCxt; - List* dropped_attrs = NIL; - ListCell* lc = NULL; - errno_t rc = EOK; - int128 autoinc = 0; - bool need_autoinc = false; - bool has_generated = false; - AttrNumber autoinc_attnum = (newTupDesc->constr && newTupDesc->constr->cons_autoinc) ? - newTupDesc->constr->cons_autoinc->attnum : 0; - - isUstore = RelationIsUstoreFormat(oldrel); - - if (newrel) - ereport(DEBUG1, (errmsg("rewriting table \"%s\"", RelationGetRelationName(oldrel)))); - else - ereport(DEBUG1, (errmsg("verifying table \"%s\"", RelationGetRelationName(oldrel)))); - - if (newrel) { - /* - * All predicate locks on the tuples or pages are about to be made - * invalid, because we move tuples around. Promote them to - * relation locks. - */ - TransferPredicateLocksToHeapRelation(oldrel); - } - - econtext = GetPerTupleExprContext(estate); - - /* - * Make tuple slots for old and new tuples. Note that even when the - * tuples are the same, the tupDescs might not be (consider ADD COLUMN - * without a default). 
- */ - oldslot = MakeSingleTupleTableSlot(oldTupDesc, false, oldrel->rd_tam_ops); - newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_ops); - - /* Preallocate values/isnull arrays */ - i = Max(newTupDesc->natts, oldTupDesc->natts); - values = (Datum*)palloc(i * sizeof(Datum)); - isnull = (bool*)palloc(i * sizeof(bool)); - rc = memset_s(values, i * sizeof(Datum), 0, i * sizeof(Datum)); - securec_check(rc, "\0", "\0"); - rc = memset_s(isnull, i * sizeof(bool), true, i * sizeof(bool)); - securec_check(rc, "\0", "\0"); - - /* - * Any attributes that are dropped according to the new tuple - * descriptor can be set to NULL. We precompute the list of dropped - * attributes to avoid needing to do so in the per-tuple loop. - */ - for (i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i].attisdropped) - dropped_attrs = lappend_int(dropped_attrs, i); - } - - /* - * here we don't care oldTupDesc->initdefvals, because it's - * handled during deforming old tuple. new values for added - * colums maybe is from *tab->newvals* list, or newTupDesc' - * initdefvals list. - */ - if (newTupDesc->initdefvals) { - TupInitDefVal* defvals = newTupDesc->initdefvals; - - /* skip all the existing columns within this relation */ - for (i = oldTupDesc->natts; i < newTupDesc->natts; ++i) { - if (!defvals[i].isNull) { - /* we assign both *isnull* and *values* here instead of - * scaning loop, because all these are constant and not - * dependent on each tuple. - */ - isnull[i] = false; - values[i] = fetchatt(&newTupDesc->attrs[i], defvals[i].datum); - } - } - } - - /* - * Scan through the rows, generating a new row if needed and then - * checking all the constraints. - */ - scan = tableam_scan_begin(oldrel, SnapshotNow, 0, NULL); - - /* - * Switch to per-tuple memory context and reset it for each tuple - * produced, so we don't leak memory. 
- */ - oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - - // it is special that oldTupDesc must be used for deforming the heap tuple, - // so that scan->rs_tupdesc is overwritten here. - // - if (isUstore) { - ((UHeapScanDesc) scan)->rs_tupdesc = oldTupDesc; - while ((UHeapGetNextSlotGuts(scan, ForwardScanDirection, oldslot)) != NULL) - { - utuple = (UHeapTuple)oldslot->tts_tuple; - - if (tab->rewrite > 0) - { - int newvals_num = 0; - /* Extract data from old tuple */ - tableam_tops_deform_tuple(utuple, oldTupDesc, values, isnull); - - /* - * Process supplied expressions to replace selected columns. - * - * First, evaluate expressions whose inputs come from the old - * tuple. - */ - econtext->ecxt_scantuple = oldslot; - - foreach(l, tab->newvals) - { - NewColumnValue *ex = (NewColumnValue*)lfirst(l); - - if (ex->is_addloc) { - for (i = oldTupDesc->natts + newvals_num - 1; i >= ex->attnum - 1; i--) { - values[i + 1] = values[i]; - isnull[i + 1] = isnull[i]; - } - newvals_num++; - } - - if (ex->is_generated) { - if (tab->is_first_after) { - UpdateValueModifyFirstAfter(ex, values, isnull); - has_generated = true; - } else { - isnull[ex->attnum - 1] = true; - } - continue; - } - - values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1], NULL); - - if (ex->is_autoinc) { - need_autoinc = (autoinc_attnum > 0); - } - - if (tab->is_first_after) { - UpdateValueModifyFirstAfter(ex, values, isnull); - } - } - - /* generated column */ - UpdateGeneratedColumnIsnull(tab, isnull, has_generated); - - /* auto_increment */ - if (need_autoinc) { - autoinc = EvaluateAutoIncrement(oldrel, newTupDesc, - autoinc_attnum, &values[autoinc_attnum - 1], &isnull[autoinc_attnum - 1]); - } - - /* Set dropped attributes to null in new tuple */ - foreach(lc, dropped_attrs) { - isnull[lfirst_int(lc)] = true; - } - - /* - * Form the new tuple. Note that we don't explicitly pfree it, - * since the per-tuple memory context will be reset shortly. 
- */ - utuple = (UHeapTuple)tableam_tops_form_tuple(newTupDesc, values, isnull, TableAmUstore); - } - - /* Now check any constraints on the possibly-changed tuple */ - (void)ExecStoreTuple(utuple, newslot, InvalidBuffer, false); - econtext->ecxt_scantuple = newslot; - - /* - * Now, evaluate any expressions whose inputs come from the - * new tuple. We assume these columns won't reference each - * other, so that there's no ordering dependency. - */ - utuple = EvaluateGenExpr(tab, utuple, newTupDesc, econtext, values, isnull); - - foreach(l, notnull_attrs) - { - int attn = lfirst_int(l); - - /* replace heap_attisnull with relationAttIsNull - * due to altering table instantly - */ - if (tableam_tops_tuple_attisnull(utuple, attn + 1, newTupDesc)) - ereport(ERROR, - (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" contains null values", - NameStr(newTupDesc->attrs[attn].attname)))); - } - - foreach(l, tab->constraints) - { - NewConstraint *con = (NewConstraint*)lfirst(l); - ListCell* lc = NULL; - - switch (con->contype) - { - case CONSTR_CHECK: - { - if (estate->es_is_flt_frame){ - foreach (lc, con->qualstate) { - ExprState* exprState = (ExprState*)lfirst(lc); - - if (!ExecCheckByFlatten(exprState, econtext)) - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", - con->name))); - } - } else { - if (!ExecQual(con->qualstate, econtext, true)){ - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", - con->name))); - } - } - } - break; - case CONSTR_FOREIGN: - /* Nothing to do here */ - break; - default: - { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized constraint type: %d", (int) con->contype))); - } - } - } - - /* Write the tuple out to the new relation */ - if (newrel) { - (void)tableam_tuple_insert(newrel, utuple, mycid, hi_options, bistate); - - if (autoinc > 0) { - SetRelAutoIncrement(oldrel, 
newTupDesc, autoinc); - } - } - - /* - * We need to reset the flags of slot before entering the next loop so that inplaceindex_getnextslot - * will not try to clear it after we reset the context. Note that we don't explicitly pfree its - * tuple since the per-tuple memory context will be reset shortly. - */ - oldslot->tts_flags &= ~TTS_FLAG_SHOULDFREE; - - UHeapTuple backUpTup = BackUpScanCuTup(((UHeapScanDesc) scan)->rs_cutup); - ResetExprContext(econtext); - ((UHeapScanDesc) scan)->rs_cutup = RestoreScanCuTup(backUpTup); - if (backUpTup != NULL) { - pfree_ext(backUpTup); - } - - CHECK_FOR_INTERRUPTS(); - } - } else { - ((HeapScanDesc) scan)->rs_tupdesc = oldTupDesc; - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - if (tab->rewrite > 0) { - Oid tupOid = InvalidOid; - int newvals_num = 0; - - /* Extract data from old tuple */ - tableam_tops_deform_tuple(tuple, oldTupDesc, values, isnull); - if (oldTupDesc->tdhasoid) - tupOid = HeapTupleGetOid(tuple); - - /* - * Process supplied expressions to replace selected columns. - * - * First, evaluate expressions whose inputs come from the old - * tuple. 
- */ - (void)ExecStoreTuple(tuple, oldslot, InvalidBuffer, false); - econtext->ecxt_scantuple = oldslot; - - foreach (l, tab->newvals) { - NewColumnValue* ex = (NewColumnValue*)lfirst(l); - - if (ex->is_addloc) { - for (i = oldTupDesc->natts + newvals_num - 1; i >= ex->attnum - 1; i--) { - values[i + 1] = values[i]; - isnull[i + 1] = isnull[i]; - } - newvals_num++; - } - - if (ex->is_generated) { - if (tab->is_first_after) { - UpdateValueModifyFirstAfter(ex, values, isnull); - has_generated = true; - } else { - isnull[ex->attnum - 1] = true; - } - continue; - } - - values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, &isnull[ex->attnum - 1]); - if (ex->is_autoinc) { - need_autoinc = (autoinc_attnum > 0); - } - - if (tab->is_first_after) { - UpdateValueModifyFirstAfter(ex, values, isnull); - } - } - - /* generated column */ - UpdateGeneratedColumnIsnull(tab, isnull, has_generated); - - /* auto_increment */ - if (need_autoinc) { - autoinc = EvaluateAutoIncrement(oldrel, newTupDesc, - autoinc_attnum, &values[autoinc_attnum - 1], &isnull[autoinc_attnum - 1]); - } - - /* Set dropped attributes to null in new tuple */ - foreach (lc, dropped_attrs) { - isnull[lfirst_int(lc)] = true; - } - - /* - * Form the new tuple. Note that we don't explicitly pfree it, - * since the per-tuple memory context will be reset shortly. - */ - tuple = (HeapTuple)heap_form_tuple(newTupDesc, values, isnull); - - /* Preserve OID, if any */ - if (newTupDesc->tdhasoid) - HeapTupleSetOid(tuple, tupOid); - } - - /* Now check any constraints on the possibly-changed tuple */ - (void)ExecStoreTuple(tuple, newslot, InvalidBuffer, false); - econtext->ecxt_scantuple = newslot; - - /* - * Now, evaluate any expressions whose inputs come from the - * new tuple. We assume these columns won't reference each - * other, so that there's no ordering dependency. 
- */ - tuple = EvaluateGenExpr(tab, tuple, newTupDesc, econtext, values, isnull); - - foreach (l, notnull_attrs) { - int attn = lfirst_int(l); - - /* replace heap_attisnull with relationAttIsNull - * due to altering table instantly - */ - if (relationAttIsNull(tuple, attn + 1, newTupDesc)) - ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" contains null values", NameStr(newTupDesc->attrs[attn].attname)))); - } - - foreach (l, tab->constraints) { - NewConstraint* con = (NewConstraint*)lfirst(l); - ListCell* lc = NULL; - - switch (con->contype) { - case CONSTR_CHECK: - { - if (estate->es_is_flt_frame){ - foreach (lc, con->qualstate) { - ExprState* exprState = (ExprState*)lfirst(lc); - - if (!ExecCheckByFlatten(exprState, econtext)) - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", - con->name))); - } - } else { - if (!ExecQualByRecursion(con->qualstate, econtext, true)){ - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", - con->name))); - } - } - } - break; - case CONSTR_FOREIGN: - /* Nothing to do here */ - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized constraint type: %d", (int)con->contype))); - } - } - } - - /* Write the tuple out to the new relation */ - if (newrel) { - (void)tableam_tuple_insert(newrel, tuple, mycid, hi_options, bistate); - - if (autoinc > 0) { - SetRelAutoIncrement(oldrel, newTupDesc, autoinc); - } - } - ResetExprContext(econtext); - - CHECK_FOR_INTERRUPTS(); - } - } - - MemoryContextSwitchTo(oldCxt); - tableam_scan_end(scan); - - ExecDropSingleTupleTableSlot(oldslot); - ExecDropSingleTupleTableSlot(newslot); - } - - FreeExecutorState(estate); - - if (newrel) { - FreeBulkInsertState(bistate); - - /* If we skipped writing WAL, then we need to sync the heap. 
*/ - if (((hi_options & TABLE_INSERT_SKIP_WAL) || enable_heap_bcm_data_replication()) && - !RelationIsSegmentTable(newrel)) - heap_sync(newrel); - /* - * After the temporary table is rewritten, the relfilenode changes. - * We need to find new TmptableCacheEntry with new relfilenode. - * Then set new auto_increment counter value in new TmptableCacheEntry. - */ - CopyTempAutoIncrement(oldrel, newrel); - } -} - -static void ATRewriteTable(AlteredTableInfo* tab, Relation oldrel, Relation newrel) -{ - if (RELATION_CREATE_BUCKET(oldrel)) { - oidvector* bucketlist = searchHashBucketByOid(oldrel->rd_bucketoid); - - for (int i = 0; i < bucketlist->dim1; i++) { - Relation oldbucket = bucketGetRelation(oldrel, NULL, bucketlist->values[i]); - Relation newbucket = NULL; - if (newrel != NULL) { - newbucket = bucketGetRelation(newrel, NULL, bucketlist->values[i]); - } - - ATRewriteTableInternal(tab, oldbucket, newbucket); - - bucketCloseRelation(oldbucket); - if (newbucket != NULL) { - bucketCloseRelation(newbucket); - } - } - - if (newrel && (!XLogIsNeeded() || enable_heap_bcm_data_replication()) && !RelationIsSegmentTable(newrel)) { - heap_sync(newrel); - } - } else { - ATRewriteTableInternal(tab, oldrel, newrel); - } -} - -#ifndef ENABLE_MULTIPLE_NODES -/* - * ATOnlyCheckCStoreTable - * Only support not-null constraint check currently. - */ -static void ATOnlyCheckCStoreTable(const AlteredTableInfo* tab, Relation rel) -{ - TupleDesc oldTupDesc = NULL; - TupleDesc newTupDesc = NULL; - List* notnullAttrs = NIL; - bool needscan = false; - - oldTupDesc = tab->oldDesc; - newTupDesc = RelationGetDescr(rel); /* includes all mods */ - - if (tab->constraints != NIL || tab->newvals != NIL) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column stored relation doesn't support this feature"))); - } - - if (tab->new_notnull) { - /* - * If we added any new NOT NULL constraints, check all not-null constraints. 
- */ - for (int i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i].attnotnull && !newTupDesc->attrs[i].attisdropped) - notnullAttrs = lappend_int(notnullAttrs, i); - } - if (notnullAttrs != NULL) - needscan = true; - } - - if (needscan) { - CStoreScanDesc cstoreScan = NULL; - VectorBatch* vecScanBatch = NULL; - int attrNum = list_length(notnullAttrs); - AttrNumber* colIdx = (AttrNumber*)palloc(sizeof(AttrNumber) * attrNum); - int n = 0; - ListCell* cell = NULL; - ScalarVector *vec = NULL; - foreach (cell, notnullAttrs) { - colIdx[n++] = lfirst_int(cell) + 1; - } - - ereport(DEBUG1, (errmsg("verifying table \"%s\"", RelationGetRelationName(rel)))); - - /* - * Scan through the cstore, generating a new row if needed and then - * checking constraints. - */ - cstoreScan = CStoreBeginScan(rel, attrNum, colIdx, SnapshotNow, false); - - do { - vecScanBatch = CStoreGetNextBatch(cstoreScan); - vec = vecScanBatch->m_arr; - for (int rowIdx = 0; rowIdx < vecScanBatch->m_rows; rowIdx++) { - for (n = 0; n < attrNum; ++n ) { - int attn = colIdx[n] - 1; - if (vec[attn].IsNull(rowIdx)) { - ereport(ERROR, - (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" contains null values", NameStr(newTupDesc->attrs[attn].attname)))); - } - } - } - } while (!CStoreIsEndScan(cstoreScan)); - - CStoreEndScan(cstoreScan); - - pfree_ext(colIdx); - } - - list_free_ext(notnullAttrs); -} -#endif - -/* - * ATGetQueueEntry: find or create an entry in the ALTER TABLE work queue - */ -static AlteredTableInfo* ATGetQueueEntry(List** wqueue, Relation rel, bool isDeltaTable) -{ - Oid relid = RelationGetRelid(rel); - AlteredTableInfo* tab = NULL; - ListCell* ltab = NULL; - - foreach (ltab, *wqueue) { - tab = (AlteredTableInfo*)lfirst(ltab); - if (tab->relid == relid) - return tab; - } - - /* - * Not there, so add it. Note that we make a copy of the relation's - * existing descriptor before anything interesting can happen to it. 
- */ - tab = (AlteredTableInfo*)palloc0(sizeof(AlteredTableInfo)); - tab->relid = relid; - tab->relkind = rel->rd_rel->relkind; - tab->oldDesc = CreateTupleDescCopy(RelationGetDescr(rel)); - tab->isDeltaTable = isDeltaTable; - *wqueue = lappend(*wqueue, tab); - - return tab; -} - -/* - * ATSimplePermissions - * - * - Ensure that it is a relation (or possibly a view) - * - Ensure this user is the owner or have the needed privileges - * - Ensure that it is not a system table - */ -static void ATSimplePermissions(Relation rel, int allowed_targets) -{ - int actual_target; - - switch (rel->rd_rel->relkind) { - case RELKIND_RELATION: - actual_target = ATT_TABLE; - break; - case RELKIND_CONTQUERY: - case RELKIND_VIEW: - actual_target = ATT_VIEW; - break; - case RELKIND_MATVIEW: - actual_target = ATT_MATVIEW; - break; - case RELKIND_INDEX: - case RELKIND_GLOBAL_INDEX: - actual_target = ATT_INDEX; - break; - case RELKIND_COMPOSITE_TYPE: - actual_target = ATT_COMPOSITE_TYPE; - break; - case RELKIND_STREAM: - case RELKIND_FOREIGN_TABLE: - actual_target = ATT_FOREIGN_TABLE; - break; - case RELKIND_SEQUENCE: - case RELKIND_LARGE_SEQUENCE: { - if (u_sess->attr.attr_common.IsInplaceUpgrade) { - actual_target = ATT_SEQUENCE; - } else - actual_target = ATT_NULL; - break; - } - default: - actual_target = 0; - break; - } - - /* Wrong target type? 
*/ - if (((uint32)actual_target & (uint32)allowed_targets) == 0) - ATWrongRelkindError(rel, allowed_targets); - - /* Permission check */ - ATPermissionCheck(rel->rd_rel, RelationGetRelid(rel)); - - if (!(g_instance.attr.attr_common.allowSystemTableMods && RelationGetRelid(rel) >= FirstBootstrapObjectId) && - !u_sess->attr.attr_common.IsInplaceUpgrade && IsSystemRelation(rel)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", RelationGetRelationName(rel)))); -} - -/* - * ATWrongRelkindError - * - * Throw an error when a relation has been determined to be of the wrong - * type. - */ -static void ATWrongRelkindError(Relation rel, int allowed_targets) -{ - char* msg = NULL; - - switch (allowed_targets) { - case ATT_TABLE: - msg = _("\"%s\" is not a table"); - break; - case ATT_TABLE | ATT_VIEW: - msg = _("\"%s\" is not a table or view"); - break; - case ATT_TABLE | ATT_FOREIGN_TABLE: - msg = _("\"%s\" is not a table or foreign table"); - break; - case ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE: - msg = _("\"%s\" is not a table, composite type, or foreign table"); - break; - case ATT_VIEW: - msg = _("\"%s\" is not a view"); - break; - case ATT_FOREIGN_TABLE: - msg = _("\"%s\" is not a foreign table"); - break; - case ATT_TABLE | ATT_VIEW | ATT_MATVIEW | ATT_INDEX: - msg = _("\"%s\" is not a table, view, materialized view, or index"); - break; - case ATT_TABLE | ATT_MATVIEW: - msg = _("\"%s\" is not a table or materialized view"); - break; - case ATT_TABLE | ATT_MATVIEW | ATT_INDEX: - msg = _("\"%s\" is not a table, materialized view, or index"); - break; - case ATT_TABLE | ATT_MATVIEW | ATT_INDEX | ATT_FOREIGN_TABLE: - msg = _("\"%s\" is not a table, materialized view, composite type, or foreign table"); - break; - default: - /* shouldn't get here, add all necessary cases above */ - msg = _("\"%s\" is of the wrong type"); - break; - } - - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), 
errmsg(msg, RelationGetRelationName(rel)))); -} - -/* - * ATSimpleRecursion - * - * Simple table recursion sufficient for most ALTER TABLE operations. - * All direct and indirect children are processed in an unspecified order. - * Note that if a child inherits from the original table via multiple - * inheritance paths, it will be visited just once. - */ -static void ATSimpleRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, LOCKMODE lockmode) -{ - /* - * Propagate to children if desired. Non-table relations never have - * children, so no need to search in that case. - */ - if (recurse && rel->rd_rel->relkind == RELKIND_RELATION) { - Oid relid = RelationGetRelid(rel); - ListCell* child = NULL; - List* children = NIL; - - bool isDeltaStore = RelationIsCUFormat(rel); -#ifdef ENABLE_MULTIPLE_NODES - isDeltaStore = g_instance.attr.attr_storage.enable_delta_store && isDeltaStore; -#endif - if (isDeltaStore) - - /* - * Under centrailzed mode, there may be unique index on delta table. When checking unique - * constraint, unique index on delta will be used. So we ignore enable_delta_store here - * and alter delta table at the same time. - */ - children = find_cstore_delta(rel, lockmode); - else - children = find_all_inheritors(relid, lockmode, NULL); - - /* - * find_all_inheritors does the recursive search of the inheritance - * hierarchy, so all we have to do is process all of the relids in the - * list that it returns. - */ - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - - if (childrelid == relid) - continue; - /* find_all_inheritors already got lock */ - childrel = relation_open(childrelid, NoLock); - CheckTableNotInUse(childrel, "ALTER TABLE"); - ATPrepCmd(wqueue, childrel, cmd, false, true, lockmode, isDeltaStore); - relation_close(childrel, NoLock); - } - } -} - -/* - * ATTypedTableRecursion - * - * Propagate ALTER TYPE operations to the typed tables of that type. 
- * Also check the RESTRICT/CASCADE behavior. Given CASCADE, also permit - * recursion to inheritance children of the typed tables. - */ -static void ATTypedTableRecursion(List** wqueue, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - ListCell* child = NULL; - List* children = NIL; - - Assert(rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE); - - children = find_typed_table_dependencies(rel->rd_rel->reltype, RelationGetRelationName(rel), cmd->behavior); - - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - - childrel = relation_open(childrelid, lockmode); - CheckTableNotInUse(childrel, "ALTER TABLE"); - ATPrepCmd(wqueue, childrel, cmd, true, true, lockmode); - relation_close(childrel, NoLock); - } -} - -/* - * find_composite_type_dependencies - * - * Check to see if a composite type is being used as a column in some - * other table (possibly nested several levels deep in composite types!). - * Eventually, we'd like to propagate the check or rewrite operation - * into other such tables, but for now, just error out if we find any. - * - * Caller should provide either a table name or a type name (not both) to - * report in the error message, if any. - * - * We assume that functions and views depending on the type are not reasons - * to reject the ALTER. (How safe is this really?) - */ -void find_composite_type_dependencies(Oid typeOid, Relation origRelation, const char* origTypeName) -{ - Relation depRel; - ScanKeyData key[2]; - SysScanDesc depScan; - HeapTuple depTup; - Oid arrayOid; - - /* - * We scan pg_depend to find those things that depend on the rowtype. (We - * assume we can ignore refobjsubid for a rowtype.) 
- */ - depRel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(TypeRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(typeOid)); - - depScan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(depTup = systable_getnext(depScan))) { - Form_pg_depend pg_depend = (Form_pg_depend)GETSTRUCT(depTup); - Relation rel; - Form_pg_attribute att; - - /* Ignore dependees that aren't user columns of relations */ - /* (we assume system columns are never of rowtypes) */ - if (pg_depend->classid != RelationRelationId || pg_depend->objsubid <= 0) - continue; - - rel = relation_open(pg_depend->objid, AccessShareLock); - att = &rel->rd_att->attrs[pg_depend->objsubid - 1]; - - if (rel->rd_rel->relkind == RELKIND_RELATION || rel->rd_rel->relkind == RELKIND_MATVIEW) { - if (origTypeName != NULL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type \"%s\" because column \"%s.%s\" uses it", - origTypeName, - RelationGetRelationName(rel), - NameStr(att->attname)))); - else if (origRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type \"%s\" because column \"%s.%s\" uses it", - RelationGetRelationName(origRelation), - RelationGetRelationName(rel), - NameStr(att->attname)))); - else if (origRelation->rd_rel->relkind == RELKIND_FOREIGN_TABLE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type", - RelationGetRelationName(origRelation), - RelationGetRelationName(rel), - NameStr(att->attname)))); - else if (origRelation->rd_rel->relkind == RELKIND_STREAM) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter stream \"%s\" because column \"%s.%s\" 
uses its row type", - RelationGetRelationName(origRelation), - RelationGetRelationName(rel), - NameStr(att->attname)))); - else - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter table \"%s\" because column \"%s.%s\" uses its row type", - RelationGetRelationName(origRelation), - RelationGetRelationName(rel), - NameStr(att->attname)))); - } else if (OidIsValid(rel->rd_rel->reltype)) { - /* - * A view or composite type itself isn't a problem, but we must - * recursively check for indirect dependencies via its rowtype. - */ - find_composite_type_dependencies(rel->rd_rel->reltype, origRelation, origTypeName); - } - - relation_close(rel, AccessShareLock); - } - - systable_endscan(depScan); - - relation_close(depRel, AccessShareLock); - - /* - * If there's an array type for the rowtype, must check for uses of it, - * too. - */ - arrayOid = get_array_type(typeOid); - if (OidIsValid(arrayOid)) - find_composite_type_dependencies(arrayOid, origRelation, origTypeName); -} - -/* - * find_typed_table_dependencies - * - * Check to see if a composite type is being used as the type of a - * typed table. Abort if any are found and behavior is RESTRICT. - * Else return the list of tables. - */ -static List* find_typed_table_dependencies(Oid typeOid, const char* typname, DropBehavior behavior) -{ - Relation classRel; - ScanKeyData key[1]; - TableScanDesc scan; - HeapTuple tuple; - List* result = NIL; - - classRel = heap_open(RelationRelationId, AccessShareLock); - - ScanKeyInit(&key[0], Anum_pg_class_reloftype, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(typeOid)); - - scan = tableam_scan_begin(classRel, SnapshotNow, 1, key); - - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - if (behavior == DROP_RESTRICT) - ereport(ERROR, - (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), - errmsg("cannot alter type \"%s\" because it is the type of a typed table", typname), - errhint("Use ALTER ... 
CASCADE to alter the typed tables too."))); - else - result = lappend_oid(result, HeapTupleGetOid(tuple)); - } - - tableam_scan_end(scan); - heap_close(classRel, AccessShareLock); - - return result; -} - -/* - * check_of_type - * - * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it - * isn't suitable, throw an error. Currently, we require that the type - * originated with CREATE TYPE AS. We could support any row type, but doing so - * would require handling a number of extra corner cases in the DDL commands. - */ -void check_of_type(HeapTuple typetuple) -{ - Form_pg_type typ = (Form_pg_type)GETSTRUCT(typetuple); - bool typeOk = false; - - if (typ->typtype == TYPTYPE_COMPOSITE) { - Relation typeRelation; - - Assert(OidIsValid(typ->typrelid)); - typeRelation = relation_open(typ->typrelid, AccessShareLock); - typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE); - - /* - * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing - * the type before the typed table creation/conversion commits. - */ - relation_close(typeRelation, NoLock); - } - if (!typeOk) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("type %s is not a composite type", format_type_be(HeapTupleGetOid(typetuple))))); -} - -/* - * ALTER TABLE ADD COLUMN - * - * Adds an additional attribute to a relation making the assumption that - * CHECK, NOT NULL, and FOREIGN KEY constraints will be removed from the - * AT_AddColumn AlterTableCmd by parse_utilcmd.c and added as independent - * AlterTableCmd's. - * - * ADD COLUMN cannot use the normal ALTER TABLE recursion mechanism, because we - * have to decide at runtime whether to recurse or not depending on whether we - * actually add a column or merely merge with an existing column. (We can't - * check this in a static pre-pass because it won't handle multiple inheritance - * situations correctly.) 
- */ -static void ATPrepAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, - bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - if (rel->rd_rel->reloftype && !recursing) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot add column to typed table"))); - - ColumnDef* colDef = (ColumnDef*)cmd->def; - - if (RelationIsColStore(rel)) { - int32 typmod = 0; - HeapTuple typeTuple = typenameType(NULL, colDef->typname, &typmod); - Oid typeOid = HeapTupleGetOid(typeTuple); - ReleaseSysCache(typeTuple); - - // check the supported data type and error report if needed. - if (RelationIsCUFormat(rel) && !IsTypeSupportedByCStore(typeOid)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in column store", format_type_with_typemod(typeOid, typmod)))); - } - if (RelationIsPAXFormat(rel) && !IsTypeSupportedByORCRelation(typeOid)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in DFS table.", format_type_with_typemod(typeOid, typmod)))); - } - } else if (RelationIsTsStore(rel)) { - int32 typmod = 0; - HeapTuple typeTuple = typenameType(NULL, colDef->typname, &typmod); - Oid typeOid = HeapTupleGetOid(typeTuple); - ReleaseSysCache(typeTuple); - - // check the supported data type and error report if needed. 
- if (!IsTypeSupportedByTsStore(colDef->kvtype, typeOid)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in timeseries store", - format_type_with_typemod(typeOid, typmod)))); - } - } - - if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) - ATTypedTableRecursion(wqueue, rel, cmd, lockmode); - - if (recurse) - cmd->subtype = AT_AddColumnRecurse; - - if ((cmd->is_first || cmd->after_name != NULL) && (tab != NULL)) { - tab->rewrite |= AT_REWRITE_ALTER_PERSISTENCE; - } -} - -static bool contain_columndef_walker(Node* node, void* context) -{ - if (node == NULL) - return false; - - if (IsA(node, ColumnRef)) - return true; - - return raw_expression_tree_walker(node, (bool (*)())contain_columndef_walker, context); -} - -static void ATPrepCheckDefault(Node* node) -{ - if (contain_columndef_walker(node, NULL)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - (errmsg("default value cannot reference to a column"), - errhint("Perhaps the default value is enclosed in double quotes")))); - } -} - -static FORCE_INLINE void ATExecAppendDefValExpr(_in_ AttrNumber attnum, _in_ Expr* defval, _out_ AlteredTableInfo* tab, - ColumnDef *colDef, bool is_autoinc, bool is_addloc) -{ - NewColumnValue* newval; - - newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue)); - newval->attnum = attnum; - newval->expr = expression_planner(defval); - newval->is_addloc = is_addloc; - newval->newattnum = is_addloc ? 
-1 : 0; - newval->generate_attnum = 0; - - tab->newvals = lappend(tab->newvals, newval); - newval->is_generated = (colDef->generatedCol != '\0'); - newval->col_name = pstrdup(colDef->colname); - newval->is_autoinc = is_autoinc; - tab->rewrite |= AT_REWRITE_ALTER_PERSISTENCE; -} - -static int GetAfterColumnAttnum(Oid attrelid, const char *after_name) -{ - int afterattnum = -1; - HeapTuple tuple; - - tuple = SearchSysCacheAttName(attrelid, after_name); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("The %s column of relation %u is not exists.", after_name, attrelid))); - } - - afterattnum = ((Form_pg_attribute)GETSTRUCT(tuple))->attnum + 1; - ReleaseSysCache(tuple); - return afterattnum; -} - -static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endattnum, bool is_increase) -{ - if (node == NULL) { - return node; - } - - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? startattnum : endattnum; - - switch (nodeTag(node)) { - case T_Var: { - Var *var = (Var *)node; - Var *new_var = (Var *)copyObject(var); - if (var->varattno >= startattnum && var->varattno <= endattnum) { - new_var->varattno = is_increase ? (var->varattno + 1) : (var->varattno - 1); - new_var->varoattno = is_increase ? 
(var->varoattno + 1) : (var->varoattno - 1); - } else if (var->varattno == curattnum) { - new_var->varattno = newattnum; - new_var->varoattno = newattnum; - } - return (Node *)new_var; - } - case T_Const: - case T_Param: - case T_Rownum: - case T_CoerceToDomainValue: - case T_CaseTestExpr: { - return node; - } - case T_TypeCast: { - TypeCast *expr = (TypeCast *)node; - TypeCast *newexpr = (TypeCast *)copyObject(expr); - Node *expr_arg = UpdateVarattnoAfterAddColumn(expr->arg, startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_ArrayExpr: { - ArrayExpr *expr = (ArrayExpr *)node; - ArrayExpr *newexpr = (ArrayExpr *)copyObject(expr); - List *expr_elements = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->elements, - startattnum, endattnum, is_increase); - newexpr->elements = expr_elements; - return (Node *)newexpr; - } - case T_FuncExpr: { - FuncExpr *expr = (FuncExpr *)node; - FuncExpr *newexpr = (FuncExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_OpExpr: - case T_DistinctExpr: - case T_NullIfExpr: { - OpExpr *expr = (OpExpr *)node; - OpExpr *newexpr = (OpExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_BoolExpr: { - BoolExpr *expr = (BoolExpr *)node; - BoolExpr *newexpr = (BoolExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_ScalarArrayOpExpr: { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *)node; - ScalarArrayOpExpr *newexpr = (ScalarArrayOpExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node 
*)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_ArrayRef: { - ArrayRef *expr = (ArrayRef *)node; - ArrayRef *newexpr = (ArrayRef *)copyObject(expr); - List *expr_refupperindexpr = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->refupperindexpr, - startattnum, endattnum, is_increase); - List *expr_reflowerindexpr = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->reflowerindexpr, - startattnum, endattnum, is_increase); - Expr *expr_refexpr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->refexpr, - startattnum, endattnum, is_increase); - Expr *expr_refassgnexpr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->refassgnexpr, - startattnum, endattnum, is_increase); - newexpr->refupperindexpr = expr_refupperindexpr; - newexpr->reflowerindexpr = expr_reflowerindexpr; - newexpr->refexpr = expr_refexpr; - newexpr->refassgnexpr = expr_refassgnexpr; - return (Node *)newexpr; - } - case T_RowCompareExpr: { - RowCompareExpr *expr = (RowCompareExpr *)node; - RowCompareExpr *newexpr = (RowCompareExpr *)copyObject(expr); - List *expr_largs = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->largs, - startattnum, endattnum, is_increase); - List *expr_rargs = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->rargs, - startattnum, endattnum, is_increase); - newexpr->largs = expr_largs; - newexpr->rargs = expr_rargs; - return (Node *)newexpr; - } - case T_ConvertRowtypeExpr: { - ConvertRowtypeExpr *expr = (ConvertRowtypeExpr *)node; - ConvertRowtypeExpr *newexpr = (ConvertRowtypeExpr *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_FieldStore: { - FieldStore *expr = (FieldStore *)node; - FieldStore *newexpr = (FieldStore *)node; - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - 
List *expr_newvals = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->newvals, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - newexpr->newvals = expr_newvals; - return (Node *)newexpr; - } - case T_FieldSelect: { - FieldSelect *expr = (FieldSelect *)node; - FieldSelect *newexpr = (FieldSelect *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_MinMaxExpr: { - MinMaxExpr *expr = (MinMaxExpr *)node; - MinMaxExpr *newexpr = (MinMaxExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_BooleanTest: { - BooleanTest *expr = (BooleanTest *)node; - BooleanTest *newexpr = (BooleanTest *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_RowExpr: { - RowExpr *expr = (RowExpr *)node; - RowExpr *newexpr = (RowExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_XmlExpr: { - XmlExpr *expr = (XmlExpr *)node; - XmlExpr *newExpr = (XmlExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - List *expr_name_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->named_args, - startattnum, endattnum, is_increase); - newExpr->args = expr_args; - newExpr->named_args = expr_name_args; - return (Node *)newExpr; - } - case T_RelabelType: { - RelabelType *expr = (RelabelType *)node; - RelabelType *newexpr =(RelabelType *)copyObject(expr); - Expr *expr_arg = (Expr 
*)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_WindowFunc: { - WindowFunc *expr = (WindowFunc *)node; - WindowFunc *newexpr = (WindowFunc *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_ArrayCoerceExpr: { - ArrayCoerceExpr *expr = (ArrayCoerceExpr *)node; - ArrayCoerceExpr *newexpr = (ArrayCoerceExpr *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_PredictByFunction: { - PredictByFunction *expr = (PredictByFunction *)node; - PredictByFunction *newexpr = (PredictByFunction *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->model_args, - startattnum, endattnum, is_increase); - newexpr->model_args = expr_args; - return (Node *)newexpr; - } - case T_NamedArgExpr: { - NamedArgExpr *expr = (NamedArgExpr *)node; - NamedArgExpr *newexpr = (NamedArgExpr *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_CoerceViaIO: { - CoerceViaIO *expr = (CoerceViaIO *)node; - CoerceViaIO *newexpr = (CoerceViaIO *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_CoerceToDomain: { - CoerceToDomain *expr = (CoerceToDomain *)node; - CoerceToDomain *newexpr = (CoerceToDomain *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node 
*)newexpr; - } - case T_CoalesceExpr: { - CoalesceExpr* expr = (CoalesceExpr *)node; - CoalesceExpr* newexpr = (CoalesceExpr *)copyObject(expr); - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - newexpr->args = expr_args; - return (Node *)newexpr; - } - case T_NullTest: { - NullTest *expr = (NullTest *)node; - NullTest *newexpr = (NullTest *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - case T_CaseExpr: { - CaseExpr *expr = (CaseExpr *)node; - CaseExpr *newExpr = (CaseExpr *)copyObject(expr); - - List *expr_args = (List *)UpdateVarattnoAfterAddColumn((Node *)expr->args, - startattnum, endattnum, is_increase); - // case_default - Expr *expr_defresult = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->defresult, - startattnum, endattnum, is_increase); - // case_arg - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newExpr->arg = expr_arg; - newExpr->args = expr_args; - newExpr->defresult = expr_defresult; - return (Node *)newExpr; - } - case T_CaseWhen: { - CaseWhen *expr = (CaseWhen *)node; - CaseWhen *newexpr = (CaseWhen *)copyObject(expr); - Expr *expr_expr = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->expr, - startattnum, endattnum, is_increase); - Expr *expr_result = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->result, - startattnum, endattnum, is_increase); - newexpr->expr = expr_expr; - newexpr->result = expr_result; - return (Node *)newexpr; - } - case T_List: { - List *reslist = NIL; - ListCell *temp = NULL; - - foreach(temp, (List *)node) { - reslist = lappend(reslist, - UpdateVarattnoAfterAddColumn((Node *)lfirst(temp), - startattnum, endattnum, is_increase)); - } - return (Node *)reslist; - } - case T_PrefixKey: { - PrefixKey *expr = (PrefixKey *)node; - PrefixKey 
*newexpr = (PrefixKey *)copyObject(expr); - Expr *expr_arg = (Expr *)UpdateVarattnoAfterAddColumn((Node *)expr->arg, - startattnum, endattnum, is_increase); - newexpr->arg = expr_arg; - return (Node *)newexpr; - } - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized node type: %d for first|after col_name", (int)nodeTag(node)))); - break; - } - return NULL; -} - -/* - * update pg_description - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgDescriptionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - Relation desc_rel; - HeapTuple desc_tuple; - ScanKeyData key[3]; - SysScanDesc scan; - Form_pg_description desc_form; - - desc_rel = heap_open(DescriptionRelationId, RowExclusiveLock); - - for (int i = (is_increase ? endattnum : startattnum); - (is_increase ? i >= startattnum : i <= endattnum); (is_increase ? i-- : i++)) { - ScanKeyInit(&key[0], Anum_pg_description_objoid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[1], Anum_pg_description_classoid, BTEqualStrategyNumber, F_OIDEQ, RelationRelationId); - ScanKeyInit(&key[2], Anum_pg_description_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(i)); - - scan = systable_beginscan(desc_rel, DescriptionObjIndexId, true, NULL, 3, key); - - while (HeapTupleIsValid(desc_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_description] = { 0 }; - bool nulls[Natts_pg_description] = { 0 }; - bool replaces[Natts_pg_description] = { 0 }; - HeapTuple new_desc_tuple; - - desc_form = (Form_pg_description)GETSTRUCT(desc_tuple); - - values[Anum_pg_description_objsubid - 1] = is_increase ? 
Int32GetDatum(desc_form->objsubid + 1) : - Int32GetDatum(desc_form->objsubid - 1); - replaces[Anum_pg_description_objsubid - 1] = true; - - new_desc_tuple = heap_modify_tuple(desc_tuple, RelationGetDescr(desc_rel), values, nulls, replaces); - simple_heap_update(desc_rel, &new_desc_tuple->t_self, new_desc_tuple); - CatalogUpdateIndexes(desc_rel, new_desc_tuple); - - heap_freetuple_ext(new_desc_tuple); - } - systable_endscan(scan); - } - - heap_close(desc_rel, RowExclusiveLock); -} - -/* - * update pg_attribute. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgAttributeFirstAfter(Relation attr_rel, Oid attrelid, int startattnum, int endattnum, - bool is_increase) -{ - ScanKeyData key[2]; - HeapTuple attr_tuple; - SysScanDesc scan; - Form_pg_attribute attr_form; - - for (int i = (is_increase ? endattnum : startattnum); - (is_increase ? i >= startattnum : i <= endattnum); (is_increase ? i-- : i++)) { - AttrNumber myattnum = (AttrNumber)i; - ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attrelid)); - ScanKeyInit(&key[1], Anum_pg_attribute_attnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(myattnum)); - - scan = systable_beginscan(attr_rel, AttributeRelidNumIndexId, true, NULL, 2, key); - - /* only one */ - while (HeapTupleIsValid(attr_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_attribute] = { 0 }; - bool nulls[Natts_pg_attribute] = { 0 }; - bool replaces[Natts_pg_attribute] = { 0 }; - errno_t rc = 0; - HeapTuple new_attr_tuple; - char newattname[NAMEDATALEN]; - - attr_form = (Form_pg_attribute)GETSTRUCT(attr_tuple); - - // update pg_attribute_attnum - if (is_increase) { - values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attr_form->attnum + 1); - replaces[Anum_pg_attribute_attnum - 1] = true; - } else { - values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attr_form->attnum - 1); - replaces[Anum_pg_attribute_attnum - 1] = 
true; - } - - // if exists dropped column, update pg_attribute_attname of dropped column - if (attr_form->attisdropped) { - if (is_increase) { - rc = snprintf_s(newattname, sizeof(newattname), - sizeof(newattname) - 1, "........pg.dropped.%d........", attr_form->attnum + 1); - securec_check_ss(rc, "\0", "\0"); - } else { - rc = snprintf_s(newattname, sizeof(newattname), - sizeof(newattname) - 1, "........pg.dropped.%d........", attr_form->attnum - 1); - securec_check_ss(rc, "\0", "\0"); - } - - values[Anum_pg_attribute_attname - 1] = NameGetDatum(newattname); - replaces[Anum_pg_attribute_attname - 1] = true; - } - - new_attr_tuple = heap_modify_tuple(attr_tuple, RelationGetDescr(attr_rel), values, nulls, replaces); - simple_heap_update(attr_rel, &new_attr_tuple->t_self, new_attr_tuple); - CatalogUpdateIndexes(attr_rel, new_attr_tuple); - - heap_freetuple_ext(new_attr_tuple); - } - systable_endscan(scan); - } -} - -/* - * update pg_index. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - Relation index_rel; - HeapTuple index_tuple; - ScanKeyData key; - SysScanDesc scan; - Form_pg_index index_form; - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? startattnum : endattnum; - - /* Prepare to scan pg_index for entries having indrelid = this rel. 
*/ - ScanKeyInit(&key, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - index_rel = heap_open(IndexRelationId, RowExclusiveLock); - scan = systable_beginscan(index_rel, IndexIndrelidIndexId, true, NULL, 1, &key); - - while (HeapTupleIsValid(index_tuple = systable_getnext(scan))) { - int numatts; - bool is_null = false; - Datum values[Natts_pg_index] = { 0 }; - bool nulls[Natts_pg_index] = { 0 }; - bool replaces[Natts_pg_index] = { 0 }; - int2vector *indkey = NULL; - int2vector *new_indkey = NULL; - HeapTuple new_index_tuple; - - index_form = (Form_pg_index)GETSTRUCT(index_tuple); - numatts = index_form->indnatts; - - // update pg_index_indkey - Datum indkey_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indkey, &is_null); - AssertEreport(!is_null, MOD_OPT, ""); - indkey = (int2vector *)DatumGetPointer(indkey_datum); - Assert(indkey->dim1 == numatts); - new_indkey = buildint2vector(NULL, numatts); - for (int i = 0; i < numatts; i++) { - if (indkey->values[i] >= startattnum && indkey->values[i] <= endattnum) { - new_indkey->values[i] = is_increase ? 
(indkey->values[i] + 1) : (indkey->values[i] - 1); - } else if (indkey->values[i] == curattnum) { - new_indkey->values[i] = newattnum; - } else { - new_indkey->values[i] = indkey->values[i]; - } - } - values[Anum_pg_index_indkey - 1] = PointerGetDatum(new_indkey); - replaces[Anum_pg_index_indkey - 1] = true; - - // udpate pg_index_indexprs - if (!heap_attisnull(index_tuple, Anum_pg_index_indexprs, NULL)) { - Datum exprs_datum; - List *indexprs = NIL; - List *new_indexprs = NIL; - char* exprs_string = NULL; - - exprs_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indexprs, &is_null); - AssertEreport(!is_null, MOD_OPT, ""); - exprs_string = TextDatumGetCString(exprs_datum); - indexprs = (List *)stringToNode(exprs_string); - - new_indexprs = (List *)UpdateVarattnoAfterAddColumn((Node *)indexprs, - startattnum, endattnum, is_increase); - exprs_string = nodeToString(new_indexprs); - values[Anum_pg_index_indexprs - 1] = CStringGetTextDatum(exprs_string); - replaces[Anum_pg_index_indexprs - 1] = true; - pfree_ext(exprs_string); - } - - // update pg_index_indpred - if (!heap_attisnull(index_tuple, Anum_pg_index_indpred, NULL)) { - Datum pred_datum; - List *indpred = NIL; - List *new_indpred = NIL; - char *pred_string = NULL; - - pred_datum = SysCacheGetAttr(INDEXRELID, index_tuple, Anum_pg_index_indpred, &is_null); - AssertEreport(!is_null, MOD_OPT, ""); - pred_string = TextDatumGetCString(pred_datum); - indpred = (List *)stringToNode(pred_string); - - new_indpred = (List *)UpdateVarattnoAfterAddColumn((Node *)indpred, - startattnum, endattnum, is_increase); - pred_string = nodeToString(new_indpred); - values[Anum_pg_index_indpred - 1] = CStringGetTextDatum(pred_string); - replaces[Anum_pg_index_indpred - 1] = true; - pfree_ext(pred_string); - } - - new_index_tuple = heap_modify_tuple(index_tuple, RelationGetDescr(index_rel), values, nulls, replaces); - simple_heap_update(index_rel, &new_index_tuple->t_self, new_index_tuple); - 
CatalogUpdateIndexes(index_rel, new_index_tuple); - - pfree_ext(new_indkey); - heap_freetuple_ext(new_index_tuple); - } - - systable_endscan(scan); - heap_close(index_rel, RowExclusiveLock); -} - -/* - * update pg_constraint. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgConstraintFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - ScanKeyData key; - HeapTuple con_tuple; - Relation con_rel; - SysScanDesc scan; - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? startattnum : endattnum; - - ScanKeyInit(&key, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - con_rel = heap_open(ConstraintRelationId, RowExclusiveLock); - scan = systable_beginscan(con_rel, ConstraintRelidIndexId, true, NULL, 1, &key); - - while (HeapTupleIsValid(con_tuple = systable_getnext(scan))) { - bool is_null = false; - ArrayType *conkey_array = NULL; - ArrayType *conincluding_array = NULL; - Datum values[Natts_pg_constraint] = { 0 }; - bool nulls[Natts_pg_constraint] = { 0 }; - bool replaces[Natts_pg_constraint] = { 0 }; - HeapTuple new_con_tuple; - - // update pg_constraint_conkey - Datum conkeyDatum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conkey, &is_null); - if (!is_null) { - ArrayType* con_key_arr = DatumGetArrayTypeP(conkeyDatum); - int con_key_num = ARR_DIMS(con_key_arr)[0]; - int16 *con_key_attnums = (int16 *)ARR_DATA_PTR(con_key_arr); - Datum *conkey = (Datum *)palloc(con_key_num * sizeof(Datum)); - - for (int i = 0; i < con_key_num; i++) { - if (con_key_attnums[i] >= startattnum && con_key_attnums[i] <= endattnum) { - con_key_attnums[i] = is_increase ? 
(con_key_attnums[i] + 1) : (con_key_attnums[i] - 1); - } else if (con_key_attnums[i] == curattnum) { - con_key_attnums[i] = newattnum; - } - conkey[i] = Int16GetDatum(con_key_attnums[i]); - } - conkey_array = construct_array(conkey, con_key_num, INT2OID, 2, true, 's'); - values[Anum_pg_constraint_conkey - 1] = PointerGetDatum(conkey_array); - replaces[Anum_pg_constraint_conkey - 1] = true; - } - - // update pg_constraint_conincluding - Datum con_including_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conincluding, &is_null); - if (!is_null) { - ArrayType* con_including_arr = DatumGetArrayTypeP(con_including_datum); - int con_including_num = ARR_DIMS(con_including_arr)[0]; - int16* con_including_attnums = (int16 *)ARR_DATA_PTR(con_including_arr); - Datum* conincluding = (Datum *)palloc(con_including_num * sizeof(Datum)); - - for (int i = 0; i < con_including_num; i++) { - if (con_including_attnums[i] >= startattnum && con_including_attnums[i] <= endattnum) { - con_including_attnums[i] = is_increase ? 
- (con_including_attnums[i] + 1) : (con_including_attnums[i] - 1); - } else if (con_including_attnums[i] == curattnum) { - con_including_attnums[i] = newattnum; - } - conincluding[i] = Int16GetDatum(con_including_attnums[i]); - } - conincluding_array = construct_array(conincluding, con_including_num, INT2OID, 2, true, 's'); - values[Anum_pg_constraint_conincluding - 1] = PointerGetDatum(conincluding_array); - replaces[Anum_pg_constraint_conincluding - 1] = true; - } - - // update pg_constraint_conbin - Datum conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_conbin, &is_null); - if (!is_null) { - char *conbin_string = NULL; - Node *conbin = NULL; - Node *new_conbin = NULL; - - conbin_string = TextDatumGetCString(conbin_datum); - conbin = (Node*)stringToNode(conbin_string); - - new_conbin = UpdateVarattnoAfterAddColumn(conbin, startattnum, endattnum, is_increase); - conbin_string = nodeToString(new_conbin); - values[Anum_pg_constraint_conbin - 1] = CStringGetTextDatum(conbin_string); - replaces[Anum_pg_constraint_conbin - 1] = true; - pfree_ext(conbin_string); - } - - new_con_tuple = heap_modify_tuple(con_tuple, RelationGetDescr(con_rel), values, nulls, replaces); - simple_heap_update(con_rel, &new_con_tuple->t_self, new_con_tuple); - CatalogUpdateIndexes(con_rel, new_con_tuple); - - pfree_ext(conkey_array); - pfree_ext(conincluding_array); - heap_freetuple_ext(new_con_tuple); - } - - systable_endscan(scan); - heap_close(con_rel, RowExclusiveLock); -} - -/* - * update pg_constraint confkey. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgConstraintConfkeyFirstAfter(Relation rel, int startattnum, int endattnum, - bool is_increase) -{ - ScanKeyData key; - HeapTuple con_tuple; - Relation con_rel; - SysScanDesc scan; - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? 
startattnum : endattnum; - - ScanKeyInit(&key, Anum_pg_constraint_confrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - con_rel = heap_open(ConstraintRelationId, RowExclusiveLock); - scan = systable_beginscan(con_rel, InvalidOid, false, NULL, 1, &key); - - while (HeapTupleIsValid(con_tuple = systable_getnext(scan))) { - bool is_null = false; - ArrayType* confkey_array = NULL; - Datum values[Natts_pg_constraint] = { 0 }; - bool nulls[Natts_pg_constraint] = { 0 }; - bool replaces[Natts_pg_constraint] = { 0 }; - HeapTuple new_con_tuple; - - // update pg_constraint_confkey - Datum confkey_datum = SysCacheGetAttr(CONSTROID, con_tuple, Anum_pg_constraint_confkey, &is_null); - if (!is_null) { - ArrayType* conf_key_rr = DatumGetArrayTypeP(confkey_datum); - int confkey_num = ARR_DIMS(conf_key_rr)[0]; - int16 *confkey_attnums = (int16 *)ARR_DATA_PTR(conf_key_rr); - Datum *confkey = (Datum *)palloc(confkey_num * sizeof(Datum)); - - for (int i = 0; i < confkey_num; i++) { - if (confkey_attnums[i] >= startattnum && confkey_attnums[i] <= endattnum) { - confkey_attnums[i] = is_increase ? (confkey_attnums[i] + 1) : (confkey_attnums[i] - 1); - } else if (confkey_attnums[i] == curattnum) { - confkey_attnums[i] = newattnum; - } - confkey[i] = Int16GetDatum(confkey_attnums[i]); - } - confkey_array = construct_array(confkey, confkey_num, INT2OID, 2, true, 's'); - values[Anum_pg_constraint_confkey - 1] = PointerGetDatum(confkey_array); - replaces[Anum_pg_constraint_confkey - 1] = true; - } - - new_con_tuple = heap_modify_tuple(con_tuple, RelationGetDescr(con_rel), values, nulls, replaces); - simple_heap_update(con_rel, &new_con_tuple->t_self, new_con_tuple); - CatalogUpdateIndexes(con_rel, new_con_tuple); - - pfree_ext(confkey_array); - heap_freetuple_ext(new_con_tuple); - } - - systable_endscan(scan); - heap_close(con_rel, RowExclusiveLock); -} - -/* - * update generated column information for pg_attrdef. - * 1. 
add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdateGenerateColFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - ScanKeyData key; - HeapTuple def_tuple; - Relation def_rel; - SysScanDesc scan; - - def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - ScanKeyInit(&key, Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(def_rel, AttrDefaultIndexId, true, NULL, 1, &key); - while (HeapTupleIsValid(def_tuple = systable_getnext(scan))) { - bool is_null = false; - char generated_col = '\0'; - Datum values[Natts_pg_attrdef] = { 0 }; - bool nulls[Natts_pg_attrdef] = { 0 }; - bool replaces[Natts_pg_attrdef] = { 0 }; - HeapTuple new_def_tuple; - - Datum adgencol = fastgetattr(def_tuple, Anum_pg_attrdef_adgencol, def_rel->rd_att, &is_null); - if (!is_null) { - generated_col = DatumGetChar(adgencol); - } - - // update pg_attrdef_adbin - if (generated_col == ATTRIBUTE_GENERATED_STORED) { - Datum adbin_datum; - Node *adbin = NULL; - Node *new_adbin = NULL; - char *adbin_string = NULL; - - adbin_datum = fastgetattr(def_tuple, Anum_pg_attrdef_adbin, def_rel->rd_att, &is_null); - AssertEreport(!is_null, MOD_OPT, ""); - adbin_string = TextDatumGetCString(adbin_datum); - adbin = (Node *)stringToNode(adbin_string); - - new_adbin = UpdateVarattnoAfterAddColumn(adbin, startattnum, endattnum, is_increase); - adbin_string = nodeToString(new_adbin); - values[Anum_pg_attrdef_adbin - 1] = CStringGetTextDatum(adbin_string); - replaces[Anum_pg_attrdef_adbin - 1] = true; - pfree_ext(adbin_string); - } else { - continue; - } - - new_def_tuple = heap_modify_tuple(def_tuple, RelationGetDescr(def_rel), values, nulls, replaces); - simple_heap_update(def_rel, &new_def_tuple->t_self, new_def_tuple); - CatalogUpdateIndexes(def_rel, new_def_tuple); - - heap_freetuple_ext(new_def_tuple); - } - systable_endscan(scan); - 
heap_close(def_rel, RowExclusiveLock); -} - - -/* - * update the exists index information. - * 1. add column with first or after col_name. - */ -static void UpdateIndexFirstAfter(Relation rel) -{ - Relation pg_index_rel, table_index_rel; - HeapTuple index_tuple; - ScanKeyData key; - SysScanDesc scan; - Form_pg_index index_form; - - /* Prepare to scan pg_index for entries having indrelid = this rel. */ - ScanKeyInit(&key, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - pg_index_rel = heap_open(IndexRelationId, RowExclusiveLock); - scan = systable_beginscan(pg_index_rel, IndexIndrelidIndexId, true, NULL, 1, &key); - - while (HeapTupleIsValid(index_tuple = systable_getnext(scan))) { - index_form = (Form_pg_index)GETSTRUCT(index_tuple); - - table_index_rel = index_open(index_form->indexrelid, RowExclusiveLock); - - table_index_rel->rd_index = index_form; - - index_close(table_index_rel, RowExclusiveLock); - } - systable_endscan(scan); - heap_close(pg_index_rel, RowExclusiveLock); -} - -/* - * update pg_attrdef. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgAttrdefFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - ScanKeyData key[2]; - HeapTuple def_tuple; - Relation def_rel; - SysScanDesc scan; - Form_pg_attrdef def_form; - - def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - - for (int i = (is_increase ? endattnum : startattnum); - (is_increase ? i >= startattnum : i <= endattnum); (is_increase ? 
i-- : i++)) { - AttrNumber myattnum = (AttrNumber)i; - ScanKeyInit(&key[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(myattnum)); - - scan = systable_beginscan(def_rel, AttrDefaultIndexId, true, NULL, 2, key); - - // only one - while (HeapTupleIsValid(def_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_attrdef] = { 0 }; - bool nulls[Natts_pg_attrdef] = { 0 }; - bool replaces[Natts_pg_attrdef] = { 0 }; - HeapTuple new_def_tuple; - - def_form = (Form_pg_attrdef)GETSTRUCT(def_tuple); - - values[Anum_pg_attrdef_adnum - 1] = is_increase ? Int16GetDatum(def_form->adnum + 1) : - Int16GetDatum(def_form->adnum - 1); - replaces[Anum_pg_attrdef_adnum - 1] = true; - - new_def_tuple = heap_modify_tuple(def_tuple, RelationGetDescr(def_rel), values, nulls, replaces); - simple_heap_update(def_rel, &new_def_tuple->t_self, new_def_tuple); - CatalogUpdateIndexes(def_rel, new_def_tuple); - - heap_freetuple_ext(new_def_tuple); - } - systable_endscan(scan); - } - heap_close(def_rel, RowExclusiveLock); -} - -/* - * update pg_depend. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgDependFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - ScanKeyData key[2]; - HeapTuple dep_tuple; - Relation dep_rel; - SysScanDesc scan; - Form_pg_depend dep_form; - - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? 
startattnum : endattnum; - - dep_rel = heap_open(DependRelationId, RowExclusiveLock); - - // find pg_depend based on refobjid and refobjsubid, then update refobjsubid - ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTGreaterStrategyNumber, F_INT4GT, Int32GetDatum(0)); - - scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 2, key); - while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_depend] = { 0 }; - bool nulls[Natts_pg_depend] = { 0 }; - bool replaces[Natts_pg_depend] = { 0 }; - HeapTuple new_dep_tuple; - - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - - if (dep_form->refobjsubid >= startattnum && dep_form->refobjsubid <= endattnum) { - values[Anum_pg_depend_refobjsubid - 1] = is_increase ? - Int32GetDatum(dep_form->refobjsubid + 1) : Int32GetDatum(dep_form->refobjsubid - 1); - replaces[Anum_pg_depend_refobjsubid - 1] = true; - } else if (dep_form->refobjsubid == curattnum) { - values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newattnum); - replaces[Anum_pg_depend_refobjsubid - 1] = true; - } - - new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(dep_rel), values, nulls, replaces); - simple_heap_update(dep_rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(dep_rel, new_dep_tuple); - - heap_freetuple_ext(new_dep_tuple); - } - systable_endscan(scan); - heap_close(dep_rel, RowExclusiveLock); - - CommandCounterIncrement(); - - dep_rel = heap_open(DependRelationId, RowExclusiveLock); - - // find pg_depend based on objid and objsubid, then update objsubid - ScanKeyInit(&key[0], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[1], Anum_pg_depend_objsubid, BTGreaterStrategyNumber, F_INT4GT, Int32GetDatum(0)); - - scan = systable_beginscan(dep_rel, DependDependerIndexId, true, NULL, 2, 
key); - while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_depend] = { 0 }; - bool nulls[Natts_pg_depend] = { 0 }; - bool replaces[Natts_pg_depend] = { 0 }; - HeapTuple new_dep_tuple; - - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - - /* the situation has been updated in sqlcmd_update_depend_refobjsubid_first_after */ - if (dep_form->refobjsubid == -1 && dep_form->refobjid == RelationGetRelid(rel)) { - continue; - } - - if (dep_form->objsubid >= startattnum && dep_form->objsubid <= endattnum) { - values[Anum_pg_depend_objsubid - 1] = is_increase ? Int32GetDatum(dep_form->objsubid + 1) : - Int32GetDatum(dep_form->objsubid - 1); - replaces[Anum_pg_depend_objsubid - 1] = true; - } else if (dep_form->objsubid == curattnum) { - values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(newattnum); - replaces[Anum_pg_depend_objsubid - 1] = true; - } - - new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(dep_rel), values, nulls, replaces); - simple_heap_update(dep_rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(dep_rel, new_dep_tuple); - - heap_freetuple_ext(new_dep_tuple); - } - systable_endscan(scan); - heap_close(dep_rel, RowExclusiveLock); -} - -/* - * update pg_partition. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase, - bool is_modified, bool *has_partition) -{ - ScanKeyData key; - HeapTuple par_tuple; - Relation par_rel; - SysScanDesc scan; - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? 
startattnum : endattnum; - - par_rel = heap_open(PartitionRelationId, RowExclusiveLock); - - ScanKeyInit(&key, Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - - scan = systable_beginscan(par_rel, PartitionParentOidIndexId, true, NULL, 1, &key); - while (HeapTupleIsValid(par_tuple = systable_getnext(scan))) { - bool is_null = false; - - // update pg_partition_partkey - Datum partkey_datum = SysCacheGetAttr(PARTRELID, par_tuple, Anum_pg_partition_partkey, &is_null); - if (!is_null) { - Datum values[Natts_pg_partition] = { 0 }; - bool nulls[Natts_pg_partition] = { 0 }; - bool replaces[Natts_pg_partition] = { 0 }; - int2vector *partkey = NULL; - int2vector *new_partKey = NULL; - HeapTuple new_par_tuple; - - partkey = (int2vector *)DatumGetPointer(partkey_datum); - new_partKey = buildint2vector(NULL, partkey->dim1); - for (int i = 0; i < partkey->dim1; i++) { - if (partkey->values[i] >= startattnum && partkey->values[i] <= endattnum) { - new_partKey->values[i] = is_increase ? 
(partkey->values[i] + 1) : (partkey->values[i] - 1); - } else if (partkey->values[i] == curattnum) { - if (is_modified) { - if (has_partition != NULL) { - *has_partition = true; - } - new_partKey->values[i] = 0; - } else { - new_partKey->values[i] = newattnum; - } - } else { - new_partKey->values[i] = partkey->values[i]; - } - } - values[Anum_pg_partition_partkey - 1] = PointerGetDatum(new_partKey); - replaces[Anum_pg_partition_partkey - 1] = true; - - new_par_tuple = heap_modify_tuple(par_tuple, RelationGetDescr(par_rel), values, nulls, replaces); - simple_heap_update(par_rel, &new_par_tuple->t_self, new_par_tuple); - CatalogUpdateIndexes(par_rel, new_par_tuple); - - pfree_ext(new_partKey); - heap_freetuple_ext(new_par_tuple); - } - } - systable_endscan(scan); - heap_close(par_rel, RowExclusiveLock); -} - - -static ViewInfoForAdd *GetViewInfoFirstAfter(const char *rel_name, Oid objid, bool keep_star) -{ - ScanKeyData entry; - ViewInfoForAdd *info = NULL; - - ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid)); - - Relation rewrite_rel = heap_open(RewriteRelationId, AccessShareLock); - - SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry); - - HeapTuple rewrite_tup = systable_getnext(rewrite_scan); - - if (HeapTupleIsValid(rewrite_tup)) { - Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup); - - if (strcmp(NameStr(rewrite_form->rulename), ViewSelectRuleName) == 0) { - bool is_null = false; - - Datum ev_actiom_datum = fastgetattr(rewrite_tup, Anum_pg_rewrite_ev_action, rewrite_rel->rd_att, &is_null); - if (!is_null) { - StringInfoData buf; - - initStringInfo(&buf); - - Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock); - char *ev_action_string = TextDatumGetCString(ev_actiom_datum); - List *ev_action = (List *)stringToNode(ev_action_string); - Query* query = (Query*)linitial(ev_action); - - get_query_def(query, - &buf, - NIL, 
- RelationGetDescr(ev_relation), - 0, - -1, - 0, -#ifdef PGXC - false, - false, - NULL, -#endif /* PGXC */ - false, - keep_star); - appendStringInfo(&buf, ";"); - - info = (ViewInfoForAdd *)palloc0(sizeof(ViewInfoForAdd)); - info->ev_class = rewrite_form->ev_class; - info->query_string = pstrdup(buf.data); - - heap_close(ev_relation, AccessShareLock); - - FreeStringInfo(&buf); - pfree_ext(ev_action_string); - } - } else { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-supported feature"), - errdetail("rule %s depend on %s, alter table %s add ... first|after colname is not supported", - NameStr(rewrite_form->rulename), rel_name, rel_name))); - } - } - systable_endscan(rewrite_scan); - heap_close(rewrite_rel, AccessShareLock); - - return info; -} - -/* - * get sql for view. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static List *CheckPgRewriteFirstAfter(Relation rel) -{ - ScanKeyData key[2]; - HeapTuple dep_tuple; - SysScanDesc dep_scan; - Form_pg_depend dep_form; - Oid pre_objid = 0; - List *query_str = NIL; - - Relation dep_rel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit( - &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit( - &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - - dep_scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(dep_tuple = systable_getnext(dep_scan))) { - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - - if (dep_form->classid == RewriteRelationId) { - ListCell* viewinfo = NULL; - bool is_exist = false; - - if (dep_form->objid == pre_objid) { - continue; - } - - pre_objid = dep_form->objid; - - ViewInfoForAdd *info = GetViewInfoFirstAfter(NameStr(rel->rd_rel->relname), dep_form->objid); - - foreach (viewinfo, query_str) { - ViewInfoForAdd *oldInfo = 
(ViewInfoForAdd *)lfirst(viewinfo); - if (info != NULL && oldInfo->ev_class == info->ev_class) { - is_exist = true; - break; - } - } - - if (info != NULL && !is_exist) { - query_str = lappend(query_str, info); - } - } - } - systable_endscan(dep_scan); - heap_close(dep_rel, AccessShareLock); - return query_str; -} - -/* - * create or replace view when the table has view. - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void ReplaceViewQueryFirstAfter(List *query_str) -{ - if (query_str != NIL) { - ListCell* viewinfo = NULL; - - foreach (viewinfo, query_str) { - Query* query = NULL; - List* parsetree_list = NULL; - Node* parsetree = NULL; - - ViewInfoForAdd *info = (ViewInfoForAdd *)lfirst(viewinfo); - parsetree_list = pg_parse_query(info->query_string); - if (list_length(parsetree_list) != 1) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("this is not a view"))); - } - - parsetree = (Node *)linitial(parsetree_list); - query = parse_analyze(parsetree, info->query_string, NULL, 0); - StoreViewQuery(info->ev_class, query, true); - } - } -} - -/* - * update pg_trigger - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - if (!rel->rd_rel->relhastriggers) { - return; - } - ScanKeyData key; - HeapTuple tri_tuple; - Relation tri_rel; - SysScanDesc scan; - int curattnum = is_increase ? endattnum + 1 : startattnum - 1; - int newattnum = is_increase ? 
startattnum : endattnum; - - tri_rel = heap_open(TriggerRelationId, RowExclusiveLock); - - ScanKeyInit(&key, Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - - scan = systable_beginscan(tri_rel, TriggerRelidNameIndexId, true, NULL, 1, &key); - while (HeapTupleIsValid(tri_tuple = systable_getnext(scan))) { - bool is_null = false; - Datum values[Natts_pg_trigger] = { 0 }; - bool nulls[Natts_pg_trigger] = { 0 }; - bool replaces[Natts_pg_trigger] = { 0 }; - HeapTuple new_tri_tuple; - - Datum tgattr_datum = fastgetattr(tri_tuple, Anum_pg_trigger_tgattr, tri_rel->rd_att, &is_null); - if (!is_null) { - int2vector *tgattr = (int2vector *)DatumGetPointer(tgattr_datum); - int2vector *new_tgattr = buildint2vector(NULL, tgattr->dim1); - for (int i = 0; i < tgattr->dim1; i++) { - if (tgattr->values[i] >= startattnum && tgattr->values[i] <= endattnum) { - new_tgattr->values[i] = is_increase ? (tgattr->values[i] + 1) : (tgattr->values[i] - 1); - } else if (tgattr->values[i] == curattnum) { - new_tgattr->values[i] = newattnum; - } else { - new_tgattr->values[i] = tgattr->values[i]; - } - } - - values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(new_tgattr); - replaces[Anum_pg_trigger_tgattr - 1] = true; - } - - Datum tgqual_datum = fastgetattr(tri_tuple, Anum_pg_trigger_tgqual, tri_rel->rd_att, &is_null); - if (!is_null) { - char *tgqual_string = NULL; - Node *tgqual = NULL; - Node *new_tgqual = NULL; - - tgqual_string = TextDatumGetCString(tgqual_datum); - tgqual = (Node *)stringToNode(tgqual_string); - - new_tgqual = UpdateVarattnoAfterAddColumn(tgqual, startattnum, endattnum, is_increase); - tgqual_string = nodeToString(new_tgqual); - values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(tgqual_string); - replaces[Anum_pg_trigger_tgqual - 1] = true; - pfree_ext(tgqual_string); - } - - new_tri_tuple = heap_modify_tuple(tri_tuple, RelationGetDescr(tri_rel), values, nulls, replaces); - simple_heap_update(tri_rel, 
&new_tri_tuple->t_self, new_tri_tuple); - CatalogUpdateIndexes(tri_rel, new_tri_tuple); - } - - systable_endscan(scan); - heap_close(tri_rel, RowExclusiveLock); -} - -/* - * update pg_rlspolicy - * 1. add column with first or after col_name. - * 2. modify column to first or after column. - */ -static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) -{ - ScanKeyData key; - HeapTuple rls_tuple; - Relation rls_rel; - SysScanDesc scan; - - rls_rel = heap_open(RlsPolicyRelationId, RowExclusiveLock); - - ScanKeyInit(&key, Anum_pg_rlspolicy_polrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - - scan = systable_beginscan(rls_rel, PgRlspolicyPolrelidPolnameIndex, true, NULL, 1, &key); - while (HeapTupleIsValid(rls_tuple = systable_getnext(scan))) { - bool is_null = false; - Datum values[Natts_pg_rlspolicy] = { 0 }; - bool nulls[Natts_pg_rlspolicy] = { 0 }; - bool replaces[Natts_pg_rlspolicy] = { 0 }; - HeapTuple new_rls_tuple; - - Datum polqual_datum = heap_getattr(rls_tuple, Anum_pg_rlspolicy_polqual, rls_rel->rd_att, &is_null); - if (!is_null) { - char *polqual_string = NULL; - Node *polqual = NULL; - Node *new_polqual = NULL; - - polqual_string = TextDatumGetCString(polqual_datum); - polqual = (Node *)stringToNode(polqual_string); - - new_polqual = UpdateVarattnoAfterAddColumn(polqual, startattnum, endattnum, is_increase); - polqual_string = nodeToString(new_polqual); - values[Anum_pg_rlspolicy_polqual - 1] = CStringGetTextDatum(polqual_string); - replaces[Anum_pg_rlspolicy_polqual - 1] = true; - pfree_ext(polqual_string); - } - - new_rls_tuple = heap_modify_tuple(rls_tuple, RelationGetDescr(rls_rel), values, nulls, replaces); - simple_heap_update(rls_rel, &new_rls_tuple->t_self, new_rls_tuple); - CatalogUpdateIndexes(rls_rel, new_rls_tuple); - } - - systable_endscan(scan); - heap_close(rls_rel, RowExclusiveLock); -} - -#ifdef ENABLE_MOT -static void 
ATExecMOTAlterTable(AlterForeingTableCmd* cmd) -{ - FdwRoutine* fdwroutine; - - if (cmd->rel->rd_fdwroutine != nullptr) { - fdwroutine = cmd->rel->rd_fdwroutine; - } else { - fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(cmd->rel)); - } - - if (fdwroutine->ValidateTableDef != nullptr) { - fdwroutine->ValidateTableDef((Node*)cmd); - } -} -#endif - -static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, ColumnDef* colDef, bool isOid, - bool recurse, bool recursing, bool is_first, char *after_name, LOCKMODE lockmode) -{ - Oid myrelid = RelationGetRelid(rel); - Relation pgclass = NULL; - Relation attrdesc = NULL; - Relation cedesc = NULL; - HeapTuple reltup = NULL; - FormData_pg_attribute attribute; - int newattnum = 0; - int currattnum = 0; - char relkind; - HeapTuple typeTuple; - Oid typeOid = InvalidOid; - int32 typmod = -1; - Oid collOid = InvalidOid; - Form_pg_type tform = NULL; - Expr* defval = NULL; - List* children = NIL; - ListCell* child = NULL; - AclResult aclresult; - bool isDfsTable = RelationIsPAXFormat(rel); - ObjectAddress address; - bool is_addloc = is_first || after_name != NULL; - List *query_str = NIL; - - /* At top level, permission check was done in ATPrepCmd, else do it */ - if (recursing) - ATSimplePermissions(rel, ATT_TABLE); - - attrdesc = heap_open(AttributeRelationId, RowExclusiveLock); - - if (is_addloc) { - if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-supported feature"), - errdetail("foreign table is not supported for add column first|after columnName"))); - } - - if (RelationIsColumnFormat(rel)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-supported feature"), - errdetail("column orientated table is not supported for add column first|after columnName"))); - } - } - - /* - * if adding encrypted column - */ - CeHeapInfo* ceHeapInfo = NULL; - if (colDef->clientLogicColumnRef != NULL) { - if 
(is_addloc) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-supported feature"), - errdetail("encryption column is not supported for add column first|after columnName"))); - } - if (colDef->clientLogicColumnRef != NULL) { - ceHeapInfo = (CeHeapInfo *)palloc(sizeof(CeHeapInfo)); - process_encrypted_columns(colDef, ceHeapInfo); - } - cedesc = heap_open(ClientLogicCachedColumnsId, RowExclusiveLock); - } - - /* - * Are we adding the column to a recursion child? If so, check whether to - * merge with an existing definition for the column. If we do merge, we - * must not recurse. Children will already have the column, and recursing - * into them would mess up attinhcount. - */ - if (colDef->inhcount > 0) { - HeapTuple tuple; - - /* Does child already have a column by this name? */ - tuple = SearchSysCacheCopyAttName(myrelid, colDef->colname); - if (HeapTupleIsValid(tuple)) { - Form_pg_attribute childatt = (Form_pg_attribute)GETSTRUCT(tuple); - Oid ctypeId = InvalidOid; - int32 ctypmod = -1; - Oid ccollid = InvalidOid; - - /* Child column must match on type, typmod, and collation */ - typenameTypeIdAndMod(NULL, colDef->typname, &ctypeId, &ctypmod); - if (ctypeId != childatt->atttypid || ctypmod != childatt->atttypmod) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("child table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(rel), - colDef->colname))); - ccollid = GetColumnDefCollation(NULL, colDef, ctypeId); - if (ccollid != childatt->attcollation) - ereport(ERROR, - (errcode(ERRCODE_COLLATION_MISMATCH), - errmsg("child table \"%s\" has different collation for column \"%s\"", - RelationGetRelationName(rel), - colDef->colname), - errdetail("\"%s\" versus \"%s\"", - get_collation_name(ccollid), - get_collation_name(childatt->attcollation)))); - - /* If it's OID, child column must actually be OID */ - if (isOid && childatt->attnum != ObjectIdAttributeNumber) - ereport(ERROR, - 
(errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("child table \"%s\" has a conflicting \"%s\" column", - RelationGetRelationName(rel), - colDef->colname))); - - /* Bump the existing child att's inhcount */ - childatt->attinhcount++; - simple_heap_update(attrdesc, &tuple->t_self, tuple); - CatalogUpdateIndexes(attrdesc, tuple); - - tableam_tops_free_tuple(tuple); - - /* Inform the user about the merge */ - ereport(NOTICE, - (errmsg("merging definition of column \"%s\" for child \"%s\"", - colDef->colname, - RelationGetRelationName(rel)))); - - heap_close(attrdesc, RowExclusiveLock); - return InvalidObjectAddress; - } - } - - pgclass = heap_open(RelationRelationId, RowExclusiveLock); - - reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(myrelid)); - if (!HeapTupleIsValid(reltup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", myrelid))); - } - relkind = ((Form_pg_class)GETSTRUCT(reltup))->relkind; - - /* new name should not already exist */ - check_for_column_name_collision(rel, colDef->colname); - - /* Determine the new attribute's number */ - if (isOid) { - newattnum = ObjectIdAttributeNumber; - } else { - currattnum = ((Form_pg_class)GETSTRUCT(reltup))->relnatts; - if (currattnum + 1 > MaxHeapAttributeNumber) { - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("tables can have at most %d columns", MaxHeapAttributeNumber))); - } - if (is_first) { - newattnum = 1; - } else if (after_name != NULL) { - newattnum = GetAfterColumnAttnum(myrelid, after_name); - } else { - newattnum = currattnum + 1; - } - } - - typeTuple = typenameType(NULL, colDef->typname, &typmod); - tform = (Form_pg_type)GETSTRUCT(typeTuple); - typeOid = HeapTupleGetOid(typeTuple); - - /* And the collation */ - Oid rel_coll_oid = rel->rd_options == NULL ? 
InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate; - collOid = GetColumnDefCollation(NULL, colDef, typeOid, rel_coll_oid); - if (DB_IS_CMPT(B_FORMAT)) { - typeOid = binary_need_transform_typeid(typeOid, &collOid); - if (RelationIsColStore(rel) || RelationIsTsStore(rel)) { - check_unsupported_charset_for_column(collOid, colDef->colname); - } - } - - aclresult = pg_type_aclcheck(typeOid, GetUserId(), ACL_USAGE); - if (aclresult != ACLCHECK_OK) - aclcheck_error_type(aclresult, typeOid); - - /* make sure datatype is legal for a column */ - CheckAttributeType(colDef->colname, typeOid, collOid, list_make1_oid(rel->rd_rel->reltype), false); - -#ifdef ENABLE_MOT - if (relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(RelationGetRelid(rel))) { - DetermineColumnCollationForMOTTable(&collOid); - } -#endif - - bool isDelta = RELATION_IS_DELTA(rel); - - /* construct new attribute's pg_attribute entry */ - attribute.attrelid = myrelid; - (void)namestrcpy(&(attribute.attname), colDef->colname); - attribute.atttypid = typeOid; - attribute.attstattarget = (newattnum > 0) ? 
-1 : 0; - attribute.attlen = tform->typlen; - attribute.attcacheoff = -1; - attribute.atttypmod = typmod; - attribute.attnum = newattnum; - attribute.attbyval = tform->typbyval; - attribute.attndims = list_length(colDef->typname->arrayBounds); - attribute.attstorage = tform->typstorage; - attribute.attalign = tform->typalign; - attribute.attnotnull = colDef->is_not_null; - attribute.atthasdef = false; - attribute.attisdropped = false; - attribute.attislocal = colDef->is_local; - attribute.attkvtype = colDef->kvtype; - if (!isDelta) { - VerifyAttrCompressMode(colDef->cmprs_mode, attribute.attlen, colDef->colname); - attribute.attcmprmode = colDef->cmprs_mode; - } else { - attribute.attcmprmode = ATT_CMPR_NOCOMPRESS; - } - attribute.attinhcount = colDef->inhcount; - attribute.attcollation = collOid; - /* attribute.attacl is handled by InsertPgAttributeTuple */ - ReleaseSysCache(typeTuple); - - if (!isDelta && RelationIsRowFormat(rel) && ATT_CMPR_NOCOMPRESS < colDef->cmprs_mode - && colDef->cmprs_mode <= ATT_CMPR_NUMSTR) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("row-oriented table does not support compression"))); - } - - if (is_addloc) { - UpdatePgAttributeFirstAfter(attrdesc, myrelid, newattnum, currattnum, true); - UpdatePgDescriptionFirstAfter(rel, newattnum, currattnum, true); - UpdatePgIndexFirstAfter(rel, newattnum, currattnum, true); - UpdatePgConstraintFirstAfter(rel, newattnum, currattnum, true); - UpdatePgConstraintConfkeyFirstAfter(rel, newattnum, currattnum, true); - UpdatePgAttrdefFirstAfter(rel, newattnum, currattnum, true); - UpdatePgPartitionFirstAfter(rel, newattnum, currattnum, true, false, NULL); - UpdatePgTriggerFirstAfter(rel, newattnum, currattnum, true); - UpdatePgRlspolicyFirstAfter(rel, newattnum, currattnum, true); - query_str = CheckPgRewriteFirstAfter(rel); - tab->rewrite |= AT_REWRITE_ALTER_PERSISTENCE; - tab->is_first_after = true; - } - - InsertPgAttributeTuple(attrdesc, &attribute, NULL); - - 
heap_close(attrdesc, RowExclusiveLock); - - if (colDef->clientLogicColumnRef != NULL) { - ceHeapInfo->attnum = newattnum; - insert_gs_sec_encrypted_column_tuple(ceHeapInfo, cedesc, myrelid, NULL); - heap_close(cedesc, RowExclusiveLock); - } - - /* - * Update pg_class tuple as appropriate - */ - if (isOid) - ((Form_pg_class)GETSTRUCT(reltup))->relhasoids = true; - else - ((Form_pg_class)GETSTRUCT(reltup))->relnatts = currattnum + 1; - - simple_heap_update(pgclass, &reltup->t_self, reltup); - - /* keep catalog indexes current */ - CatalogUpdateIndexes(pgclass, reltup); - - tableam_tops_free_tuple(reltup); - - /* Post creation hook for new attribute */ - InvokeObjectAccessHook(OAT_POST_CREATE, RelationRelationId, myrelid, newattnum, NULL); - - heap_close(pgclass, RowExclusiveLock); - - /* Make the attribute's catalog entry visible */ - CommandCounterIncrement(); - - if (is_addloc) { - UpdatePgDependFirstAfter(rel, newattnum, currattnum, true); - UpdateGenerateColFirstAfter(rel, newattnum, currattnum, true); - UpdateIndexFirstAfter(rel); - - /* create or replace view */ - ReplaceViewQueryFirstAfter(query_str); - } - - /* - * Store the DEFAULT, if any, in the catalogs - */ - if (colDef->raw_default) { - RawColumnDefault* rawEnt = NULL; - - if (relkind == RELKIND_FOREIGN_TABLE) { -#ifdef ENABLE_MOT - if (!isMOTFromTblOid(RelationGetRelid(rel)) && !isPostgresFDWFromTblOid(RelationGetRelid(rel))) { -#else - if (!isPostgresFDWFromTblOid(RelationGetRelid(rel))) { -#endif - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s on foreign tables are not supported", - colDef->generatedCol ? "generated column" : "default values"))); - } - } else if (relkind == RELKIND_STREAM) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s on streams are not supported", - colDef->generatedCol ? 
"generated column" : "default values"))); - } else if (RelationIsTsStore(rel)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("It's not supported to add column with default value for timeseries tables."))); - } - - rawEnt = (RawColumnDefault*)palloc(sizeof(RawColumnDefault)); - rawEnt->attnum = attribute.attnum; - rawEnt->raw_default = (Node*)copyObject(colDef->raw_default); - rawEnt->update_expr = (Node*)copyObject(colDef->update_default); - - rawEnt->generatedCol = colDef->generatedCol; - - /* - * This function is intended for CREATE TABLE, so it processes a - * _list_ of defaults, but we just do one. - */ - (void)AddRelationNewConstraints(rel, list_make1(rawEnt), NIL, false, true); - - /* Make the additional catalog changes visible */ - CommandCounterIncrement(); - } - - /* - * Tell Phase 3 to fill in the default expression, if there is one. - * - * If there is no default, Phase 3 doesn't have to do anything, because - * that effectively means that the default is NULL. The heap tuple access - * routines always check for attnum > # of attributes in tuple, and return - * NULL if so, so without any modification of the tuple data we will get - * the effect of NULL values in the new column. - * - * An exception occurs when the new column is of a domain type: the domain - * might have a NOT NULL constraint, or a check constraint that indirectly - * rejects nulls. If there are any domain constraints then we construct - * an explicit NULL default value that will be passed through - * CoerceToDomain processing. (This is a tad inefficient, since it causes - * rewriting the table which we really don't have to do, but the present - * design of domain processing doesn't offer any simple way of checking - * the constraints more directly.) - * - * Note: we use build_column_default, and not just the cooked default - * returned by AddRelationNewConstraints, so that the right thing happens - * when a datatype's default applies. 
- * - * We skip this step completely for views and foreign tables. For a view, - * we can only get here from CREATE OR REPLACE VIEW, which historically - * doesn't set up defaults, not even for domain-typed columns. And in any - * case we mustn't invoke Phase 3 on a view or foreign table, since they - * have no storage. - */ -#ifdef ENABLE_MOT - if ((relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(RelationGetRelid(rel)) && attribute.attnum > 0) || - (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_FOREIGN_TABLE && - relkind != RELKIND_STREAM && relkind != RELKIND_CONTQUERY && attribute.attnum > 0)) { -#else - if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_FOREIGN_TABLE && - relkind != RELKIND_STREAM && relkind != RELKIND_CONTQUERY && attribute.attnum > 0) { -#endif - /* test whether new column is null or not*/ - bool testNotNull = colDef->is_not_null; - - /* - * Generally, relcache of nailed-in system catalogs should not be blowed away - * for reliability consideration. Under such circumstances, visiting the - * attdesc of the newly added system catalog column, which is necessary in building - * its default, is impossible. - * On the other hand, during inplace or online upgrade, we only prohibit invalidation - * of pg_class's and pg_attribute's relcache. As a result, we do not support - * non-NULL default values for new columns in pg_class, pg_attribute, pg_proc during - * inplace or online upgrade at present. 
- */ - if (u_sess->attr.attr_common.IsInplaceUpgrade && - (rel->rd_id == RelationRelationId || rel->rd_id == AttributeRelationId)) - defval = NULL; - else - defval = (Expr*)build_column_default(rel, attribute.attnum); - - if (defval == NULL && (GetDomainConstraints(typeOid) != NIL || is_addloc)) { - Oid baseTypeId; - int32 baseTypeMod; - Oid baseTypeColl; - - baseTypeMod = typmod; - baseTypeId = getBaseTypeAndTypmod(typeOid, &baseTypeMod); - baseTypeColl = get_typcollation(baseTypeId); - defval = (Expr*)makeNullConst(baseTypeId, baseTypeMod, baseTypeColl); - if (GetDomainConstraints(typeOid) != NIL) { - defval = (Expr*)coerce_to_target_type( - NULL, (Node*)defval, baseTypeId, typeOid, typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); - if (defval == NULL) /* should not happen */ - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("failed to coerce base type to domain"))); - } - } - - if (defval != NULL) { - /* if now the row-storage table must be rewrited, - * it isn't need to apply alter-table-instantly feature. - * also exclude temp table and column table. - */ - if (attribute.attnum == RelAutoIncAttrNum(rel)) { - if (colDef->is_not_null) { - ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, true, is_addloc); - } - } else if (contain_specified_function((Node*)defval, NEXTVALFUNCOID)) { - /* We don't support alter table add column which default with nextval expression. 
*/ - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("It's not supported to alter table add column default with nextval expression."))); - } else if (RelationIsCUFormat(rel)) { - ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false); - } else if (tab->rewrite>0 || colDef->generatedCol || - RelationUsesSpaceType(rel->rd_rel->relpersistence) == SP_TEMP) { - ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, true); - } else { - bytea* value = NULL; - AT_INSTANT_DEFAULT_VALUE ret = - shouldUpdateAllTuples(defval, attribute.atttypid, attribute.attlen, attribute.attbyval, &value); - - if (ret == DEFAULT_NOT_NULL_CONST) { - Assert(value != NULL); - updateInitDefVal(value, rel, attribute.attnum); - pfree_ext(value); - - /* new column has const default value, - * so Not-Null test is not necessary. - */ - testNotNull = false; - } else if (ret == DEFAULT_OTHER) { - if (isDfsTable) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - (errmsg("It is not supported on DFS table. The detailed reasons are the" - " followings:"), - errdetail("1. the default value may be a volatile function.\n" - "2. the storage length of default value may be greater than 127.\n" - "3. the data type of new column is not supported.")))); - } - ATExecAppendDefValExpr(attribute.attnum, defval, tab, colDef, false, false); - } - /* nothing to do if ret is DEFAULT_NULL */ - } - } - - /* - * If the new column is NOT NULL, tell Phase 3 it needs to test that. - * (Note we don't do this for an OID column. OID will be marked not - * null, but since it's filled specially, there's no need to test - * anything.) - */ - if (testNotNull) { - tab->new_notnull = true; - } - } - - /* - * If we are adding an OID column, we have to tell Phase 3 to rewrite the - * table to fix that. 
- */ - if (isOid) { - tab->rewrite |= AT_REWRITE_ALTER_OID; - } - - if (RelationIsColStore(rel)) { - /* - * The DFS table do not rewrite data, only update the catalog for default value. - */ - if (isDfsTable) { - tab->rewrite = AT_REWRITE_ALTER_OID; - } else { - tab->rewrite |= AT_REWRITE_ALTER_OID; - } - } - - /* - * Add needed dependency entries for the new column. - */ - add_column_datatype_dependency(myrelid, newattnum, attribute.atttypid); - add_column_collation_dependency(myrelid, newattnum, attribute.attcollation); - -#ifdef ENABLE_MOT - if (relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(RelationGetRelid(rel))) { - AlterForeingTableCmd fcmd = { - T_AlterForeingTableCmd, - AT_AddColumn, - rel, - nullptr, - (Node*)colDef, - typeOid, - defval - }; - ATExecMOTAlterTable(&fcmd); - } -#endif - -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel))) { - /* add col to tag table */ - if (colDef->kvtype == ATT_KV_TAG) { - Oid tag_relid = get_tag_relid(RelationGetRelationName(rel), rel->rd_rel->relnamespace); - Relation tagrel; - AlteredTableInfo* tagtab = NULL; - List* index_col_name = NIL; - - tagrel = heap_open(tag_relid, lockmode); - CheckTableNotInUse(tagrel, "ALTER TABLE"); - - tagtab = ATGetQueueEntry(wqueue, tagrel); - - ATExecAddColumn(wqueue, tagtab, tagrel, colDef, isOid, false, false, false, NULL, lockmode); - - char tag_relname[NAMEDATALEN] = {0}; - Tsdb::GenMetaRelname(rel->rd_rel->relnamespace, Tsdb::MetaTableType::META_TABLE_TAGS, - tag_relname, TsConf::MAX_TS_NAME_LEN, RelationGetRelationName(rel)); - /* index_col_name contains the name of one new tag column which need to add */ - index_col_name = lappend(index_col_name, colDef->colname); - create_tag_index(tag_relid, tag_relname, index_col_name); - list_free_ext(index_col_name); - heap_close(tagrel, NoLock); - } else if (colDef->kvtype == ATT_KV_FIELD && Tsdb::RelationEnablesTsdbDelta(rel)) { - /* if add TSField columns, update delta table simultaneously */ - Relation delta_rel = 
Tsdb::RelationGetDeltaRelation(rel, lockmode); - CheckTableNotInUse(delta_rel, "ALTER TABLE"); - AlteredTableInfo* delta_tab = ATGetQueueEntry(wqueue, delta_rel); - ATExecAddColumn(wqueue, delta_tab, delta_rel, colDef, isOid, false, false, false, NULL, lockmode); - heap_close(delta_rel, NoLock); - } - } -#endif /* ENABLE_MULTIPLE_NODES */ - - if (RelationIsPAXFormat(rel)) { - /* - * Add column for delta table. - */ - children = lappend_oid(children, RelationGetDeltaRelId(rel)); - /* - * Get the lock to synchronize against concurrent drop. - */ - LockRelationOid(RelationGetDeltaRelId(rel), lockmode); - elog(DEBUG1, - "[GET LOCK] Get the lock %d successfully on delta table of %s for altering operator.", - lockmode, - RelationGetRelationName(rel)); -#ifdef ENABLE_MULTIPLE_NODES - } else if (g_instance.attr.attr_storage.enable_delta_store && RelationIsCUFormat(rel)) { -#else - /* - * Under centrailzed mode, there may be unique index on delta table. When checking unique - * constraint, unique index on delta will be used. So we ignore enable_delta_store here - * and alter delta table at the same time. - */ - } else if (RelationIsCUFormat(rel)) { -#endif - /* - * add cstore relation delta table to recurse, if col support inherit feture - * we also need call find_inheritance_children as below - */ - children = find_cstore_delta(rel, lockmode); - } else { - /* - * Propagate to children as appropriate. Unlike most other ALTER - * routines, we have to do this one level of recursion at a time; we can't - * use find_all_inheritors to do it in one pass. - */ - children = find_inheritance_children(RelationGetRelid(rel), lockmode); - } - - /* - * If we are told not to recurse, there had better not be any child - * tables; else the addition would put them out of step. 
- */ - if (children && !recurse) - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("column must be added to child tables too"))); - - /* Children should see column as singly inherited, Cstore table and delta talbe are not inherited table */ - if (!recursing && !RelationIsCUFormat(rel)) { - colDef = (ColumnDef*)copyObject(colDef); - colDef->inhcount = 1; - colDef->is_local = false; - } - - - /* column Options */ - ATCreateColumComments(myrelid, colDef); - - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - AlteredTableInfo* childtab = NULL; - - /* find_inheritance_children already got lock */ - childrel = heap_open(childrelid, NoLock); - CheckTableNotInUse(childrel, "ALTER TABLE"); - - /* Find or create work queue entry for this table */ - childtab = ATGetQueueEntry(wqueue, childrel); - - /* Recurse to child */ - ATExecAddColumn(wqueue, childtab, childrel, colDef, isOid, recurse, true, is_first, after_name, lockmode); - - heap_close(childrel, NoLock); - } - ObjectAddressSubSet(address, RelationRelationId, myrelid, newattnum); - return address; -} - -/* - * If a new or renamed column will collide with the name of an existing - * column, error out. - */ -static void check_for_column_name_collision(Relation rel, const char* colname) -{ - HeapTuple attTuple; - int attnum; - - /* - * this test is deliberately not attisdropped-aware, since if one tries to - * add a column matching a dropped column name, it's gonna fail anyway. - */ - attTuple = SearchSysCache2(ATTNAME, ObjectIdGetDatum(RelationGetRelid(rel)), PointerGetDatum(colname)); - if (!HeapTupleIsValid(attTuple)) - return; - - attnum = ((Form_pg_attribute)GETSTRUCT(attTuple))->attnum; - ReleaseSysCache(attTuple); - - /* - * We throw a different error message for conflicts with system column - * names, since they are normally not shown and the user might otherwise - * be confused about the reason for the conflict. 
- */ - if (attnum <= 0) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column name \"%s\" conflicts with a system column name", colname))); - else - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", colname, RelationGetRelationName(rel)))); -} - -/* - * Install a column's dependency on its datatype. - */ -static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid) -{ - ObjectAddress myself, referenced; - - myself.classId = RelationRelationId; - myself.objectId = relid; - myself.objectSubId = attnum; - referenced.classId = TypeRelationId; - referenced.objectId = typid; - referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); -} - -/* - * Install a column's dependency on its collation. - */ -static void add_column_collation_dependency(Oid relid, int32 attnum, Oid collid) -{ - ObjectAddress myself, referenced; - - /* We know the default collation is pinned, so don't bother recording it */ - if (OidIsValid(collid) && collid != DEFAULT_COLLATION_OID) { - myself.classId = RelationRelationId; - myself.objectId = relid; - myself.objectSubId = attnum; - referenced.classId = CollationRelationId; - referenced.objectId = collid; - referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); - } -} - -/* - * ALTER TABLE SET WITH OIDS - * - * Basically this is an ADD COLUMN for the special OID column. We have - * to cons up a ColumnDef node because the ADD COLUMN code needs one. 
- */ -static void ATPrepAddOids(List** wqueue, Relation rel, bool recurse, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - /* If we're recursing to a child table, the ColumnDef is already set up */ - if (cmd->def == NULL) { - ColumnDef* cdef = makeNode(ColumnDef); - - cdef->colname = pstrdup("oid"); - cdef->typname = makeTypeNameFromOid(OIDOID, -1); - cdef->inhcount = 0; - cdef->is_local = true; - cdef->is_not_null = true; - cdef->storage = 0; - // the best compression method for OID datatype is DELTA. but its data is stored - // in header part of heap tuple, instead of data part. so don't compress it. - // - cdef->kvtype = ATT_KV_UNDEFINED; - cdef->cmprs_mode = ATT_CMPR_NOCOMPRESS; - cmd->def = (Node*)cdef; - } - ATPrepAddColumn(wqueue, NULL, rel, recurse, false, cmd, lockmode); - - if (recurse) - cmd->subtype = AT_AddOidsRecurse; -} - -/* - * ALTER TABLE ALTER COLUMN DROP NOT NULL - * - * Return the address of the modified column. If the column was already - * nullable, InvalidObjectAddress is returned. 
- */ -static ObjectAddress ATExecDropNotNull(Relation rel, const char* colName, LOCKMODE lockmode) -{ - HeapTuple tuple; - AttrNumber attnum; - Relation attr_rel; - List* indexoidlist = NIL; - ListCell* indexoidscan = NULL; - ObjectAddress address; - - /* - * lookup the attribute - */ - attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - - attnum = ((Form_pg_attribute)GETSTRUCT(tuple))->attnum; - - /* Prevent them from altering a system attribute */ - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - /* - * Check that the attribute is not in a primary key - * - * Note: we'll throw error even if the pkey index is not valid. - */ - /* Loop over all indexes on the relation */ - indexoidlist = RelationGetIndexList(rel); - - foreach (indexoidscan, indexoidlist) { - Oid indexoid = lfirst_oid(indexoidscan); - HeapTuple indexTuple; - Form_pg_index indexStruct; - int i; - int indnkeyatts; - - indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid)); - if (!HeapTupleIsValid(indexTuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", indexoid))); - } - - indexStruct = (Form_pg_index)GETSTRUCT(indexTuple); - indnkeyatts = GetIndexKeyAttsByTuple(NULL, indexTuple); - - /* If the index is not a primary key, skip the check */ - if (indexStruct->indisprimary) { - /* - * Loop over each attribute in the primary key and see if it - * matches the to-be-altered attribute - */ - for (i = 0; i < indnkeyatts; i++) { - if (indexStruct->indkey.values[i] == attnum) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("column \"%s\" is in a primary key", 
colName))); - } - } - - ReleaseSysCache(indexTuple); - } - - list_free_ext(indexoidlist); - - /* - * Okay, actually perform the catalog change ... if needed - */ - if (((Form_pg_attribute)GETSTRUCT(tuple))->attnotnull) { - ((Form_pg_attribute)GETSTRUCT(tuple))->attnotnull = FALSE; - - simple_heap_update(attr_rel, &tuple->t_self, tuple); - - /* keep the system catalog indexes current */ - CatalogUpdateIndexes(attr_rel, tuple); - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - - } - else - address = InvalidObjectAddress; - - heap_close(attr_rel, RowExclusiveLock); - return address; -} - -/* - * ALTER TABLE ALTER COLUMN SET NOT NULL - * - * Return the address of the modified column. If the column was already NOT - * NULL, InvalidObjectAddress is returned. - */ -static ObjectAddress ATExecSetNotNull(AlteredTableInfo* tab, Relation rel, const char* colName, LOCKMODE lockmode) -{ - HeapTuple tuple; - AttrNumber attnum; - Relation attr_rel; - ObjectAddress address; - /* - * lookup the attribute - */ - attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - - attnum = ((Form_pg_attribute)GETSTRUCT(tuple))->attnum; - - /* Prevent them from altering a system attribute */ - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - /* - * Okay, actually perform the catalog change ... 
if needed - */ - if (!((Form_pg_attribute)GETSTRUCT(tuple))->attnotnull) { - ((Form_pg_attribute)GETSTRUCT(tuple))->attnotnull = TRUE; - - simple_heap_update(attr_rel, &tuple->t_self, tuple); - - /* keep the system catalog indexes current */ - CatalogUpdateIndexes(attr_rel, tuple); - - /* Tell Phase 3 it needs to test the constraint */ - tab->new_notnull = true; - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - } - else - address = InvalidObjectAddress; - - heap_close(attr_rel, RowExclusiveLock); - return address; -} - -bool FetchOnUpdateExpress(Relation rel, const char* colName) -{ - HeapTuple htup = NULL; - bool isnull = false; - bool temp_on_update = FALSE; - htup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(htup)) - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - Form_pg_attribute attTup = (Form_pg_attribute)GETSTRUCT(htup); - AttrNumber temp_attnum = attTup->attnum; - - ScanKeyData skey[2]; - Relation adrel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - ScanKeyInit(&skey[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&skey[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(temp_attnum)); - SysScanDesc adscan = systable_beginscan(adrel, AttrDefaultIndexId, true, NULL, 2, skey); - - if (HeapTupleIsValid(htup = systable_getnext(adscan))) { - Datum val = heap_getattr(htup, Anum_pg_attrdef_adsrc_on_update, adrel->rd_att, &isnull); - if (val && pg_strcasecmp(TextDatumGetCString(val), "") != 0) { - temp_on_update = TRUE; - } - } - systable_endscan(adscan); - heap_close(adrel, RowExclusiveLock); - return temp_on_update; -} - -/* - * ALTER TABLE ALTER COLUMN SET/DROP DEFAULT - */ -static ObjectAddress ATExecColumnDefault(Relation rel, const char* colName, Node* newDefault, LOCKMODE 
lockmode) -{ - AttrNumber attnum; - ObjectAddress address; - TupleDesc tupdesc = RelationGetDescr(rel); - - /* - * get the number of the attribute - */ - attnum = get_attnum(RelationGetRelid(rel), colName); - if (attnum == InvalidAttrNumber) { - if (u_sess->attr.attr_common.IsInplaceUpgrade) { - ereport(WARNING, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist while dropping default", - colName, - RelationGetRelationName(rel)))); - return InvalidObjectAddress; - } else - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - } - - /* Prevent them from altering a system attribute */ - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - if (ISGENERATEDCOL(tupdesc, attnum - 1)) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_SYNTAX_ERROR), - errmsg("column \"%s\" of relation \"%s\" is a generated column", colName, RelationGetRelationName(rel)))); - } else if (attnum == RelAutoIncAttrNum(rel)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter auto_increment column \"%s\" default", colName))); - } - - bool on_update = FetchOnUpdateExpress(rel, colName); - - /* - * Remove any old default for the column. We use RESTRICT here for - * safety, but at present we do not expect anything to depend on the - * default. - * - * We treat removing the existing default as an internal operation when it - * is preparatory to adding a new default, but as a user-initiated - * operation when the user asked for a drop. - */ - if (!on_update) { - RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false, newDefault == NULL ? 
false : true); - } - - if (newDefault != NULL || (on_update && newDefault == NULL)) { - /* SET DEFAULT */ - RawColumnDefault* rawEnt = NULL; - - rawEnt = (RawColumnDefault*)palloc(sizeof(RawColumnDefault)); - rawEnt->attnum = attnum; - rawEnt->raw_default = newDefault; - rawEnt->generatedCol = '\0'; - rawEnt->update_expr = NULL; - - /* - * This function is intended for CREATE TABLE, so it processes a - * _list_ of defaults, but we just do one. - */ - (void)AddRelationNewConstraints(rel, list_make1(rawEnt), NIL, false, true); - } - - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - return address; - -} - -/* - * ALTER TABLE ALTER COLUMN SET STATISTICS - */ -static void ATPrepSetStatistics(Relation rel) -{ - /* - * We do our own permission checking because (a) we want to allow SET - * STATISTICS on indexes (for expressional index columns), and (b) we want - * to allow SET STATISTICS on system catalogs without requiring - * allowSystemTableMods to be turned on. 
- */ - if (rel->rd_rel->relkind != RELKIND_RELATION && !RelationIsIndex(rel) && rel->rd_rel->relkind != RELKIND_STREAM && - rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && rel->rd_rel->relkind != RELKIND_MATVIEW) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, materialized view, index, or foreign table", RelationGetRelationName(rel)))); - - /* Permissions checks */ - AclResult aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_ALTER); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) { - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, RelationGetRelationName(rel)); - } -} - -/* - * Return value is the address of the modified column - */ -static ObjectAddress ATExecSetStatistics( - Relation rel, const char* colName, Node* newValue, AlterTableStatProperty additional_property, LOCKMODE lockmode) -{ - int newtarget; - Relation attrelation; - HeapTuple tuple; - Form_pg_attribute attrtuple; - AttrNumber attnum; - ObjectAddress address; - - Assert(IsA(newValue, Integer)); - newtarget = intVal(newValue); - - /* - * Limit target to a sane range - */ - if (additional_property == AT_CMD_WithoutPercent) { - if (newtarget < -1) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("statistics target %d is too low", newtarget))); - } else if (newtarget > 10000) { - newtarget = 10000; - ereport(WARNING, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("lowering statistics target to %d", newtarget))); - } - } else { - // Example:additional_property == AT_CMD_WithPercent - if (newtarget < 0 || newtarget > 100) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("statistics percent valid value is between 0 and 100"))); - } - - newtarget = -1 * newtarget - 1; //-1 * newtarget - 1 - } - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - - if 
(!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - attrtuple = (Form_pg_attribute)GETSTRUCT(tuple); - - attnum = attrtuple->attnum; - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - attrtuple->attstattarget = newtarget; - - simple_heap_update(attrelation, &tuple->t_self, tuple); - - /* keep system catalog indexes current */ - CatalogUpdateIndexes(attrelation, tuple); - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - - tableam_tops_free_tuple(tuple); - - heap_close(attrelation, RowExclusiveLock); - - return address; -} - -/* - * ATExecAddStatistics - * to execute add statistics - * - * @param (in) rel: - * the relation - * @param (in) def: - * the alter table column definition - * @param (in) lockmode: - * lock mode - */ -static void ATExecAddStatistics(Relation rel, Node* def, LOCKMODE lockmode) -{ - Assert(IsA(def, List)); - - Oid relid = rel->rd_id; - if (is_sys_table(relid)) { - ereport(ERROR, - ((errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("System catalog is not supported by extended statistic.")))); - } - - char relkind = es_get_starelkind(); - bool inh = es_get_stainherit(); - - /* A list of VacAttrStats */ - int array_length = 0; - VacAttrStats** vacattrstats_array = es_build_vacattrstats_array(rel, (List*)def, true, &array_length, inh); - - if (array_length > 0) { - update_attstats(relid, relkind, false, array_length, vacattrstats_array, RelationGetRelPersistence(rel)); - } -} - -/* - * ATExecDeleteStatistics - * to execute delete statistics - * - * @param (in) rel: - * the relation - * @param (in) def: - * the alter table column definition - * @param (in) lockmode: - * lock mode - */ -static void ATExecDeleteStatistics(Relation rel, Node* def, LOCKMODE lockmode) -{ - Assert(IsA(def, List)); - - 
Oid relid = rel->rd_id; - if (is_sys_table(relid)) { - ereport(ERROR, - ((errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("System catalog is not supported by extended statistic.")))); - } - - char relkind = es_get_starelkind(); - bool inh = es_get_stainherit(); - - /* A list of VacAttrStats */ - int array_length = 0; - VacAttrStats** vacattrstats_array = es_build_vacattrstats_array(rel, (List*)def, false, &array_length, inh); - - if (array_length > 0) { - delete_attstats(relid, relkind, false, array_length, vacattrstats_array, DELETE_STATS_MULTI); - } -} - -static ObjectAddress ATExecSetOptions(Relation rel, const char* colName, Node* options, bool isReset, LOCKMODE lockmode) -{ - Relation attrelation; - HeapTuple tuple, newtuple; - Form_pg_attribute attrtuple; - AttrNumber attnum; - Datum datum, newOptions; - bool isnull = false; - Datum repl_val[Natts_pg_attribute]; - bool repl_null[Natts_pg_attribute]; - bool repl_repl[Natts_pg_attribute]; - errno_t rc = EOK; - ObjectAddress address; - - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - - tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName); - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - attrtuple = (Form_pg_attribute)GETSTRUCT(tuple); - - attnum = attrtuple->attnum; - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - Assert(IsA(options, List)); - ForbidToSetOptionsForAttribute((List*)options); - - /* Generate new proposed attoptions (text array) */ - datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_attoptions, &isnull); - newOptions = transformRelOptions(isnull ? (Datum)0 : datum, (List*)options, NULL, NULL, false, isReset); - /* Validate new options */ - (void)attribute_reloptions(newOptions, true); - - /* Build new tuple. 
*/ - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - if (newOptions != (Datum)0) - repl_val[Anum_pg_attribute_attoptions - 1] = newOptions; - else - repl_null[Anum_pg_attribute_attoptions - 1] = true; - repl_repl[Anum_pg_attribute_attoptions - 1] = true; - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(attrelation), repl_val, repl_null, repl_repl); - ReleaseSysCache(tuple); - - /* Update system catalog. */ - simple_heap_update(attrelation, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(attrelation, newtuple); - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - tableam_tops_free_tuple(newtuple); - - heap_close(attrelation, RowExclusiveLock); - return address; -} - -/* - * ALTER TABLE ALTER COLUMN SET STORAGE - * - * Return value is the address of the modified column - */ -static ObjectAddress ATExecSetStorage(Relation rel, const char* colName, Node* newValue, LOCKMODE lockmode) -{ - char* storagemode = NULL; - char newstorage; - Relation attrelation; - HeapTuple tuple; - Form_pg_attribute attrtuple; - AttrNumber attnum; - ObjectAddress address; - - Assert(IsA(newValue, String)); - storagemode = strVal(newValue); - - if (RelationIsColStore(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Storage type \"%s\" is meaningless for column relation", storagemode))); - } - - if (pg_strcasecmp(storagemode, "plain") == 0) - newstorage = 'p'; - else if (pg_strcasecmp(storagemode, "external") == 0) - newstorage = 'e'; - else if (pg_strcasecmp(storagemode, "extended") == 0) - newstorage = 'x'; - else if (pg_strcasecmp(storagemode, "main") == 0) - newstorage = 'm'; - else { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid storage type \"%s\"", storagemode))); - newstorage = 0; /* keep compiler quiet 
*/ - } - - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - attrtuple = (Form_pg_attribute)GETSTRUCT(tuple); - - attnum = attrtuple->attnum; - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - /* - * safety check: do not allow toasted storage modes unless column datatype - * is TOAST-aware. - */ - if (newstorage == 'p' || TypeIsToastable(attrtuple->atttypid)) - attrtuple->attstorage = newstorage; - else - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column data type %s can only have storage PLAIN", format_type_be(attrtuple->atttypid)))); - - simple_heap_update(attrelation, &tuple->t_self, tuple); - - /* keep system catalog indexes current */ - CatalogUpdateIndexes(attrelation, tuple); - - tableam_tops_free_tuple(tuple); - - heap_close(attrelation, RowExclusiveLock); - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - return address; -} - -/* - * ALTER TABLE DROP COLUMN - * - * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism, - * because we have to decide at runtime whether to recurse or not depending - * on whether attinhcount goes to zero or not. (We can't check this in a - * static pre-pass because it won't handle multiple inheritance situations - * correctly.) 
- */ -static void ATPrepDropColumn( - List** wqueue, Relation rel, bool recurse, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - if (rel->rd_rel->reloftype && !recursing) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot drop column from typed table"))); - - if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) - ATTypedTableRecursion(wqueue, rel, cmd, lockmode); - - if (recurse) - cmd->subtype = AT_DropColumnRecurse; -} -/* - * Brief : Check if the input column is the last column of the input Relation - * Input : rel, the input relation - * attrnum, the input attribute number which is start from 1 - * Output : None - * Return Value : true if it's the last column of the input relation, false otherwise - */ -static bool CheckLastColumn(Relation rel, AttrNumber attrnum) -{ - for (int col = 0; col < rel->rd_att->natts; ++col) { - if (rel->rd_att->attrs[col].attisdropped) - continue; - if (ISGENERATEDCOL(rel->rd_att, col)) - continue; - if (col != (attrnum - 1)) { - return false; - } - } - return true; -} - - -#ifdef ENABLE_MULTIPLE_NODES -/* Check if the ALTER TABLE DROP COLUMN operation is valid in timeseries table. - * Invalid case: - * 1. TSTime column is not allowed to drop - * 2. 
At least one TSTag column should be remained - * - * Parameter: - * - tsrel: an opened timeseries table - * - attnum: AttrNumber of the column to be dropped - * - colName: name of the column to be dropped - */ -static void CheckTsStoreDropColumn(const Relation tsrel, const AttrNumber attnum, const char* colName) -{ - int1 kvtype = tsrel->rd_att->attrs[attnum - 1]->attkvtype; - if (kvtype == ATT_KV_TIMETAG) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot drop the TSTime column \"%s\" from timeseries table", colName), - errdetail("If the type of the column is TsTime, it cannot be dropped"), - errcause("Unexpected error"), - erraction("This column is not be dropped"), - errmodule(MOD_TIMESERIES))); - } else if (kvtype == ATT_KV_TAG && get_valid_natts_by_kvtype(tsrel, kvtype) <= 1) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot drop the only TSTag column \"%s\" from timeseries table", colName), - errdetail("When the column of TSTag is the last one, it cannot be dropped"), - errcause("Unexpected error"), - erraction("This column is not be dropped"), - errmodule(MOD_TIMESERIES))); - } else if (kvtype == ATT_KV_FIELD && get_valid_natts_by_kvtype(tsrel, kvtype) <= 1) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot drop the only TSField column \"%s\" from timeseries table", colName), - errdetail("When the column of TSField is the last one, it cannot be dropped"), - errcause("Unexpected error"), - erraction("This column is not be dropped"), - errmodule(MOD_TIMESERIES))); - } -} -#endif - -static void ResetTempAutoIncrement(Relation rel, AttrNumber attnum) -{ - if (rel->rd_rel->relpersistence != RELPERSISTENCE_TEMP || RelAutoIncAttrNum(rel) != attnum) { - return; - } - tmptable_autoinc_reset(rel->rd_rel->relfilenode, 1); -} - -/* - * Return value is that of the dropped column. 
- */ -static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* colName, DropBehavior behavior, bool recurse, - bool recursing, bool missing_ok, LOCKMODE lockmode) -{ - HeapTuple tuple; - Form_pg_attribute targetatt; - AttrNumber attnum; - List* children = NIL; - ObjectAddress object; - - /* At top level, permission check was done in ATPrepCmd, else do it */ - if (recursing) - ATSimplePermissions(rel, ATT_TABLE); - - /* - * get the number of the attribute - */ - tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(tuple)) { - if (!missing_ok) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - } else { - ereport(NOTICE, - (errmsg("column \"%s\" of relation \"%s\" does not exist, skipping", - colName, - RelationGetRelationName(rel)))); - return InvalidObjectAddress; - } - } - targetatt = (Form_pg_attribute)GETSTRUCT(tuple); - - attnum = targetatt->attnum; - -#ifdef ENABLE_MULTIPLE_NODES - if (RelationIsTsStore(rel)) { - CheckTsStoreDropColumn(rel, attnum, colName); - } -#endif /* ENABLE_MULTIPLE_NODES */ - - /* - * column of a partitioned table's partition key can not be dropped - */ - if (is_partition_column(rel, attnum)) { - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot drop partitioning column \"%s\"", colName))); - } - - /* Can't drop a system attribute, except OID */ - if (attnum <= 0 && attnum != ObjectIdAttributeNumber) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot drop system column \"%s\"", colName))); - - /* Don't drop inherited columns */ - if (targetatt->attinhcount > 0 && !recursing) - ereport( - ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot drop inherited column \"%s\"", colName))); - - ReleaseSysCache(tuple); - - /* - * For a table, we don't allow to drop all the column. 
- * We have to check if the drop column is the last column. - * If it is, not allow to drop it. - */ - if (GetLocatorType(rel->rd_id) != LOCATOR_TYPE_HASH) { - bool lastColumn = CheckLastColumn(rel, attnum); - if (lastColumn) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("must have at least one column"))); - } - } - - /* - * Propagate to children as appropriate. Unlike most other ALTER - * routines, we have to do this one level of recursion at a time; we can't - * use find_all_inheritors to do it in one pass. - */ - if (RelationIsPAXFormat(rel)) { - /* Add column for delta table. */ - children = lappend_oid(children, RelationGetDeltaRelId(rel)); - - /* Get the lock to synchronize against concurrent drop. */ - LockRelationOid(RelationGetDeltaRelId(rel), lockmode); - elog(DEBUG1, - "[GET LOCK] Get the lock %d successfully on delta table of %s for altering operator.", - lockmode, - RelationGetRelationName(rel)); -#ifdef ENABLE_MULTIPLE_NODES - } else if (g_instance.attr.attr_storage.enable_delta_store && RelationIsCUFormat(rel)) { -#else - /* - * Under centrailzed mode, there may be unique index on delta table. When checking unique - * constraint, unique index on delta will be used. So we ignore enable_delta_store here - * and alter delta table at the same time. 
- */ - } else if (RelationIsCUFormat(rel)) { -#endif - /* - * add cstore relation delta table to recurse, if col support inherit feture - * we also need call find_inheritance_children as below - */ - children = find_cstore_delta(rel, lockmode); - } else { - children = find_inheritance_children(RelationGetRelid(rel), lockmode); - } - - if (children != NULL) { - Relation attr_rel; - ListCell* child = NULL; - - attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - Form_pg_attribute childatt; - - /* find_inheritance_children already got lock */ - childrel = heap_open(childrelid, NoLock); - CheckTableNotInUse(childrel, "ALTER TABLE"); - - tuple = SearchSysCacheCopyAttName(childrelid, colName); - if (!HeapTupleIsValid(tuple)) { - /* shouldn't happen */ - Assert(0); - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute \"%s\" of relation %u", colName, childrelid))); - } - childatt = (Form_pg_attribute)GETSTRUCT(tuple); - - /* - * The detal table is not inherit table. - */ - if (!RelationIsPAXFormat(rel) && !RelationIsCUFormat(rel) && - childatt->attinhcount <= 0) /* shouldn't happen */ - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("relation %u has non-inherited attribute \"%s\"", childrelid, colName))); - - if (recurse) { - if (RelationIsPAXFormat(rel) || RelationIsCUFormat(rel)) { - /* - * Delete this column of the delta table. - */ - ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode); - } else if (childatt->attinhcount == 1 && !childatt->attislocal) { - /* - * If the child column has other definition sources, just - * decrement its inheritance count; if not, recurse to delete it. 
- * - * Time to delete this child column, too - */ - - ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode); - } else { - /* Child column must survive my deletion */ - childatt->attinhcount--; - - simple_heap_update(attr_rel, &tuple->t_self, tuple); - - /* keep the system catalog indexes current */ - CatalogUpdateIndexes(attr_rel, tuple); - - /* Make update visible */ - CommandCounterIncrement(); - } - } else { - /* - * If we were told to drop ONLY in this table (no recursion), - * we need to mark the inheritors' attributes as locally - * defined rather than inherited. - */ - childatt->attinhcount--; - childatt->attislocal = true; - - simple_heap_update(attr_rel, &tuple->t_self, tuple); - - /* keep the system catalog indexes current */ - CatalogUpdateIndexes(attr_rel, tuple); - - /* Make update visible */ - CommandCounterIncrement(); - } - - tableam_tops_free_tuple(tuple); - - heap_close(childrel, NoLock); - } - heap_close(attr_rel, RowExclusiveLock); - } - - /* - * If the dropped column has partial cluster key, must to update - * relhasclusterkey in pg_class. - */ - if (rel->rd_rel->relhasclusterkey && colHasPartialClusterKey(rel, attnum)) { - SetRelHasClusterKey(rel, false); - } - - /* - * Delete the dependent objects in order and update the rel catalog - */ - object.classId = RelationRelationId; - object.objectId = RelationGetRelid(rel); - object.objectSubId = attnum; - - performDeletion(&object, behavior, 0); - - /* - * If it's a column table, when we drop the column, we also need to delete the - * column information from the cudesc, and put the column file into the pending delete. 
- */ - if (RelationIsCUFormat(rel)) { - CStoreRelDropColumn(rel, attnum, rel->rd_rel->relowner); - } - ResetTempAutoIncrement(rel, attnum); - -#ifdef ENABLE_MOT - if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(RelationGetRelid(rel))) { - AlterForeingTableCmd fcmd = { - T_AlterForeingTableCmd, - AT_DropColumn, - rel, - colName, - nullptr, - InvalidOid, - nullptr - }; - ATExecMOTAlterTable(&fcmd); - } -#endif - -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel))) { - /* drop column in tag table */ - if (rel->rd_att->attrs[attnum - 1]->attkvtype == ATT_KV_TAG) { - Oid tag_relid = get_tag_relid(RelationGetRelationName(rel), rel->rd_rel->relnamespace); - Relation tagrel = heap_open(tag_relid, lockmode); - CheckTableNotInUse(tagrel, "ALTER TABLE"); - ATExecDropColumn(wqueue, tagrel, colName, behavior, false, false, true, lockmode); - TagsCacheMgr::GetInstance().clear(); - - heap_close(tagrel, NoLock); - } else if (rel->rd_att->attrs[attnum - 1]->attkvtype == ATT_KV_FIELD && Tsdb::RelationEnablesTsdbDelta(rel)) { - /* if drop TSField columns, update delta table simultaneously */ - Relation delta_rel = Tsdb::RelationGetDeltaRelation(rel, lockmode); - CheckTableNotInUse(delta_rel, "ALTER TABLE"); - ATExecDropColumn(wqueue, delta_rel, colName, behavior, false, false, true, lockmode); - heap_close(delta_rel, NoLock); - } - } -#endif /* ENABLE_MULTIPLE_NODES */ - - /* - * If we dropped the OID column, must adjust pg_class.relhasoids and tell - * Phase 3 to physically get rid of the column. We formerly left the - * column in place physically, but this caused subtle problems. 
See - * http://archives.postgresql.org/pgsql-hackers/2009-02/msg00363.php - */ - if (attnum == ObjectIdAttributeNumber) { - Relation class_rel; - Form_pg_class tuple_class; - AlteredTableInfo* tab = NULL; - - class_rel = heap_open(RelationRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(RelationGetRelid(rel))); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for relation %u", RelationGetRelid(rel)))); - } - tuple_class = (Form_pg_class)GETSTRUCT(tuple); - - tuple_class->relhasoids = false; - simple_heap_update(class_rel, &tuple->t_self, tuple); - - /* Keep the catalog indexes up to date */ - CatalogUpdateIndexes(class_rel, tuple); - - heap_close(class_rel, RowExclusiveLock); - - /* Find or create work queue entry for this table */ - tab = ATGetQueueEntry(wqueue, rel); - - /* Tell Phase 3 to physically remove the OID column */ - tab->rewrite |= AT_REWRITE_ALTER_OID; - } - return object; -} - -/** - * Primary key/Unique constraint on cstore does not support deferrable - */ -FORCE_INLINE static void CheckCUConstraint(Relation rel, const IndexStmt* stmt) -{ - if (RelationIsCUFormat(rel) && stmt->isconstraint && (stmt->deferrable || stmt->initdeferred)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column store unsupport DEFERRABLE/INITIALLY DEFERRED on primary key/unique constraint"))); - } -} -/* - * ALTER TABLE ADD INDEX - * - * There is no such command in the grammar, but parse_utilcmd.c converts - * UNIQUE and PRIMARY KEY constraints into AT_AddIndex subcommands. This lets - * us schedule creation of the index at the appropriate time during ALTER. 
- */ -static ObjectAddress ATExecAddIndex(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt, bool is_rebuild, LOCKMODE lockmode) -{ - bool check_rights = false; - bool skip_build = false; - bool quiet = false; - ObjectAddress address; - - Assert(IsA(stmt, IndexStmt)); - Assert(!stmt->concurrent); - - CheckCUConstraint(rel, stmt); - - /* suppress schema rights check when rebuilding existing index */ - check_rights = !is_rebuild; - /* skip index build if phase 3 will do it or we're reusing an old one */ - skip_build = tab->rewrite > 0 || OidIsValid(stmt->oldNode); - /* suppress notices when rebuilding existing index */ - quiet = is_rebuild; - - /* The IndexStmt has already been through transformIndexStmt */ - WaitState oldStatus = pgstat_report_waitstatus(STATE_CREATE_INDEX); - address = DefineIndex(RelationGetRelid(rel), - stmt, - InvalidOid, /* no predefined OID */ - true, /* is_alter_table */ - check_rights, - skip_build, - quiet, - tab->is_modify_primary); -#ifndef ENABLE_MULTIPLE_NODES - if (RelationIsCUFormat(rel) && (stmt->primary || stmt->unique)) { - DefineDeltaUniqueIndex(RelationGetRelid(rel), stmt, address.objectId); - } -#endif - (void)pgstat_report_waitstatus(oldStatus); - - /* - * If TryReuseIndex() stashed a relfilenode for us, we used it for the new - * index instead of building from scratch. The DROP of the old edition of - * this index will have scheduled the storage for deletion at commit, so - * cancel that pending deletion. - */ - if (OidIsValid(stmt->oldNode)) { - Relation irel = index_open(address.objectId, NoLock); - /* - * For global tmp table, the mapping between relid and RelFileNode are not recorded in pg_class, - * so can not reuse relfilenode. 
- */ - if (RELATION_IS_GLOBAL_TEMP(irel)) { - index_close(irel, NoLock); - return address; - } - - if (!stmt->isPartitioned || stmt->isGlobal) { - RelationPreserveStorage(irel->rd_node, true); - } else { - List* partOids = NIL; - ListCell* cell = NULL; - Partition partition = NULL; - Oid partOid = InvalidOid; - Oid partIndexOid = InvalidOid; - - partOids = relationGetPartitionOidList(rel); - foreach (cell, partOids) { - partOid = lfirst_oid(cell); - partIndexOid = getPartitionIndexOid(RelationGetRelid(irel), partOid); - - partition = partitionOpen(irel, partIndexOid, NoLock); - RelationPreserveStorage(partition->pd_node, true); - partitionClose(irel, partition, NoLock); - } - releasePartitionOidList(&partOids); - } - - index_close(irel, NoLock); - } - return address; -} - -/* - * ALTER TABLE ADD CONSTRAINT USING INDEX - * - * Returns the address of the new constraint. - */ -static ObjectAddress ATExecAddIndexConstraint(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt, LOCKMODE lockmode) -{ - Oid index_oid = stmt->indexOid; - Relation indexRel; - char* indexName = NULL; - IndexInfo* indexInfo = NULL; - char* constraintName = NULL; - char constraintType; - ObjectAddress address; - - Assert(IsA(stmt, IndexStmt)); - Assert(OidIsValid(index_oid)); - Assert(stmt->isconstraint); - - indexRel = index_open(index_oid, AccessShareLock); - - indexName = pstrdup(RelationGetRelationName(indexRel)); - - indexInfo = BuildIndexInfo(indexRel); - - /* this should have been checked at parse time */ - if (!indexInfo->ii_Unique) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("index \"%s\" is not unique", indexName))); - /* - * Determine name to assign to constraint. We require a constraint to - * have the same name as the underlying index; therefore, use the index's - * existing name as the default constraint name, and if the user - * explicitly gives some other name for the constraint, rename the index - * to match. 
- */ - constraintName = stmt->idxname; - if (constraintName == NULL) - constraintName = indexName; - else if (strcmp(constraintName, indexName) != 0) { - ereport(NOTICE, - (errmsg("ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"", - indexName, - constraintName))); - RenameRelationInternal(index_oid, constraintName); - } - - /* Extra checks needed if making primary key */ - if (stmt->primary) - index_check_primary_key(rel, indexInfo, true, stmt); - - /* Note we currently don't support EXCLUSION constraints here */ - if (stmt->primary) - constraintType = CONSTRAINT_PRIMARY; - else - constraintType = CONSTRAINT_UNIQUE; - - /* Create the catalog entries for the constraint */ - address = index_constraint_create(rel, - index_oid, - indexInfo, - constraintName, - constraintType, - stmt->deferrable, - stmt->initdeferred, - stmt->primary, - true, /* update pg_index */ - true, /* remove old dependencies */ - (g_instance.attr.attr_common.allowSystemTableMods || u_sess->attr.attr_common.IsInplaceUpgrade)); - /* index constraint */ - CreateNonColumnComment(index_oid, stmt->indexOptions, RelationRelationId); - - index_close(indexRel, NoLock); - return address; -} - -/* - * ALTER TABLE ADD CONSTRAINT - * - * Return value is the address of the new constraint; if no constraint was - * added, InvalidObjectAddress is returned. 
- */ -static ObjectAddress ATExecAddConstraint(List** wqueue, AlteredTableInfo* tab, Relation rel, Constraint* newConstraint, - bool recurse, bool is_readd, LOCKMODE lockmode) -{ - ObjectAddress address = InvalidObjectAddress; - Assert(IsA(newConstraint, Constraint)); - - if (RelationIsColStore(rel) && !CStoreSupportConstraint(newConstraint)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column store unsupport constraint \"%s\"", GetConstraintType(newConstraint->contype)))); - - if (rel->rd_tam_ops == TableAmUstore && newConstraint->deferrable == true) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmodule(MOD_COMMAND), - errmsg("Ustore table does not support to set deferrable."), - errdetail("N/A"), - errcause("feature not supported"), - erraction("check constraints of columns"))); - } - - /* - * Currently, we only expect to see CONSTR_CHECK and CONSTR_FOREIGN nodes - * arriving here (see the preprocessing done in parse_utilcmd.c). Use a - * switch anyway to make it easier to add more code later. - */ - switch (newConstraint->contype) { - case CONSTR_CHECK: - address = ATAddCheckConstraint(wqueue, tab, rel, newConstraint, recurse, false, is_readd, lockmode); - break; - - case CONSTR_FOREIGN: - - /* - * Note that we currently never recurse for FK constraints, so the - * "recurse" flag is silently ignored. 
- * - * Assign or validate constraint name - */ - if (newConstraint->conname) { - if (ConstraintNameIsUsed( - CONSTRAINT_RELATION, RelationGetRelid(rel), RelationGetNamespace(rel), newConstraint->conname)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for relation \"%s\" already exists", - newConstraint->conname, - RelationGetRelationName(rel)))); - } else - newConstraint->conname = ChooseConstraintName(RelationGetRelationName(rel), - strVal(linitial(newConstraint->fk_attrs)), - "fkey", - RelationGetNamespace(rel), - NIL); - - address = ATAddForeignKeyConstraint(tab, rel, newConstraint, lockmode); - break; - case CONSTR_CLUSTER: - if (rel->rd_rel->relhasclusterkey && !is_readd) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("cluster key for relation \"%s\" already exists", RelationGetRelationName(rel)))); - else { - List * tmp = AddRelClusterConstraints(rel, list_make1(newConstraint)); - Assert(tmp != NIL && list_length(tmp) == 1); - address = *((ObjectAddress*)linitial(tmp)); - } - - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized constraint type: %d", (int)newConstraint->contype))); - } - } - return address; -} - -/* - * Add a check constraint to a single table and its children - * - * Subroutine for ATExecAddConstraint. - * - * We must recurse to child tables during execution, rather than using - * ALTER TABLE's normal prep-time recursion. The reason is that all the - * constraints *must* be given the same name, else they won't be seen as - * related later. If the user didn't explicitly specify a name, then - * AddRelationNewConstraints would normally assign different names to the - * child constraints. To fix that, we must capture the name assigned at - * the parent table and pass that down. 
- * - * When re-adding a previously existing constraint (during ALTER COLUMN TYPE), - * we don't need to recurse here, because recursion will be carried out at a - * higher level; the constraint name issue doesn't apply because the names - * have already been assigned and are just being re-used. We need a separate - * "is_readd" flag for that; just setting recurse=false would result in an - * error if there are child tables. - */ -static ObjectAddress ATAddCheckConstraint(List** wqueue, AlteredTableInfo* tab, Relation rel, Constraint* constr, bool recurse, - bool recursing, bool is_readd, LOCKMODE lockmode) -{ - List* newcons = NIL; - ListCell* lcon = NULL; - List* children = NIL; - ListCell* child = NULL; - ObjectAddress address = InvalidObjectAddress; - - /* At top level, permission check was done in ATPrepCmd, else do it */ - if (recursing) - ATSimplePermissions(rel, ATT_TABLE); - - /* - * Call AddRelationNewConstraints to do the work, making sure it works on - * a copy of the Constraint so transformExpr can't modify the original. It - * returns a list of cooked constraints. - * - * If the constraint ends up getting merged with a pre-existing one, it's - * omitted from the returned list, which is what we want: we do not need - * to do any validation work. That can only happen at child tables, - * though, since we disallow merging at the top level. 
- */ - newcons = AddRelationNewConstraints(rel, - NIL, - list_make1(copyObject(constr)), - recursing, /* allow_merge */ - !recursing); /* is_local */ - /* we don't expect more than one constraint here */ - Assert(list_length(newcons) <= 1); - - - /* Add each to-be-validated constraint to Phase 3's queue */ - foreach (lcon, newcons) { - CookedConstraint* ccon = (CookedConstraint*)lfirst(lcon); - - if (!ccon->skip_validation) { - NewConstraint* newcon = NULL; - - newcon = (NewConstraint*)palloc0(sizeof(NewConstraint)); - newcon->name = ccon->name; - newcon->contype = ccon->contype; - /* ExecQual wants implicit-AND format */ - newcon->qual = (Node*)make_ands_implicit((Expr*)ccon->expr); - - tab->constraints = lappend(tab->constraints, newcon); - } - - /* Save the actually assigned name if it was defaulted */ - if (constr->conname == NULL) - constr->conname = ccon->name; - - ObjectAddressSet(address, ConstraintRelationId, ccon->conoid); - } - - /* At this point we must have a locked-down name to use */ - Assert(constr->conname != NULL); - - /* Advance command counter in case same table is visited multiple times */ - CommandCounterIncrement(); - - /* - * If the constraint got merged with an existing constraint, we're done. - * We mustn't recurse to child tables in this case, because they've - * already got the constraint, and visiting them again would lead to an - * incorrect value for coninhcount. - */ - if (newcons == NIL) - return address; - - /* - * If adding a NO INHERIT constraint, no need to find our children. - * Likewise, in a re-add operation, we don't need to recurse (that will be - * handled at higher levels). - */ - if (constr->is_no_inherit || is_readd) - return address; - - /* - * Propagate to children as appropriate. Unlike most other ALTER - * routines, we have to do this one level of recursion at a time; we can't - * use find_all_inheritors to do it in one pass. 
- */ - children = find_inheritance_children(RelationGetRelid(rel), lockmode); - - /* - * Check if ONLY was specified with ALTER TABLE. If so, allow the - * contraint creation only if there are no children currently. Error out - * otherwise. - */ - if (!recurse && children != NIL) - ereport( - ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("constraint must be added to child tables too"))); - - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - AlteredTableInfo* childtab = NULL; - - /* find_inheritance_children already got lock */ - childrel = heap_open(childrelid, NoLock); - CheckTableNotInUse(childrel, "ALTER TABLE"); - - /* Find or create work queue entry for this table */ - childtab = ATGetQueueEntry(wqueue, childrel); - - /* Recurse to child */ - ATAddCheckConstraint(wqueue, childtab, childrel, constr, recurse, true, is_readd, lockmode); - - heap_close(childrel, NoLock); - } - return address; -} - -/* - * Add a foreign-key constraint to a single table, return the new constraint's - * address. - * - * Subroutine for ATExecAddConstraint. Must already hold exclusive - * lock on the rel, and have done appropriate validity checks for it. - * We do permissions checks here, however. - */ -static ObjectAddress ATAddForeignKeyConstraint(AlteredTableInfo* tab, Relation rel, Constraint* fkconstraint, LOCKMODE lockmode) -{ - Relation pkrel; - int16 pkattnum[INDEX_MAX_KEYS]; - int16 fkattnum[INDEX_MAX_KEYS]; - Oid pktypoid[INDEX_MAX_KEYS]; - Oid fktypoid[INDEX_MAX_KEYS]; - Oid opclasses[INDEX_MAX_KEYS]; - Oid pfeqoperators[INDEX_MAX_KEYS]; - Oid ppeqoperators[INDEX_MAX_KEYS]; - Oid ffeqoperators[INDEX_MAX_KEYS]; - int i; - int numfks, numpks; - Oid indexOid; - Oid constrOid; - bool old_check_ok = false; - ListCell* old_pfeqop_item = list_head(fkconstraint->old_conpfeqop); - ObjectAddress address; - - /* - * Grab an exclusive lock on the pk table, so that someone doesn't delete - * rows out from under us. 
(Although a lesser lock would do for that - * purpose, we'll need exclusive lock anyway to add triggers to the pk - * table; trying to start with a lesser lock will just create a risk of - * deadlock.) - */ - if (OidIsValid(fkconstraint->old_pktable_oid)) - pkrel = heap_open(fkconstraint->old_pktable_oid, AccessExclusiveLock); - else - pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock); - - /* - * Validity checks (permission checks wait till we have the column - * numbers) - */ - if (pkrel->rd_rel->relkind != RELKIND_RELATION) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("referenced relation \"%s\" is not a table", RelationGetRelationName(pkrel)))); - - if (!g_instance.attr.attr_common.allowSystemTableMods && !u_sess->attr.attr_common.IsInplaceUpgrade && - IsSystemRelation(pkrel)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", RelationGetRelationName(pkrel)))); - -#ifdef ENABLE_MULTIPLE_NODES - if (RELATION_IS_PARTITIONED(pkrel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Invalid FOREIGN KEY constraints"), - errdetail("Partitioned table cannot be referenced table"))); -#endif - - /* - * References from permanent or unlogged tables to temp tables, and from - * permanent tables to unlogged tables, are disallowed because the - * referenced data can vanish out from under us. References from temp - * tables to any other table type are also disallowed, because other - * backends might need to run the RI triggers on the perm table, but they - * can't reliably see tuples in the local buffers of other backends. 
- */ - switch (rel->rd_rel->relpersistence) { - case RELPERSISTENCE_PERMANENT: - if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_PERMANENT) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraints on permanent tables may reference only permanent tables"))); - break; - case RELPERSISTENCE_UNLOGGED: - if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_PERMANENT && - pkrel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraints on unlogged tables may reference only permanent or unlogged tables"))); - break; - case RELPERSISTENCE_TEMP: - if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraints on temporary tables may reference only temporary tables"))); - if (!RelationIsLocalTemp(pkrel) || !RelationIsLocalTemp(rel)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraints on temporary tables must involve temporary tables of this session"))); - break; - case RELPERSISTENCE_GLOBAL_TEMP: - if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_GLOBAL_TEMP) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraints on global temporary tables may reference only global temporary tables"))); - } - break; - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized table type: %d", (int)rel->rd_rel->relpersistence))); - } - - /* - * Look up the referencing attributes to make sure they exist, and record - * their attnums and type OIDs. 
- */ - errno_t rc; - rc = memset_s(pkattnum, sizeof(pkattnum), 0, sizeof(pkattnum)); - securec_check(rc, "", ""); - rc = memset_s(fkattnum, sizeof(fkattnum), 0, sizeof(fkattnum)); - securec_check(rc, "", ""); - rc = memset_s(pktypoid, sizeof(pktypoid), 0, sizeof(pktypoid)); - securec_check(rc, "", ""); - rc = memset_s(fktypoid, sizeof(fktypoid), 0, sizeof(fktypoid)); - securec_check(rc, "", ""); - rc = memset_s(opclasses, sizeof(opclasses), 0, sizeof(opclasses)); - securec_check(rc, "", ""); - rc = memset_s(pfeqoperators, sizeof(pfeqoperators), 0, sizeof(pfeqoperators)); - securec_check(rc, "", ""); - rc = memset_s(ppeqoperators, sizeof(ppeqoperators), 0, sizeof(ppeqoperators)); - securec_check(rc, "", ""); - rc = memset_s(ffeqoperators, sizeof(ffeqoperators), 0, sizeof(ffeqoperators)); - securec_check(rc, "", ""); - - numfks = transformColumnNameList(RelationGetRelid(rel), fkconstraint->fk_attrs, fkattnum, fktypoid); - - /* - * If the attribute list for the referenced table was omitted, lookup the - * definition of the primary key and use it. Otherwise, validate the - * supplied attribute list. In either case, discover the index OID and - * index opclasses, and the attnums and type OIDs of the attributes. - */ - if (fkconstraint->pk_attrs == NIL) { - numpks = transformFkeyGetPrimaryKey(pkrel, &indexOid, &fkconstraint->pk_attrs, pkattnum, pktypoid, opclasses); - } else { - numpks = transformColumnNameList(RelationGetRelid(pkrel), fkconstraint->pk_attrs, pkattnum, pktypoid); - /* Look for an index matching the column list */ - indexOid = transformFkeyCheckAttrs(pkrel, numpks, pkattnum, opclasses); - } - - /* - * Now we can check permissions. - */ - checkFkeyPermissions(pkrel, pkattnum, numpks); - checkFkeyPermissions(rel, fkattnum, numfks); - - /* - * Check some things for generated columns. 
- */ - for (i = 0; i < numfks; i++) - { - char generated = GetGeneratedCol(RelationGetDescr(rel), fkattnum[i] - 1); - - if (generated) - { - /* - * Check restrictions on UPDATE/DELETE actions, per SQL standard - */ - if (fkconstraint->fk_upd_action == FKCONSTR_ACTION_SETNULL || - fkconstraint->fk_upd_action == FKCONSTR_ACTION_SETDEFAULT || - fkconstraint->fk_upd_action == FKCONSTR_ACTION_CASCADE) - ereport(ERROR, - (errmodule(MOD_GEN_COL), errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid %s action for foreign key constraint containing generated column", - "ON UPDATE"))); - if (fkconstraint->fk_del_action == FKCONSTR_ACTION_SETNULL || - fkconstraint->fk_del_action == FKCONSTR_ACTION_SETDEFAULT) - ereport(ERROR, - (errmodule(MOD_GEN_COL), errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid %s action for foreign key constraint containing generated column", - "ON DELETE"))); - } - } - - /* - * Look up the equality operators to use in the constraint. - * - * Note that we have to be careful about the difference between the actual - * PK column type and the opclass' declared input type, which might be - * only binary-compatible with it. The declared opcintype is the right - * thing to probe pg_amop with. - */ - if (numfks != numpks) - ereport(ERROR, - (errcode(ERRCODE_INVALID_FOREIGN_KEY), - errmsg("number of referencing and referenced columns for foreign key disagree"))); - - /* - * On the strength of a previous constraint, we might avoid scanning - * tables to validate this one. See below. 
- */ - old_check_ok = (fkconstraint->old_conpfeqop != NIL); - Assert(!old_check_ok || numfks == list_length(fkconstraint->old_conpfeqop)); - - for (i = 0; i < numpks; i++) { - Oid pktype = pktypoid[i]; - Oid fktype = fktypoid[i]; - Oid fktyped; - HeapTuple cla_ht; - Form_pg_opclass cla_tup; - Oid amid; - Oid opfamily; - Oid opcintype; - Oid pfeqop; - Oid ppeqop; - Oid ffeqop; - int16 eqstrategy; - Oid pfeqop_right; - - /* We need several fields out of the pg_opclass entry */ - cla_ht = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclasses[i])); - if (!HeapTupleIsValid(cla_ht)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for opclass %u", opclasses[i]))); - } - cla_tup = (Form_pg_opclass)GETSTRUCT(cla_ht); - amid = cla_tup->opcmethod; - opfamily = cla_tup->opcfamily; - opcintype = cla_tup->opcintype; - ReleaseSysCache(cla_ht); - - /* - * Check it's a btree; currently this can never fail since no other - * index AMs support unique indexes. If we ever did have other types - * of unique indexes, we'd need a way to determine which operator - * strategy number is equality. (Is it reasonable to insist that - * every such index AM use btree's number for equality?) - */ - if (!OID_IS_BTREE(amid)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("only b-tree indexes are supported for foreign keys"))); - eqstrategy = BTEqualStrategyNumber; - - /* - * There had better be a primary equality operator for the index. - * We'll use it for PK = PK comparisons. - */ - ppeqop = get_opfamily_member(opfamily, opcintype, opcintype, eqstrategy); - - if (!OidIsValid(ppeqop)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("missing operator %d(%u,%u) in opfamily %u", eqstrategy, opcintype, opcintype, opfamily))); - /* - * Are there equality operators that take exactly the FK type? Assume - * we should look through any domain here. 
- */ - fktyped = getBaseType(fktype); - - pfeqop = get_opfamily_member(opfamily, opcintype, fktyped, eqstrategy); - if (OidIsValid(pfeqop)) { - pfeqop_right = fktyped; - ffeqop = get_opfamily_member(opfamily, fktyped, fktyped, eqstrategy); - } else { - /* keep compiler quiet */ - pfeqop_right = InvalidOid; - ffeqop = InvalidOid; - } - - if (!(OidIsValid(pfeqop) && OidIsValid(ffeqop))) { - /* - * Otherwise, look for an implicit cast from the FK type to the - * opcintype, and if found, use the primary equality operator. - * This is a bit tricky because opcintype might be a polymorphic - * type such as ANYARRAY or ANYENUM; so what we have to test is - * whether the two actual column types can be concurrently cast to - * that type. (Otherwise, we'd fail to reject combinations such - * as int[] and point[].) - */ - Oid input_typeids[2]; - Oid target_typeids[2]; - - input_typeids[0] = pktype; - input_typeids[1] = fktype; - target_typeids[0] = opcintype; - target_typeids[1] = opcintype; - if (can_coerce_type(2, input_typeids, target_typeids, COERCION_IMPLICIT)) { - pfeqop = ffeqop = ppeqop; - pfeqop_right = opcintype; - } - } - - if (!(OidIsValid(pfeqop) && OidIsValid(ffeqop))) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("foreign key constraint \"%s\" " - "cannot be implemented", - fkconstraint->conname), - errdetail("Key columns \"%s\" and \"%s\" " - "are of incompatible types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), - format_type_be(fktype), - format_type_be(pktype)))); - - if (old_check_ok) { - /* - * When a pfeqop changes, revalidate the constraint. We could - * permit intra-opfamily changes, but that adds subtle complexity - * without any concrete benefit for core types. We need not - * assess ppeqop or ffeqop, which RI_Initial_Check() does not use. 
- */ - old_check_ok = (pfeqop == lfirst_oid(old_pfeqop_item)); - old_pfeqop_item = lnext(old_pfeqop_item); - } - if (old_check_ok) { - Oid old_fktype; - Oid new_fktype; - CoercionPathType old_pathtype; - CoercionPathType new_pathtype; - Oid old_castfunc; - Oid new_castfunc; - - /* - * Identify coercion pathways from each of the old and new FK-side - * column types to the right (foreign) operand type of the pfeqop. - * We may assume that pg_constraint.conkey is not changing. - */ - old_fktype = tab->oldDesc->attrs[fkattnum[i] - 1].atttypid; - new_fktype = fktype; - old_pathtype = findFkeyCast(pfeqop_right, old_fktype, &old_castfunc); - new_pathtype = findFkeyCast(pfeqop_right, new_fktype, &new_castfunc); - - /* - * Upon a change to the cast from the FK column to its pfeqop - * operand, revalidate the constraint. For this evaluation, a - * binary coercion cast is equivalent to no cast at all. While - * type implementors should design implicit casts with an eye - * toward consistency of operations like equality, we cannot - * assume here that they have done so. - * - * A function with a polymorphic argument could change behavior - * arbitrarily in response to get_fn_expr_argtype(). Therefore, - * when the cast destination is polymorphic, we only avoid - * revalidation if the input type has not changed at all. Given - * just the core data types and operator classes, this requirement - * prevents no would-be optimizations. - * - * If the cast converts from a base type to a domain thereon, then - * that domain type must be the opcintype of the unique index. - * Necessarily, the primary key column must then be of the domain - * type. Since the constraint was previously valid, all values on - * the foreign side necessarily exist on the primary side and in - * turn conform to the domain. Consequently, we need not treat - * domains specially here. 
- * - * Since we require that all collations share the same notion of - * equality (which they do, because texteq reduces to bitwise - * equality), we don't compare collation here. - * - * We need not directly consider the PK type. It's necessarily - * binary coercible to the opcintype of the unique index column, - * and ri_triggers.c will only deal with PK datums in terms of - * that opcintype. Changing the opcintype also changes pfeqop. - */ - old_check_ok = (new_pathtype == old_pathtype && new_castfunc == old_castfunc && - (!IsPolymorphicType(pfeqop_right) || new_fktype == old_fktype)); - } - - pfeqoperators[i] = pfeqop; - ppeqoperators[i] = ppeqop; - ffeqoperators[i] = ffeqop; - } - -#ifdef PGXC - /* Check the shippability of this foreign key */ - if (IS_PGXC_COORDINATOR) { - List* childRefs = NIL; - List* parentRefs = NIL; - - /* Prepare call for shippability check */ - for (i = 0; i < numfks; i++) - childRefs = lappend_int(childRefs, fkattnum[i]); - for (i = 0; i < numpks; i++) - parentRefs = lappend_int(parentRefs, pkattnum[i]); - - /* Now check shippability for this foreign key */ - if (!pgxc_check_fk_shippability(GetRelationLocInfo(RelationGetRelid(pkrel)), - GetRelationLocInfo(RelationGetRelid(rel)), - parentRefs, - childRefs)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Cannot create foreign key whose evaluation cannot be enforced to remote nodes"))); - } -#endif - - /* - * Record the FK constraint in pg_constraint. 
- */ - constrOid = CreateConstraintEntry(fkconstraint->conname, - RelationGetNamespace(rel), - CONSTRAINT_FOREIGN, - fkconstraint->deferrable, - fkconstraint->initdeferred, - fkconstraint->initially_valid, - RelationGetRelid(rel), - fkattnum, - numfks, - numfks, - InvalidOid, /* not a domain - * constraint */ - indexOid, - RelationGetRelid(pkrel), - pkattnum, - pfeqoperators, - ppeqoperators, - ffeqoperators, - numpks, - fkconstraint->fk_upd_action, - fkconstraint->fk_del_action, - fkconstraint->fk_matchtype, - NULL, /* no exclusion constraint */ - NULL, /* no check constraint */ - NULL, - NULL, - true, /* islocal */ - 0, /* inhcount */ - true, /* isnoinherit */ - fkconstraint->inforConstraint); /* @hdfs informational constraint */ - - ObjectAddressSet(address, ConstraintRelationId, constrOid); - /* foreign key constraint */ - CreateNonColumnComment(constrOid, fkconstraint->constraintOptions, ConstraintRelationId); - - /* - * Create the triggers that will enforce the constraint. - */ - createForeignKeyTriggers(rel, RelationGetRelid(pkrel), fkconstraint, constrOid, indexOid); - - /* - * Tell Phase 3 to check that the constraint is satisfied by existing - * rows. We can skip this during table creation, when requested explicitly - * by specifying NOT VALID in an ADD FOREIGN KEY command, and when we're - * recreating a constraint following a SET DATA TYPE operation that did - * not impugn its validity. - */ - if (!old_check_ok && !fkconstraint->skip_validation) { - NewConstraint* newcon = (NewConstraint*)palloc0(sizeof(NewConstraint)); - newcon->name = fkconstraint->conname; - newcon->contype = CONSTR_FOREIGN; - newcon->refrelid = RelationGetRelid(pkrel); - newcon->refindid = indexOid; - newcon->conid = constrOid; - newcon->qual = (Node*)fkconstraint; - - tab->constraints = lappend(tab->constraints, newcon); - } - - /* - * Close pk table, but keep lock until we've committed. 
- */ - heap_close(pkrel, NoLock); - return address; -} - -/* - * ALTER TABLE VALIDATE CONSTRAINT - * - * XXX The reason we handle recursion here rather than at Phase 1 is because - * there's no good way to skip recursing when handling foreign keys: there is - * no need to lock children in that case, yet we wouldn't be able to avoid - * doing so at that level. - * - * Return value is the address of the validated constraint. If the constraint - * was already validated, InvalidObjectAddress is returned. - */ -static ObjectAddress ATExecValidateConstraint(Relation rel, char* constrName, bool recurse, bool recursing, LOCKMODE lockmode) -{ - Relation conrel; - SysScanDesc scan; - ScanKeyData key; - HeapTuple tuple; - Form_pg_constraint con = NULL; - bool found = false; - ObjectAddress address; - - conrel = heap_open(ConstraintRelationId, RowExclusiveLock); - - /* - * Find and check the target constraint - */ - ScanKeyInit( - &key, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, true, NULL, 1, &key); - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - con = (Form_pg_constraint)GETSTRUCT(tuple); - if (strcmp(NameStr(con->conname), constrName) == 0) { - found = true; - break; - } - } - - if (!found) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg( - "constraint \"%s\" of relation \"%s\" does not exist", constrName, RelationGetRelationName(rel)))); - - if (con->contype != CONSTRAINT_FOREIGN && con->contype != CONSTRAINT_CHECK) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint", - constrName, - RelationGetRelationName(rel)))); - - if (!con->convalidated) { - HeapTuple copyTuple; - Form_pg_constraint copy_con; - - if (con->contype == CONSTRAINT_FOREIGN) { - Relation refrel; - - /* - * Triggers are already in place on both tables, so a 
concurrent - * write that alters the result here is not possible. Normally we - * can run a query here to do the validation, which would only - * require AccessShareLock. In some cases, it is possible that we - * might need to fire triggers to perform the check, so we take a - * lock at RowShareLock level just in case. - */ - refrel = heap_open(con->confrelid, RowShareLock); - - validateForeignKeyConstraint(constrName, rel, refrel, con->conindid, HeapTupleGetOid(tuple)); - heap_close(refrel, NoLock); - - /* - * Foreign keys do not inherit, so we purposely ignore the - * recursion bit here - */ - } else if (con->contype == CONSTRAINT_CHECK) { - List* children = NIL; - ListCell* child = NULL; - - /* - * If we're recursing, the parent has already done this, so skip - * it. - */ - if (!recursing) - children = find_all_inheritors(RelationGetRelid(rel), lockmode, NULL); - - /* - * For CHECK constraints, we must ensure that we only mark the - * constraint as validated on the parent if it's already validated - * on the children. - * - * We recurse before validating on the parent, to reduce risk of - * deadlocks. - */ - foreach (child, children) { - Oid childoid = lfirst_oid(child); - Relation childrel; - - if (childoid == RelationGetRelid(rel)) - continue; - - /* - * If we are told not to recurse, there had better not be any - * child tables; else the addition would put them out of step. 
- */ - if (!recurse) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("constraint must be validated on child tables too"))); - - /* find_all_inheritors already got lock */ - childrel = heap_open(childoid, NoLock); - - ATExecValidateConstraint(childrel, constrName, false, true, lockmode); - heap_close(childrel, NoLock); - } - - if (!RelationIsPartitioned(rel)) { - if (RELATION_CREATE_BUCKET(rel)) { - /* validate constraint for every buckets */ - validateCheckConstraintForBucket(rel, NULL, tuple); - } else { - validateCheckConstraint(rel, tuple); - } - } else if (RelationIsSubPartitioned(rel)) { - List* partitions = NIL; - ListCell* cell = NULL; - Partition partition = NULL; - Relation partRel = NULL; - - partitions = relationGetPartitionList(rel, lockmode); - foreach (cell, partitions) { - partition = (Partition)lfirst(cell); - partRel = partitionGetRelation(rel, partition); - - List *subpartitions = relationGetPartitionList(partRel, lockmode); - ListCell *subcell = NULL; - foreach (subcell, subpartitions) { - Partition subpartition = (Partition)lfirst(subcell); - if (RELATION_OWN_BUCKETKEY(rel)) { - /* validate constraint for every buckets */ - validateCheckConstraintForBucket(partRel, subpartition, tuple); - } else { - Relation subpartRel = partitionGetRelation(partRel, subpartition); - - validateCheckConstraint(subpartRel, tuple); - - releaseDummyRelation(&subpartRel); - } - } - releasePartitionList(partRel, &subpartitions, lockmode); - releaseDummyRelation(&partRel); - } - releasePartitionList(rel, &partitions, lockmode); - } else { - List* partitions = NIL; - ListCell* cell = NULL; - Partition partition = NULL; - Relation partRel = NULL; - - partitions = relationGetPartitionList(rel, lockmode); - foreach (cell, partitions) { - partition = (Partition)lfirst(cell); - if (RELATION_OWN_BUCKETKEY(rel)) { - /* validate constraint for every buckets */ - validateCheckConstraintForBucket(rel, partition, tuple); - } else { - partRel = 
partitionGetRelation(rel, partition); - - validateCheckConstraint(partRel, tuple); - - releaseDummyRelation(&partRel); - } - } - releasePartitionList(rel, &partitions, lockmode); - } - - /* - * Invalidate relcache so that others see the new validated - * constraint. - */ - CacheInvalidateRelcache(rel); - } - - /* - * Now update the catalog, while we have the door open. - */ - copyTuple = (HeapTuple) tableam_tops_copy_tuple(tuple); - copy_con = (Form_pg_constraint)GETSTRUCT(copyTuple); - copy_con->convalidated = true; - simple_heap_update(conrel, ©Tuple->t_self, copyTuple); - CatalogUpdateIndexes(conrel, copyTuple); - tableam_tops_free_tuple(copyTuple); - ObjectAddressSet(address, ConstraintRelationId, - HeapTupleGetOid(tuple)); - } else - address = InvalidObjectAddress; /* already validated */ - - systable_endscan(scan); - - heap_close(conrel, RowExclusiveLock); - return address; -} - -/* - * transformColumnNameList - transform list of column names - * - * Lookup each name and return its attnum and type OID - */ -static int transformColumnNameList(Oid relId, List* colList, int16* attnums, Oid* atttypids) -{ - ListCell* l = NULL; - int attnum; - - attnum = 0; - foreach (l, colList) { - char* attname = strVal(lfirst(l)); - HeapTuple atttuple; - - atttuple = SearchSysCacheAttName(relId, attname); - if (!HeapTupleIsValid(atttuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" referenced in foreign key constraint does not exist", attname))); - if (attnum >= INDEX_MAX_KEYS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_COLUMNS), - errmsg("cannot have more than %d keys in a foreign key", INDEX_MAX_KEYS))); - attnums[attnum] = ((Form_pg_attribute)GETSTRUCT(atttuple))->attnum; - atttypids[attnum] = ((Form_pg_attribute)GETSTRUCT(atttuple))->atttypid; - ReleaseSysCache(atttuple); - attnum++; - } - - return attnum; -} - -/* - * transformFkeyGetPrimaryKey - - * - * Look up the names, attnums, and types of the primary key attributes - * for the 
pkrel. Also return the index OID and index opclasses of the - * index supporting the primary key. - * - * All parameters except pkrel are output parameters. Also, the function - * return value is the number of attributes in the primary key. - * - * Used when the column list in the REFERENCES specification is omitted. - */ -static int transformFkeyGetPrimaryKey( - Relation pkrel, Oid* indexOid, List** attnamelist, int16* attnums, Oid* atttypids, Oid* opclasses) -{ - List* indexoidlist = NIL; - ListCell* indexoidscan = NULL; - HeapTuple indexTuple = NULL; - Form_pg_index indexStruct = NULL; - Datum indclassDatum; - bool isnull = false; - oidvector* indclass = NULL; - int indnkeyatts = 0; - int i; - - /* - * Get the list of index OIDs for the table from the relcache, and look up - * each one in the pg_index syscache until we find one marked primary key - * (hopefully there isn't more than one such). Insist it's valid, too. - */ - *indexOid = InvalidOid; - - indexoidlist = RelationGetIndexList(pkrel); - - foreach (indexoidscan, indexoidlist) { - Oid indexoid = lfirst_oid(indexoidscan); - - indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid)); - if (!HeapTupleIsValid(indexTuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", indexoid))); - } - - indexStruct = (Form_pg_index)GETSTRUCT(indexTuple); - indnkeyatts = GetIndexKeyAttsByTuple(NULL, indexTuple); - if (indexStruct->indisprimary && IndexIsValid(indexStruct)) { - /* - * Refuse to use a deferrable primary key. This is per SQL spec, - * and there would be a lot of interesting semantic problems if we - * tried to allow it. 
- */ - if (!indexStruct->indimmediate) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot use a deferrable primary key for referenced table \"%s\"", - RelationGetRelationName(pkrel)))); - - *indexOid = indexoid; - break; - } - ReleaseSysCache(indexTuple); - } - - list_free_ext(indexoidlist); - - /* - * Check that we found it - */ - if (!OidIsValid(*indexOid)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("there is no primary key for referenced table \"%s\"", RelationGetRelationName(pkrel)))); - - /* Must get indclass the hard way */ - indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple, Anum_pg_index_indclass, &isnull); - Assert(!isnull); - indclass = (oidvector*)DatumGetPointer(indclassDatum); - - /* - * Now build the list of PK attributes from the indkey definition (we - * assume a primary key cannot have expressional elements) - */ - *attnamelist = NIL; - for (i = 0; i < indnkeyatts; i++) { - int pkattno = indexStruct->indkey.values[i]; - - attnums[i] = pkattno; - atttypids[i] = attnumTypeId(pkrel, pkattno); - opclasses[i] = indclass->values[i]; - *attnamelist = lappend(*attnamelist, makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno))))); - } - - ReleaseSysCache(indexTuple); - - return i; -} - -/* - * transformFkeyCheckAttrs - - * - * Make sure that the attributes of a referenced table belong to a unique - * (or primary key) constraint. Return the OID of the index supporting - * the constraint, as well as the opclasses associated with the index - * columns. 
- */ -static Oid transformFkeyCheckAttrs(Relation pkrel, int numattrs, int16* attnums, Oid* opclasses) /* output parameter */ -{ - Oid indexoid = InvalidOid; - bool found = false; - bool found_deferrable = false; - List* indexoidlist = NIL; - ListCell* indexoidscan = NULL; - - /* - * Get the list of index OIDs for the table from the relcache, and look up - * each one in the pg_index syscache, and match unique indexes to the list - * of attnums we are given. - */ - indexoidlist = RelationGetIndexList(pkrel); - - foreach (indexoidscan, indexoidlist) { - HeapTuple indexTuple; - Form_pg_index indexStruct; - int indnkeyatts; - int i, j; - - indexoid = lfirst_oid(indexoidscan); - indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid)); - if (!HeapTupleIsValid(indexTuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", indexoid))); - } - indexStruct = (Form_pg_index)GETSTRUCT(indexTuple); - indnkeyatts = GetIndexKeyAttsByTuple(NULL, indexTuple); - - /* - * Must have the right number of columns; must be unique and not a - * partial index; forget it if there are any expressions, too. Invalid - * indexes are out as well. - */ - if (indnkeyatts == numattrs && indexStruct->indisunique && IndexIsValid(indexStruct) && - tableam_tops_tuple_attisnull(indexTuple, Anum_pg_index_indpred, NULL) && - tableam_tops_tuple_attisnull(indexTuple, Anum_pg_index_indexprs, NULL)) { - /* Must get indclass the hard way */ - Datum indclassDatum; - bool isnull = false; - oidvector* indclass = NULL; - - indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple, Anum_pg_index_indclass, &isnull); - Assert(!isnull); - indclass = (oidvector*)DatumGetPointer(indclassDatum); - - /* - * The given attnum list may match the index columns in any order. - * Check that each list is a subset of the other. 
- */ - for (i = 0; i < numattrs; i++) { - found = false; - for (j = 0; j < numattrs; j++) { - if (attnums[i] == indexStruct->indkey.values[j]) { - found = true; - break; - } - } - if (!found) - break; - } - if (found) { - for (i = 0; i < numattrs; i++) { - found = false; - for (j = 0; j < numattrs; j++) { - if (attnums[j] == indexStruct->indkey.values[i]) { - opclasses[j] = indclass->values[i]; - found = true; - break; - } - } - if (!found) - break; - } - } - - /* - * Refuse to use a deferrable unique/primary key. This is per SQL - * spec, and there would be a lot of interesting semantic problems - * if we tried to allow it. - */ - if (found && !indexStruct->indimmediate) { - /* - * Remember that we found an otherwise matching index, so that - * we can generate a more appropriate error message. - */ - found_deferrable = true; - found = false; - } - } - ReleaseSysCache(indexTuple); - if (found) - break; - } - - if (!found) { - if (found_deferrable) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot use a deferrable unique constraint for referenced table \"%s\"", - RelationGetRelationName(pkrel)))); - else - ereport(ERROR, - (errcode(ERRCODE_INVALID_FOREIGN_KEY), - errmsg("there is no unique constraint matching given keys for referenced table \"%s\"", - RelationGetRelationName(pkrel)))); - } - - list_free_ext(indexoidlist); - - return indexoid; -} - -/* - * findFkeyCast - - * - * Wrapper around find_coercion_pathway() for ATAddForeignKeyConstraint(). - * Caller has equal regard for binary coercibility and for an exact match. - */ -static CoercionPathType findFkeyCast(Oid targetTypeId, Oid sourceTypeId, Oid* funcid) -{ - CoercionPathType ret; - - if (targetTypeId == sourceTypeId) { - ret = COERCION_PATH_RELABELTYPE; - *funcid = InvalidOid; - } else { - ret = find_coercion_pathway(targetTypeId, sourceTypeId, COERCION_IMPLICIT, funcid); - if (ret == COERCION_PATH_NONE) - /* A previously-relied-upon cast is now gone. 
*/ - ereport(ERROR, - (errcode(ERRCODE_INVALID_CHARACTER_VALUE_FOR_CAST), - errmsg("could not find cast from %s to %s", format_type_be(sourceTypeId), format_type_be(targetTypeId)))); - } - - return ret; -} - -/* Permissions checks for ADD FOREIGN KEY */ -static void checkFkeyPermissions(Relation rel, int16* attnums, int natts) -{ - Oid roleid = GetUserId(); - AclResult aclresult; - int i; - - /* Okay if we have relation-level REFERENCES permission */ - aclresult = pg_class_aclcheck(RelationGetRelid(rel), roleid, ACL_REFERENCES); - if (aclresult == ACLCHECK_OK) - return; - /* Else we must have REFERENCES on each column */ - for (i = 0; i < natts; i++) { - aclresult = pg_attribute_aclcheck(RelationGetRelid(rel), attnums[i], roleid, ACL_REFERENCES); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, RelationGetRelationName(rel)); - } -} - -/* - * Scan the existing rows in a table to verify they meet a proposed - * CHECK constraint. - * - * The caller must have opened and locked the relation appropriately. 
- */ -static void validateCheckConstraint(Relation rel, HeapTuple constrtup) -{ - Datum val; - HeapTuple tuple = NULL; - bool isnull = false; - MemoryContext oldcxt; - ListCell* lc = NULL; - - Form_pg_constraint constrForm = (Form_pg_constraint)GETSTRUCT(constrtup); - EState* estate = CreateExecutorState(); - - /* - * XXX this tuple doesn't really come from a syscache, but this doesn't - * matter to SysCacheGetAttr, because it only wants to be able to fetch - * the tupdesc - */ - val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin, &isnull); - if (isnull) - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("null conbin for constraint %u", HeapTupleGetOid(constrtup)))); - char* conbin = TextDatumGetCString(val); - Expr* origexpr = (Expr*)stringToNode(conbin); - List* exprstate = ExecPrepareExprList(make_ands_implicit(origexpr), estate); - ExprContext* econtext = GetPerTupleExprContext(estate); - TupleDesc tupdesc = RelationGetDescr(rel); - TupleTableSlot* slot = MakeSingleTupleTableSlot(tupdesc, false, rel->rd_tam_ops); - - econtext->ecxt_scantuple = slot; - - TableScanDesc scan = tableam_scan_begin(rel, SnapshotNow, 0, NULL); - - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - (void)ExecStoreTuple(tuple, slot, InvalidBuffer, false); - - /* - * Switch to per-tuple memory context and reset it for each tuple - * produced, so we don't leak memory. 
- */ - oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - if (estate->es_is_flt_frame){ - foreach (lc, exprstate) { - ExprState* exprState = (ExprState*)lfirst(lc); - - if (!ExecCheckByFlatten(exprState, econtext)) - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", NameStr(constrForm->conname)))); - } - } else { - if (!ExecQualByRecursion(exprstate, econtext, true)){ - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("check constraint \"%s\" is violated by some row", NameStr(constrForm->conname)))); - } - } - - ResetExprContext(econtext); - MemoryContextSwitchTo(oldcxt); - } - - tableam_scan_end(scan); - ExecDropSingleTupleTableSlot(slot); - FreeExecutorState(estate); -} - -/* - * Validate check constraint for a list of bucket. - * - * The caller must have opened and locked the relation and(or) partition appropriately. - */ -static void validateCheckConstraintForBucket(Relation rel, Partition part, HeapTuple constrtup) -{ - Relation bucketRel = NULL; - oidvector* bucketlist = searchHashBucketByOid(rel->rd_bucketoid); - - for (int i = 0; i < bucketlist->dim1; i++) { - /* Open the bucket and do the real validate */ - bucketRel = bucketGetRelation(rel, part, bucketlist->values[i]); - - validateCheckConstraint(bucketRel, constrtup); - - bucketCloseRelation(bucketRel); - } -} - -/* - * Scan the existing rows in a table to verify they meet a proposed FK - * constraint. - * - * Caller must have opened and locked both relations appropriately. - */ -static void validateForeignKeyConstraint(char* conname, Relation rel, Relation pkrel, Oid pkindOid, Oid constraintOid) -{ - TableScanDesc scan; - HeapTuple tuple; - Trigger trig; - errno_t rc = EOK; - - ereport(DEBUG1, (errmsg("validating foreign key constraint \"%s\"", conname))); - - /* - * Build a trigger call structure; we'll need it either way. 
- */ - rc = memset_s(&trig, sizeof(trig), 0, sizeof(trig)); - securec_check(rc, "\0", "\0"); - trig.tgoid = InvalidOid; - trig.tgname = conname; - trig.tgenabled = TRIGGER_FIRES_ON_ORIGIN; - trig.tgisinternal = TRUE; - trig.tgconstrrelid = RelationGetRelid(pkrel); - trig.tgconstrindid = pkindOid; - trig.tgconstraint = constraintOid; - trig.tgdeferrable = FALSE; - trig.tginitdeferred = FALSE; - /* we needn't fill in tgargs or tgqual */ - /* - * See if we can do it with a single LEFT JOIN query. A FALSE result - * indicates we must proceed with the fire-the-trigger method. - */ - if (RI_Initial_Check(&trig, rel, pkrel)) - return; - - /* - * Scan through each tuple, calling RI_FKey_check_ins (insert trigger) as - * if that tuple had just been inserted. If any of those fail, it should - * ereport(ERROR) and that's that. - */ - scan = tableam_scan_begin(rel, SnapshotNow, 0, NULL); - - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - FunctionCallInfoData fcinfo; - TriggerData trigdata; - - /* - * Make a call to the trigger function - * - * No parameters are passed, but we do set a context - */ - rc = memset_s(&fcinfo, sizeof(fcinfo), 0, sizeof(fcinfo)); - securec_check(rc, "\0", "\0"); - rc = memset_s(&trigdata, sizeof(trigdata), 0, sizeof(trigdata)); - securec_check(rc, "\0", "\0"); - /* - * We assume RI_FKey_check_ins won't look at flinfo... 
- */ - trigdata.type = T_TriggerData; - trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW; - trigdata.tg_relation = rel; - trigdata.tg_trigtuple = tuple; - trigdata.tg_newtuple = NULL; - trigdata.tg_trigger = &trig; - trigdata.tg_trigtuplebuf = scan->rs_cbuf; - trigdata.tg_newtuplebuf = InvalidBuffer; - - fcinfo.context = (Node*)&trigdata; - - RI_FKey_check_ins(&fcinfo); - } - - tableam_scan_end(scan); -} - -static void CreateFKCheckTrigger( - Oid myRelOid, Oid refRelOid, Constraint* fkconstraint, Oid constraintOid, Oid indexOid, bool on_insert) -{ - CreateTrigStmt* fk_trigger = NULL; - - /* - * Note: for a self-referential FK (referencing and referenced tables are - * the same), it is important that the ON UPDATE action fires before the - * CHECK action, since both triggers will fire on the same row during an - * UPDATE event; otherwise the CHECK trigger will be checking a non-final - * state of the row. Triggers fire in name order, so we ensure this by - * using names like "RI_ConstraintTrigger_a_NNNN" for the action triggers - * and "RI_ConstraintTrigger_c_NNNN" for the check triggers. 
- */ - fk_trigger = makeNode(CreateTrigStmt); - fk_trigger->trigname = "RI_ConstraintTrigger_c"; - fk_trigger->relation = NULL; - fk_trigger->row = true; - fk_trigger->timing = TRIGGER_TYPE_AFTER; - - /* Either ON INSERT or ON UPDATE */ - if (on_insert) { - fk_trigger->funcname = SystemFuncName("RI_FKey_check_ins"); - fk_trigger->events = TRIGGER_TYPE_INSERT; - } else { - fk_trigger->funcname = SystemFuncName("RI_FKey_check_upd"); - fk_trigger->events = TRIGGER_TYPE_UPDATE; - } - - fk_trigger->columns = NIL; - fk_trigger->whenClause = NULL; - fk_trigger->isconstraint = true; - fk_trigger->deferrable = fkconstraint->deferrable; - fk_trigger->initdeferred = fkconstraint->initdeferred; - fk_trigger->constrrel = NULL; - fk_trigger->args = NIL; - - (void)CreateTrigger(fk_trigger, NULL, myRelOid, refRelOid, constraintOid, indexOid, true); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); -} - -/* - * Create the triggers that implement an FK constraint. - */ -static void createForeignKeyTriggers( - Relation rel, Oid refRelOid, Constraint* fkconstraint, Oid constraintOid, Oid indexOid) -{ - Oid myRelOid; - CreateTrigStmt* fk_trigger = NULL; - - myRelOid = RelationGetRelid(rel); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); - - /* - * Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON - * DELETE action on the referenced table. 
- */ - fk_trigger = makeNode(CreateTrigStmt); - fk_trigger->trigname = "RI_ConstraintTrigger_a"; - fk_trigger->relation = NULL; - fk_trigger->row = true; - fk_trigger->timing = TRIGGER_TYPE_AFTER; - fk_trigger->events = TRIGGER_TYPE_DELETE; - fk_trigger->columns = NIL; - fk_trigger->whenClause = NULL; - fk_trigger->isconstraint = true; - fk_trigger->constrrel = NULL; - switch (fkconstraint->fk_del_action) { - case FKCONSTR_ACTION_NOACTION: - fk_trigger->deferrable = fkconstraint->deferrable; - fk_trigger->initdeferred = fkconstraint->initdeferred; - fk_trigger->funcname = SystemFuncName("RI_FKey_noaction_del"); - break; - case FKCONSTR_ACTION_RESTRICT: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_restrict_del"); - break; - case FKCONSTR_ACTION_CASCADE: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_cascade_del"); - break; - case FKCONSTR_ACTION_SETNULL: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_setnull_del"); - break; - case FKCONSTR_ACTION_SETDEFAULT: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_setdefault_del"); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized FK action type: %d", (int)fkconstraint->fk_del_action))); - break; - } - fk_trigger->args = NIL; - - (void)CreateTrigger(fk_trigger, NULL, refRelOid, myRelOid, constraintOid, indexOid, true); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); - - /* - * Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON - * UPDATE action on the referenced table. 
- */ - fk_trigger = makeNode(CreateTrigStmt); - fk_trigger->trigname = "RI_ConstraintTrigger_a"; - fk_trigger->relation = NULL; - fk_trigger->row = true; - fk_trigger->timing = TRIGGER_TYPE_AFTER; - fk_trigger->events = TRIGGER_TYPE_UPDATE; - fk_trigger->columns = NIL; - fk_trigger->whenClause = NULL; - fk_trigger->isconstraint = true; - fk_trigger->constrrel = NULL; - switch (fkconstraint->fk_upd_action) { - case FKCONSTR_ACTION_NOACTION: - fk_trigger->deferrable = fkconstraint->deferrable; - fk_trigger->initdeferred = fkconstraint->initdeferred; - fk_trigger->funcname = SystemFuncName("RI_FKey_noaction_upd"); - break; - case FKCONSTR_ACTION_RESTRICT: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_restrict_upd"); - break; - case FKCONSTR_ACTION_CASCADE: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_cascade_upd"); - break; - case FKCONSTR_ACTION_SETNULL: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_setnull_upd"); - break; - case FKCONSTR_ACTION_SETDEFAULT: - fk_trigger->deferrable = false; - fk_trigger->initdeferred = false; - fk_trigger->funcname = SystemFuncName("RI_FKey_setdefault_upd"); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized FK action type: %d", (int)fkconstraint->fk_upd_action))); - break; - } - fk_trigger->args = NIL; - - (void)CreateTrigger(fk_trigger, NULL, refRelOid, myRelOid, constraintOid, indexOid, true); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); - - /* - * Build and execute CREATE CONSTRAINT TRIGGER statements for the CHECK - * action for both INSERTs and UPDATEs on the referencing table. 
- */ - CreateFKCheckTrigger(myRelOid, refRelOid, fkconstraint, constraintOid, indexOid, true); - CreateFKCheckTrigger(myRelOid, refRelOid, fkconstraint, constraintOid, indexOid, false); -} - -bool ConstraintSatisfyAutoIncrement(HeapTuple tuple, TupleDesc desc, AttrNumber attrnum, char contype) -{ - if (attrnum == 0 || (contype != CONSTRAINT_PRIMARY && contype != CONSTRAINT_UNIQUE)) { - return false; - } - - Datum* keys = NULL; - int nKeys; - bool isnull = false; - Datum conkey = heap_getattr(tuple, Anum_pg_constraint_conkey, desc, &isnull); - if (isnull) { - return false; - } - deconstruct_array(DatumGetArrayTypeP(conkey), INT2OID, sizeof(int16), true, 's', &keys, NULL, &nKeys); - if (DatumGetInt16(keys[0]) == attrnum) { - pfree(keys); - return true; - } - pfree(keys); - return false; -} -/* - * ALTER TABLE DROP CONSTRAINT - * - * Like DROP COLUMN, we can't use the normal ALTER TABLE recursion mechanism. - */ -static void ATExecDropConstraint(Relation rel, const char* constrName, DropBehavior behavior, bool recurse, - bool recursing, bool missing_ok, LOCKMODE lockmode) -{ - List* children = NIL; - ListCell* child = NULL; - Relation conrel; - Form_pg_constraint con; - SysScanDesc scan; - ScanKeyData key; - HeapTuple tuple; - bool found = false; - bool is_no_inherit_constraint = false; - - /* At top level, permission check was done in ATPrepCmd, else do it */ - if (recursing) - ATSimplePermissions(rel, ATT_TABLE); - - conrel = heap_open(ConstraintRelationId, RowExclusiveLock); - - /* - * Find and drop the target constraint - */ - ScanKeyInit( - &key, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, true, NULL, 1, &key); - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - ObjectAddress conobj; - - con = (Form_pg_constraint)GETSTRUCT(tuple); - - if (strcmp(NameStr(con->conname), constrName) != 0) { - continue; - } - - /* Don't drop inherited 
constraints */ - if (con->coninhcount > 0 && !recursing) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot drop inherited constraint \"%s\" of relation \"%s\"", - constrName, - RelationGetRelationName(rel)))); - - is_no_inherit_constraint = con->connoinherit; - - /* - * XXX as a special hack, we turn on no-inherit here unconditionally - * except for CHECK constraints. This is because 9.2 until beta2 - * contained a bug that marked it false for all constraints, even - * though it was only supported false for CHECK constraints. - * See bug #6712. - */ - if (con->contype != CONSTRAINT_CHECK) - is_no_inherit_constraint = true; - - /* drop partial cluster key */ - if (con->contype == CONSTRAINT_CLUSTER) { - SetRelHasClusterKey(rel, false); - } - -#ifndef ENABLE_MULTIPLE_NODES - /* - * If it's a foreign-key constraint, we'd better lock the referenced - * table and check that that's not in use, just as we've already done - * for the constrained table (else we might, eg, be dropping a trigger - * that has unfired events). But we can/must skip that in the - * self-referential case. 
- */ - if (con->contype == CONSTRAINT_FOREIGN && - con->confrelid != RelationGetRelid(rel)) - { - Relation frel; - - /* Must match lock taken by RemoveTriggerById: */ - frel = heap_open(con->confrelid, AccessExclusiveLock); - CheckTableNotInUse(frel, "ALTER TABLE"); - heap_close(frel, NoLock); - } -#endif - - /* - * Perform the actual constraint deletion - */ - conobj.classId = ConstraintRelationId; - conobj.objectId = HeapTupleGetOid(tuple); - conobj.objectSubId = 0; - - performDeletion(&conobj, behavior, 0); - - found = true; - - /* constraint found and dropped -- no need to keep looping */ - break; - } - - systable_endscan(scan); - - if (!found) { - if (!missing_ok) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" of relation \"%s\" does not exist", - constrName, - RelationGetRelationName(rel)))); - } else { - ereport(NOTICE, - (errmsg("constraint \"%s\" of relation \"%s\" does not exist, skipping", - constrName, - RelationGetRelationName(rel)))); - heap_close(conrel, RowExclusiveLock); - return; - } - } - - /* - * Propagate to children as appropriate. Unlike most other ALTER - * routines, we have to do this one level of recursion at a time; we can't - * use find_all_inheritors to do it in one pass. 
- */ - if (!is_no_inherit_constraint) - children = find_inheritance_children(RelationGetRelid(rel), lockmode); - else - children = NIL; - - foreach (child, children) { - Oid childrelid = lfirst_oid(child); - Relation childrel; - HeapTuple copy_tuple; - - /* find_inheritance_children already got lock */ - childrel = heap_open(childrelid, NoLock); - CheckTableNotInUse(childrel, "ALTER TABLE"); - - ScanKeyInit(&key, Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(childrelid)); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, true, NULL, 1, &key); - - /* scan for matching tuple - there should only be one */ - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - con = (Form_pg_constraint)GETSTRUCT(tuple); - - /* Right now only CHECK constraints can be inherited */ - if (con->contype != CONSTRAINT_CHECK) - continue; - - if (strcmp(NameStr(con->conname), constrName) == 0) - break; - } - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" of relation \"%s\" does not exist", - constrName, - RelationGetRelationName(childrel)))); - - copy_tuple = (HeapTuple)tableam_tops_copy_tuple(tuple); - - systable_endscan(scan); - - con = (Form_pg_constraint)GETSTRUCT(copy_tuple); - - if (con->coninhcount <= 0) /* shouldn't happen */ - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("relation %u has non-inherited constraint \"%s\"", childrelid, constrName))); - - if (recurse) { - /* - * If the child constraint has other definition sources, just - * decrement its inheritance count; if not, recurse to delete it. 
- */ - if (con->coninhcount == 1 && !con->conislocal) { - /* Time to delete this child constraint, too */ - ATExecDropConstraint(childrel, constrName, behavior, true, true, false, lockmode); - } else { - /* Child constraint must survive my deletion */ - con->coninhcount--; - simple_heap_update(conrel, ©_tuple->t_self, copy_tuple); - CatalogUpdateIndexes(conrel, copy_tuple); - - /* Make update visible */ - CommandCounterIncrement(); - } - } else { - /* - * If we were told to drop ONLY in this table (no recursion), we - * need to mark the inheritors' constraints as locally defined - * rather than inherited. - */ - con->coninhcount--; - con->conislocal = true; - - simple_heap_update(conrel, ©_tuple->t_self, copy_tuple); - CatalogUpdateIndexes(conrel, copy_tuple); - - /* Make update visible */ - CommandCounterIncrement(); - } - - tableam_tops_free_tuple(copy_tuple); - - heap_close(childrel, NoLock); - } - - heap_close(conrel, RowExclusiveLock); -} - -static void CheckHugeToastInternal(TupleDesc reldesc, Relation rel, AttrNumber attnum) -{ - HeapTuple tuple; - Datum values[reldesc->natts]; - bool isnull[reldesc->natts]; - TableScanDesc scan = tableam_scan_begin(rel, SnapshotNow, 0, NULL); - while ((tuple = (HeapTuple)tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - tableam_tops_deform_tuple(tuple, reldesc, values, isnull); - if (!isnull[attnum - 1] && VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(values[attnum - 1]))) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support alter clob/blob to text type when more than 1GB"))); - } - } - tableam_scan_end(scan); -} - -void CheckHugeToast(AlteredTableInfo *tab, Relation rel, AttrNumber attnum) -{ - TupleDesc reldesc = tab->oldDesc; - Form_pg_attribute attr = &reldesc->attrs[attnum - 1]; - - if (attr->atttypid != CLOBOID && attr->atttypid != BLOBOID) { - return; - } - - if (RelationIsPartitioned(rel)) { - ListCell *partCell = NULL; - List *partList = relationGetPartitionList(rel, 
NoLock); - foreach (partCell, partList) { - Partition part = (Partition)lfirst(partCell); - Relation partRel = partitionGetRelation(rel, part); - if (RelationIsSubPartitioned(rel)) { - ListCell *subPartCell = NULL; - List *subPartList = relationGetPartitionList(partRel, NoLock); - foreach (subPartCell, subPartList) { - Partition subPart = (Partition)lfirst(subPartCell); - Relation subPartRel = partitionGetRelation(partRel, subPart); - CheckHugeToastInternal(reldesc, subPartRel, attnum); - releaseDummyRelation(&subPartRel); - } - releasePartitionList(partRel, &subPartList, NoLock); - } else { - CheckHugeToastInternal(reldesc, partRel, attnum); - } - releaseDummyRelation(&partRel); - } - releasePartitionList(rel, &partList, NoLock); - } else { - CheckHugeToastInternal(reldesc, rel, attnum); - } -} - -/* - * ALTER COLUMN TYPE - */ -static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, bool recursing, - AlterTableCmd* cmd, LOCKMODE lockmode) -{ - char* colName = cmd->name; - ColumnDef* def = (ColumnDef*)cmd->def; - TypeName* typname = def->typname; - Node* transform = def->raw_default; - HeapTuple tuple; - Form_pg_attribute attTup; - AttrNumber attnum; - Oid targettype = InvalidOid; - int32 targettypmod = -1; - Oid targetcollid = InvalidOid; - int target_charset = PG_INVALID_ENCODING; - NewColumnValue* newval = NULL; - ParseState* pstate = make_parsestate(NULL); - AclResult aclresult; - - if (rel->rd_rel->reloftype && !recursing) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot alter column type of typed table"))); - - /* lookup the attribute so we can check inheritance status */ - tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - attTup = (Form_pg_attribute)GETSTRUCT(tuple); - attnum = attTup->attnum; - 
- /* Can't alter a system attribute */ - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - /* Don't alter inherited columns */ - if (attTup->attinhcount > 0 && !recursing) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot alter inherited column \"%s\"", colName))); - if (typname && list_length(typname->names) == 1 && !typname->pct_type) { - char* tname = strVal(linitial(typname->names)); - - if (strcmp(tname, "smallserial") == 0 || strcmp(tname, "serial2") == 0 || strcmp(tname, "serial") == 0 || - strcmp(tname, "serial4") == 0 || strcmp(tname, "bigserial") == 0 || strcmp(tname, "serial8") == 0 || - strcmp(tname, "largeserial") == 0 || strcmp(tname, "serial16") == 0) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot alter column type to \"%s\"", tname))); - } - - if (typname == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("typname is null"))); - } - /* Look up the target type */ - typenameTypeIdAndMod(NULL, typname, &targettype, &targettypmod); - - /* And the collation */ - Oid rel_coll_oid = rel->rd_options == NULL ? InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate; - targetcollid = GetColumnDefCollation(NULL, def, targettype, rel_coll_oid); - if (DB_IS_CMPT(B_FORMAT)) { - targettype = binary_need_transform_typeid(targettype, &targetcollid); - target_charset = get_charset_by_collation(targetcollid); - } - - // check the unsupported datatype. 
- if (RelationIsColStore(rel) && !IsTypeSupportedByCStore(targettype)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in column store", - format_type_with_typemod(targettype, targettypmod)))); - } - - aclresult = pg_type_aclcheck(targettype, GetUserId(), ACL_USAGE); - if (aclresult != ACLCHECK_OK) - aclcheck_error_type(aclresult, targettype); - - /* make sure datatype is legal for a column */ - CheckAttributeType(colName, targettype, targetcollid, list_make1_oid(rel->rd_rel->reltype), false); - - if (tab->relkind == RELKIND_RELATION) { - if (IS_PGXC_COORDINATOR && !IsSystemRelation(rel) && !IsCStoreNamespace(rel->rd_rel->relnamespace)) { - HeapTuple tup = SearchSysCache(PGXCCLASSRELID, ObjectIdGetDatum(RelationGetRelid(rel)), 0, 0, 0); - - if (!HeapTupleIsValid(tup)) /* should not happen */ - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for pgxc_class %u", RelationGetRelid(rel)))); - - Form_pgxc_class pgxc_class = (Form_pgxc_class)GETSTRUCT(tup); - if ((pgxc_class->pclocatortype == 'H' || pgxc_class->pclocatortype == 'M' || - pgxc_class->pclocatortype == 'L' || pgxc_class->pclocatortype == 'G') && - IsDistribColumn(RelationGetRelid(rel), attnum)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter data type of distribute column"))); - - ReleaseSysCache(tup); - } - /* - * Set up an expression to transform the old data value to the new - * type. If a USING option was given, transform and use that - * expression, else just take the old value and try to coerce it. We - * do this first so that type incompatibility can be detected before - * we waste effort, and because we need the expression to be parsed - * against the original table row type. 
- */ - if (transform != NULL) { - /* Expression must be able to access vars of old table */ - RangeTblEntry* rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); - - addRTEtoQuery(pstate, rte, false, true, true); - - transform = transformExpr(pstate, transform, EXPR_KIND_ALTER_COL_TRANSFORM); - - if (RelationIsColStore(rel)) { - Bitmapset* attrs_referred = NULL; - /* Collect all the attributes refered in the expression. */ - pull_varattnos(transform, 1, &attrs_referred); - if (attrs_referred != NULL) { - attrs_referred = - bms_del_member(attrs_referred, attnum - FirstLowInvalidHeapAttributeNumber); // remove itself - if (!bms_is_empty(attrs_referred)) { - bms_free_ext(attrs_referred); - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg( - "cannot refer to other columns in transform expression for column store table"))); - } - bms_free_ext(attrs_referred); - } - } - - /* It can't return a set */ - if (expression_returns_set(transform)) - ereport( - ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("transform expression must not return a set"))); - - /* No subplans or aggregates, either... 
*/ - if (pstate->p_hasSubLinks) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot use subquery in transform expression"))); - if (pstate->p_hasAggs) - ereport(ERROR, - (errcode(ERRCODE_GROUPING_ERROR), errmsg("cannot use aggregate function in transform expression"))); - if (pstate->p_hasWindowFuncs) - ereport(ERROR, - (errcode(ERRCODE_WINDOWING_ERROR), errmsg("cannot use window function in transform expression"))); - } else { - transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); - } - - transform = coerce_to_target_type(pstate, - transform, - exprType(transform), - targettype, - targettypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); - if (transform == NULL) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg( - "column \"%s\" cannot be cast automatically to type %s", colName, format_type_be(targettype)), - errhint("Specify a USING expression to perform the conversion."))); -#ifndef ENABLE_MULTIPLE_NODES - transform = coerce_to_target_charset(transform, target_charset, attTup->atttypid, attTup->atttypmod, targetcollid); -#endif - /* Fix collations after all else */ - assign_expr_collations(pstate, transform); - - /* Plan the expr now so we can accurately assess the need to rewrite. */ - transform = (Node*)expression_planner((Expr*)transform); - - /* - * Add a work queue item to make ATRewriteTable update the column - * contents. 
- */ - newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue)); - newval->attnum = attnum; - newval->expr = (Expr*)transform; - newval->is_generated = false; - newval->is_autoinc = false; - newval->is_addloc = false; - newval->newattnum = 0; - newval->col_name = pstrdup(colName); - newval->generate_attnum = 0; - - tab->newvals = lappend(tab->newvals, newval); - if (ATColumnChangeRequiresRewrite(transform, attnum)) - tab->rewrite = AT_REWRITE_COLUMN_REWRITE; - if (targettype != CLOBOID && targettype != BLOBOID) { - CheckHugeToast(tab, rel, attnum); - } - } else if (transform != NULL) - ereport( - ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table", RelationGetRelationName(rel)))); - - if (tab->relkind == RELKIND_COMPOSITE_TYPE || tab->relkind == RELKIND_FOREIGN_TABLE - || tab->relkind == RELKIND_STREAM) { - /* - * For composite types, do this check now. Tables will check it later - * when the table is being rewritten. - */ - find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL); - } - - ReleaseSysCache(tuple); - - /* - * The recursion case is handled by ATSimpleRecursion. However, if we are - * told not to recurse, there had better not be any child tables; else the - * alter would put them out of step. - */ - if (recurse) - ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); - else if (!recursing && find_inheritance_children(RelationGetRelid(rel), NoLock) != NIL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("type of inherited column \"%s\" must be changed in child tables too", colName))); - - if (tab->relkind == RELKIND_COMPOSITE_TYPE) - ATTypedTableRecursion(wqueue, rel, cmd, lockmode); -} - -/* - * When the data type of a column is changed, a rewrite might not be required - * if the new type is sufficiently identical to the old one, and the USING - * clause isn't trying to insert some other value. 
It's safe to skip the - * rewrite if the old type is binary coercible to the new type, or if the - * new type is an unconstrained domain over the old type. In the case of a - * constrained domain, we could get by with scanning the table and checking - * the constraint rather than actually rewriting it, but we don't currently - * try to do that. - */ -static bool ATColumnChangeRequiresRewrite(Node* expr, AttrNumber varattno) -{ - Assert(expr != NULL); - - for (;;) { - /* only one varno, so no need to check that */ - if (IsA(expr, Var) && ((Var*)expr)->varattno == varattno) - return false; - else if (IsA(expr, RelabelType)) - expr = (Node*)((RelabelType*)expr)->arg; - else if (IsA(expr, CoerceToDomain)) { - CoerceToDomain* d = (CoerceToDomain*)expr; - - if (GetDomainConstraints(d->resulttype) != NIL) - return true; - expr = (Node*)d->arg; - } else - return true; - } -} - -static void DelDependencONDataType(const Relation rel, Relation depRel, const Form_pg_attribute attTup) -{ - ScanKeyData key[3]; - SysScanDesc scan; - HeapTuple depTup; - AttrNumber attnum = attTup->attnum; - - /* - * Now scan for dependencies of this column on other things. The only - * thing we should find is the dependency on the column datatype, which we - * want to remove, possibly a collation dependency, and dependencies on - * other columns if it is a generated column. 
- */ - ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum((int32)attnum)); - - scan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, 3, key); - - while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend)GETSTRUCT(depTup); - ObjectAddress foundObject; - foundObject.classId = foundDep->refclassid; - foundObject.objectId = foundDep->refobjid; - foundObject.objectSubId = foundDep->refobjsubid; - - if (foundDep->deptype != DEPENDENCY_NORMAL && foundDep->deptype != DEPENDENCY_AUTO) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("found unexpected dependency type '%c'", foundDep->deptype))); - } - if (!(foundDep->refclassid == TypeRelationId && foundDep->refobjid == attTup->atttypid) && - !(foundDep->refclassid == CollationRelationId && foundDep->refobjid == attTup->attcollation) && - !(foundDep->refclassid == RelationRelationId && foundDep->refobjid == RelationGetRelid(rel) && - foundDep->refobjsubid != 0)) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("found unexpected dependency for column"))); - } - - simple_heap_delete(depRel, &depTup->t_self); - } - - systable_endscan(scan); -} - -/* - * update pg_attrdef adnum for the modified column with first or after column. 
- */ -static void UpdateAttrdefAdnumFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, - bool *has_default) -{ - ScanKeyData key[2]; - HeapTuple def_tuple; - SysScanDesc scan; - - ScanKeyInit(&key[0], Anum_pg_attrdef_adrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid)); - ScanKeyInit(&key[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(curattnum)); - - scan = systable_beginscan(rel, AttrDefaultIndexId, true, NULL, 2, key); - - def_tuple = systable_getnext(scan); - if (HeapTupleIsValid(def_tuple)) { - Datum values[Natts_pg_attrdef] = { 0 }; - bool nulls[Natts_pg_attrdef] = { 0 }; - bool replaces[Natts_pg_attrdef] = { 0 }; - HeapTuple new_def_tuple; - - if (has_default != NULL) { - *has_default = true; - } - - values[Anum_pg_attrdef_adnum - 1] = Int16GetDatum(newattnum); - replaces[Anum_pg_attrdef_adnum - 1] = true; - - new_def_tuple = heap_modify_tuple(def_tuple, RelationGetDescr(rel), values, nulls, replaces); - simple_heap_update(rel, &new_def_tuple->t_self, new_def_tuple); - CatalogUpdateIndexes(rel, new_def_tuple); - - heap_freetuple_ext(new_def_tuple); - } - - systable_endscan(scan); -} - -/* - * update pg_description objsubid for the modified column with first or after column. 
- */ -static void UpdateDescriptionObjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, - bool *has_comment) -{ - ScanKeyData key[3]; - HeapTuple desc_tuple; - SysScanDesc scan; - - ScanKeyInit(&key[0], Anum_pg_description_objoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid)); - ScanKeyInit(&key[1], Anum_pg_description_classoid, BTEqualStrategyNumber, F_OIDEQ, RelationRelationId); - ScanKeyInit(&key[2], Anum_pg_description_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(curattnum)); - - scan = systable_beginscan(rel, DescriptionObjIndexId, true, NULL, 3, key); - - desc_tuple = systable_getnext(scan); - if (HeapTupleIsValid(desc_tuple)) { - Datum values[Natts_pg_description] = { 0 }; - bool nulls[Natts_pg_description] = { 0 }; - bool replaces[Natts_pg_description] = { 0 }; - HeapTuple new_desc_tuple; - - if (has_comment != NULL) { - *has_comment = true; - } - - values[Anum_pg_description_objsubid - 1] = Int32GetDatum(newattnum); - replaces[Anum_pg_description_objsubid - 1] = true; - - new_desc_tuple = heap_modify_tuple(desc_tuple, RelationGetDescr(rel), values, nulls, replaces); - simple_heap_update(rel, &new_desc_tuple->t_self, new_desc_tuple); - CatalogUpdateIndexes(rel, new_desc_tuple); - - heap_freetuple_ext(new_desc_tuple); - } - - systable_endscan(scan); -} - -/* - * update pg_depend refobjsubid for the modified column with first or after column. 
- */ -static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, - bool *has_depend) -{ - ScanKeyData key[2]; - HeapTuple dep_tuple; - Form_pg_depend dep_form; - SysScanDesc scan; - - ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(curattnum)); - - scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, 2, key); - while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_depend] = { 0 }; - bool nulls[Natts_pg_depend] = { 0 }; - bool replaces[Natts_pg_depend] = { 0 }; - HeapTuple new_dep_tuple; - - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - - if (has_depend != NULL) { - *has_depend = true; - } - - values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(-1); - replaces[Anum_pg_depend_refobjsubid - 1] = true; - - if (dep_form->objid == myrelid) { - int startattnum; - int endattnum; - bool is_increase = false; - if (newattnum <= curattnum - 1) { - startattnum = newattnum; - endattnum = curattnum - 1; - is_increase = true; - } else { - startattnum = curattnum + 1; - endattnum = newattnum; - } - if (dep_form->objsubid >= startattnum && dep_form->objsubid <= endattnum) { - values[Anum_pg_depend_objsubid - 1] = is_increase ? 
- Int32GetDatum(dep_form->objsubid + 1) : Int32GetDatum(dep_form->objsubid - 1); - replaces[Anum_pg_depend_objsubid - 1] = true; - } else if (dep_form->objsubid == curattnum) { - values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(newattnum); - replaces[Anum_pg_depend_objsubid - 1] = true; - } - } - - new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(rel), values, nulls, replaces); - simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(rel, new_dep_tuple); - - heap_freetuple_ext(new_dep_tuple); - } - systable_endscan(scan); -} - -/* - * update pg_depend refobjsubid for the modified column with first or after column. - */ -static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum) -{ - ScanKeyData key[2]; - HeapTuple dep_tuple; - Form_pg_depend dep_form; - SysScanDesc scan; - - ScanKeyInit(&key[0], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(curattnum)); - - scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, 2, key); - while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) { - Datum values[Natts_pg_depend] = { 0 }; - bool nulls[Natts_pg_depend] = { 0 }; - bool replaces[Natts_pg_depend] = { 0 }; - HeapTuple new_dep_tuple; - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - - values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newattnum); - replaces[Anum_pg_depend_refobjsubid - 1] = true; - - new_dep_tuple = heap_modify_tuple(dep_tuple, RelationGetDescr(rel), values, nulls, replaces); - simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); - CatalogUpdateIndexes(rel, new_dep_tuple); - - heap_freetuple_ext(new_dep_tuple); - } - - systable_endscan(scan); -} - -/* - * update pg_partition partkey for the modified column with first or after column. 
- */ -static void UpdatePartitionPartkeyFirstAfter(Oid myrelid, int curattnum, int newattnum) -{ - ScanKeyData skey; - HeapTuple par_tuple; - Relation par_rel; - SysScanDesc scan; - - par_rel = heap_open(PartitionRelationId, RowExclusiveLock); - - ScanKeyInit(&skey, Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(myrelid)); - - scan = systable_beginscan(par_rel, PartitionParentOidIndexId, true, NULL, 1, &skey); - while (HeapTupleIsValid(par_tuple = systable_getnext(scan))) { - bool is_null = false; - - // update pg_partition_partkey - Datum partkey_datum = SysCacheGetAttr(PARTRELID, par_tuple, Anum_pg_partition_partkey, &is_null); - if (!is_null) { - Datum values[Natts_pg_partition] = { 0 }; - bool nulls[Natts_pg_partition] = { 0 }; - bool replaces[Natts_pg_partition] = { 0 }; - int2vector *partkey = NULL; - int2vector *new_partKey = NULL; - HeapTuple new_par_tuple; - - partkey = (int2vector *)DatumGetPointer(partkey_datum); - new_partKey = buildint2vector(NULL, partkey->dim1); - for (int i = 0; i < partkey->dim1; i++) { - if (partkey->values[i] == curattnum) { - new_partKey->values[i] = newattnum; - } else { - new_partKey->values[i] = partkey->values[i]; - } - } - values[Anum_pg_partition_partkey - 1] = PointerGetDatum(new_partKey); - replaces[Anum_pg_partition_partkey - 1] = true; - - new_par_tuple = heap_modify_tuple(par_tuple, RelationGetDescr(par_rel), values, nulls, replaces); - simple_heap_update(par_rel, &new_par_tuple->t_self, new_par_tuple); - CatalogUpdateIndexes(par_rel, new_par_tuple); - - pfree_ext(new_partKey); - heap_freetuple_ext(new_par_tuple); - } - } - systable_endscan(scan); - heap_close(par_rel, RowExclusiveLock); -} - -static int GetNewattnumFirstAfter(Relation rel, AlterTableCmd* cmd, int curattnum) -{ - bool is_first = cmd->is_first; - char *after_name = cmd->after_name; - int newattnum = 0; - - if (is_first && curattnum == 1) { - return 0; - } - - if (is_first) { - newattnum = 1; - } else if (after_name != 
NULL) { - newattnum =GetAfterColumnAttnum(RelationGetRelid(rel), after_name) - 1; - if (newattnum + 1 == curattnum) { - return 0; - } - - if (newattnum == curattnum) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("Unknown column \"%s\" in \"%s\"", after_name, RelationGetRelationName(rel)))); - } else if (newattnum < curattnum) { - newattnum++; - } - } - return newattnum; -} - -static void AlterColumnToFirstAfter(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, - int curattnum) -{ - Oid myrelid = RelationGetRelid(rel); - int newattnum; - Relation attr_rel; - HeapTuple att_tuple_old, att_tuple_new; - Form_pg_attribute att_form_old, attr_form_new; - int startattnum, endattnum; - bool has_comment = false; - bool has_default = false; - bool has_depend = false; - bool has_partition = false; - bool is_increase = false; - List *query_str = NIL; - - newattnum = GetNewattnumFirstAfter(rel, cmd, curattnum); - if (newattnum == 0) { - return; - } - - tab->rewrite = true; - - attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); - - att_tuple_old = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(myrelid), Int16GetDatum(curattnum)); - if (!HeapTupleIsValid(att_tuple_old)) { - ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %d of relation %u", curattnum, myrelid), errdetail("N/A"), - errcause("System error."), erraction("Contact engineer to support."))); - } - - att_form_old = (Form_pg_attribute)GETSTRUCT(att_tuple_old); - - att_form_old->attnum = 0; - - simple_heap_update(attr_rel, &att_tuple_old->t_self, att_tuple_old); - CatalogUpdateIndexes(attr_rel, att_tuple_old); - - Relation desc_rel = heap_open(DescriptionRelationId, RowExclusiveLock); - UpdateDescriptionObjsubidFirstAfter(desc_rel, myrelid, curattnum, 0, &has_comment); - - Relation def_rel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - UpdateAttrdefAdnumFirstAfter(def_rel, myrelid, curattnum, 0, &has_default); - - 
Relation dep_rel = heap_open(DependRelationId, RowExclusiveLock); - UpdateDependRefobjsubidFirstAfter(dep_rel, myrelid, curattnum, newattnum, &has_depend); - - if (newattnum <= curattnum - 1) { - startattnum = newattnum; - endattnum = curattnum - 1; - is_increase = true; - } else { - startattnum = curattnum + 1; - endattnum = newattnum; - } - - UpdatePgPartitionFirstAfter(rel, startattnum, endattnum, is_increase, true, &has_partition); - UpdatePgAttributeFirstAfter(attr_rel, myrelid, startattnum, endattnum, is_increase); - UpdatePgDescriptionFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgIndexFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgConstraintFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgConstraintConfkeyFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgAttrdefFirstAfter(rel, startattnum, endattnum, is_increase); - query_str = CheckPgRewriteFirstAfter(rel); - UpdatePgTriggerFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgRlspolicyFirstAfter(rel, startattnum, endattnum, is_increase); - CommandCounterIncrement(); - - UpdateGenerateColFirstAfter(rel, startattnum, endattnum, is_increase); - UpdatePgDependFirstAfter(rel, startattnum, endattnum, is_increase); - CommandCounterIncrement(); - - att_tuple_new = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(myrelid), Int16GetDatum(0)); - if (!HeapTupleIsValid(att_tuple_new)) { - ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %d of relation %u", 0, myrelid), errdetail("N/A"), - errcause("System error."), erraction("Contact engineer to support."))); - } - attr_form_new = (Form_pg_attribute)GETSTRUCT(att_tuple_new); - - attr_form_new->attnum = newattnum; - simple_heap_update(attr_rel, &att_tuple_new->t_self, att_tuple_new); - // keep system catalog indexes current - CatalogUpdateIndexes(attr_rel, att_tuple_new); - - heap_close(attr_rel, RowExclusiveLock); - 
heap_freetuple_ext(att_tuple_old); - heap_freetuple_ext(att_tuple_new); - - if (has_comment) { - UpdateDescriptionObjsubidFirstAfter(desc_rel, myrelid, 0, newattnum, NULL); - } - heap_close(desc_rel, RowExclusiveLock); - - if (has_default) { - UpdateAttrdefAdnumFirstAfter(def_rel, myrelid, 0, newattnum, NULL); - } - heap_close(def_rel, RowExclusiveLock); - - if (has_depend) { - UpdateDependRefobjsubidToNewattnum(dep_rel, myrelid, -1, newattnum); - } - heap_close(dep_rel, RowExclusiveLock); - - if (has_partition) { - UpdatePartitionPartkeyFirstAfter(myrelid, 0, newattnum); - } - - CommandCounterIncrement(); - - /* create or replace view */ - ReplaceViewQueryFirstAfter(query_str); -} - -static bool CheckIndexIsConstraint(Relation dep_rel, Oid objid, Oid *refobjid) -{ - ScanKeyData key[2]; - HeapTuple dep_tuple; - SysScanDesc scan; - Form_pg_depend dep_form; - bool is_constraint = false; - - ScanKeyInit(&key[0], Anum_pg_depend_classid, - BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid)); - scan = systable_beginscan(dep_rel, DependDependerIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(dep_tuple = systable_getnext(scan))) { - dep_form = (Form_pg_depend)GETSTRUCT(dep_tuple); - if (dep_form->refclassid == ConstraintRelationId && dep_form->refobjsubid == 0) { - *refobjid = dep_form->refobjid; - is_constraint = true; - break; - } - } - systable_endscan(scan); - return is_constraint; -} - -static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, char* col_name) -{ - ListCell* l = NULL; - foreach(l, tab->newvals) { - NewColumnValue* ex = (NewColumnValue*)lfirst(l); - - if (ex->col_name == NULL) { - continue; - } - - if (strcmp(ex->col_name, col_name) == 0) { - HeapTuple heap_tup; - Form_pg_attribute att_tup; - - heap_tup = SearchSysCacheCopyAttName(RelationGetRelid(rel), col_name); - if 
(!HeapTupleIsValid(heap_tup)) { /* shouldn't happen */ - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel)))); - } - att_tup = (Form_pg_attribute)GETSTRUCT(heap_tup); - ex->attnum = att_tup->attnum; - ex->newattnum = GetNewattnumFirstAfter(rel, cmd, ex->attnum); - - tableam_tops_free_tuple(heap_tup); - } - } -} - - -static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - char* colName = cmd->name; - ColumnDef* def = (ColumnDef*)cmd->def; - TypeName* typname = def->typname; - HeapTuple heapTup; - Form_pg_attribute attTup; - AttrNumber attnum; - HeapTuple typeTuple; - Form_pg_type tform; - Oid targettype = InvalidOid; - int32 targettypmod = -1; - Oid targetcollid = InvalidOid; - Node* defaultexpr = NULL; - Relation attrelation; - Relation depRel; - ScanKeyData key[3]; - SysScanDesc scan; - HeapTuple depTup; - char generatedCol = '\0'; - Node* update_expr = NULL; - bool flagDropOnUpdateTimestamp = false; - bool existOnUpdateTimestamp = false; - ObjectAddress address; - - attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - - /* Look up the target column */ - heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - attTup = (Form_pg_attribute)GETSTRUCT(heapTup); - attnum = attTup->attnum; - - /* - * data type of a partitioned table's partition key can not be changed - */ - if (RELATION_IS_PARTITIONED(rel) && is_partition_column(rel, attnum)) { - int2vector* partKey = ((RangePartitionMap*)rel->partMap)->partitionKey; - int i = 0; - - for (; i < partKey->dim1; i++) { - if (attnum == partKey->values[i]) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - 
errmsg("cannot alter data type of partitioning column \"%s\"", colName))); - } - } - - if ((rel)->rd_rel->parttype == PARTTYPE_SUBPARTITIONED_RELATION) { - List *partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, RelationGetRelid(rel)); - if (partTupleList != NIL) { - bool isnull = false; - HeapTuple partTuple = (HeapTuple)linitial(partTupleList); - Datum datum = SysCacheGetAttr(PARTRELID, partTuple, Anum_pg_partition_partkey, &isnull); - if (!isnull) { - int2vector *subpartkey = (int2vector *)DatumGetPointer(datum); - for (int j = 0; j < subpartkey->dim1; j++) { - if (attnum == subpartkey->values[j]) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter data type of subpartitioning column \"%s\"", colName))); - } - } - } - list_free_ext(partTupleList); - } - } - } - - /* Check for multiple ALTER TYPE on same column --- can't cope */ - if (!tab->is_first_after) { - if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1].atttypid || - attTup->atttypmod != tab->oldDesc->attrs[attnum - 1].atttypmod) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", colName))); - } - - /* Look up the target type (should not fail, since prep found it) */ - typeTuple = typenameType(NULL, typname, &targettypmod); - tform = (Form_pg_type)GETSTRUCT(typeTuple); - targettype = HeapTupleGetOid(typeTuple); - /* And the collation */ - Oid rel_coll_oid = rel->rd_options == NULL ? 
InvalidOid : ((StdRdOptions*)(rel)->rd_options)->collate; - targetcollid = GetColumnDefCollation(NULL, def, targettype, rel_coll_oid); - if (DB_IS_CMPT(B_FORMAT)) { - targettype = binary_need_transform_typeid(targettype, &targetcollid); - if (RelationIsColStore(rel) || RelationIsTsStore(rel)) { - check_unsupported_charset_for_column(targetcollid, colName); - } - } - if (attnum == RelAutoIncAttrNum(rel)) { - CheckAutoIncrementDatatype(targettype, colName); - } - generatedCol = GetGeneratedCol(rel->rd_att, attnum -1); - - /* - * If there is a default expression for the column, get it and ensure we - * can coerce it to the new datatype. (We must do this before changing - * the column type, because build_column_default itself will try to - * coerce, and will not issue the error message we want if it fails.) - * - * We remove any implicit coercion steps at the top level of the old - * default expression; this has been agreed to satisfy the principle of - * least surprise. (The conversion to the new column type should act like - * it started from what the user sees as the stored expression, and the - * implicit coercions aren't going to be shown.) 
- */ - if (attTup->atthasdef) { - if (RelAutoIncAttrNum(rel) == attnum) { - defaultexpr = RecookAutoincAttrDefault(rel, attnum, targettype, targettypmod); - if (defaultexpr == NULL) { - if (generatedCol == ATTRIBUTE_GENERATED_STORED) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("generation expression for column \"%s\" cannot be cast automatically to type %s", - colName, format_type_be(targettype)))); - } else { - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("default for column \"%s\" cannot be cast automatically to type %s", colName, - format_type_be(targettype)))); - } - } - } else { - defaultexpr = build_column_default(rel, attnum); - /* for column only with on update but no default ,here could be NULL*/ - if (defaultexpr != NULL) { - defaultexpr = strip_implicit_coercions(defaultexpr); - defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ - defaultexpr, - exprType(defaultexpr), - targettype, - targettypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); - if (defaultexpr == NULL) { - if (generatedCol == ATTRIBUTE_GENERATED_STORED) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("generation expression for column \"%s\" cannot be cast automatically to type %s", - colName, format_type_be(targettype)))); - } else { - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("default for column \"%s\" cannot be cast automatically to type %s", colName, - format_type_be(targettype)))); - } - } - } - } - } else - defaultexpr = NULL; - - /* - * Find everything that depends on the column (constraints, indexes, etc), - * and record enough information to let us recreate the objects. - * - * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to save - * the info before executing ALTER TYPE, though, else the deparser will - * get confused. 
- * - * There could be multiple entries for the same object, so we must check - * to ensure we process each one only once. Note: we assume that an index - * that implements a constraint will not show a direct dependency on the - * column. - */ - depRel = heap_open(DependRelationId, RowExclusiveLock); - - ScanKeyInit( - &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit( - &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum((int32)attnum)); - - scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 3, key); - - while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend)GETSTRUCT(depTup); - ObjectAddress foundObject; - - /* We don't expect any PIN dependencies on columns */ - if (foundDep->deptype == DEPENDENCY_PIN) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of a pinned column"))); - - foundObject.classId = foundDep->classid; - foundObject.objectId = foundDep->objid; - foundObject.objectSubId = foundDep->objsubid; - - switch (getObjectClass(&foundObject)) { - case OCLASS_CLASS: { - char relKind = get_rel_relkind(foundObject.objectId); - - if (relKind == RELKIND_INDEX || relKind == RELKIND_GLOBAL_INDEX) { - Assert(foundObject.objectSubId == 0); - - Oid refobjid; - if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId) && - CheckIndexIsConstraint(depRel, foundObject.objectId, &refobjid)) { - tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, refobjid); - tab->changedConstraintDefs = - lappend(tab->changedConstraintDefs, pg_get_constraintdef_string(refobjid)); - } else if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) { - /* - * Question: alter table set datatype and table index execute concurrently, 
data inconsistency - * occurs. The index file is deleted and metadata is left. Because the data type is not locked - * after modification, which ultimately leads to could not open file. Alter table column set - * datatype maybe trigger index operation but index is not locked. When the index data is - * inconsistent, we can use"reindex index" to repair the index. - * Solution: we should lock index at the beginning.The AccessExclusiveLock for index is used - * because we think AccessExclusiveLock for data table will block any operation and index - * will be not used to query data. This operation will block individual index operations, - * such as reindex index\set index tablespace. - * Testcase: alter table row_table alter column col_varchar set data type text,alter column - * col_smallint set data type bigint + alter index idx set tablespace. - */ - LockRelationOid(foundObject.objectId, AccessExclusiveLock); - tab->changedIndexOids = lappend_oid(tab->changedIndexOids, foundObject.objectId); - tab->changedIndexDefs = - lappend(tab->changedIndexDefs, pg_get_indexdef_string(foundObject.objectId)); - } - } else if (RELKIND_IS_SEQUENCE(relKind)) { - /* - * This must be a SERIAL column's sequence. We need - * not do anything to it. - */ - Assert(foundObject.objectSubId == 0); - } else if (relKind == RELKIND_RELATION && foundObject.objectSubId != 0 && - GetGenerated(foundObject.objectId, foundObject.objectSubId)) { - /* - * Changing the type of a column that is used by a - * generated column is not allowed by SQL standard. It - * might be doable with some thinking and effort. - */ - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_SYNTAX_ERROR), - errmsg("cannot alter type of a column used by a generated column"), - errdetail("Column \"%s\" is used by generated column \"%s\".", colName, - get_attname(foundObject.objectId, foundObject.objectSubId)))); - }else { - /* Not expecting any other direct dependencies... 
*/ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unexpected object depending on column: %s", getObjectDescription(&foundObject)))); - } - break; - } - - case OCLASS_CONSTRAINT: - Assert(foundObject.objectSubId == 0); - if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId)) { - char* defstring = pg_get_constraintdef_string(foundObject.objectId); - - /* - * Put NORMAL dependencies at the front of the list and - * AUTO dependencies at the back. This makes sure that - * foreign-key constraints depending on this column will - * be dropped before unique or primary-key constraints of - * the column; which we must have because the FK - * constraints depend on the indexes belonging to the - * unique constraints. - */ - if (foundDep->deptype == DEPENDENCY_NORMAL) { - tab->changedConstraintOids = lcons_oid(foundObject.objectId, tab->changedConstraintOids); - tab->changedConstraintDefs = lcons(defstring, tab->changedConstraintDefs); - } else { - tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, foundObject.objectId); - tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, defstring); - } - } - break; - - case OCLASS_REWRITE: - /* XXX someday see if we can cope with revising views */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type of a column used by a view or rule"), - errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); - break; - - case OCLASS_TRIGGER: - - /* - * A trigger can depend on a column because the column is - * specified as an update target, or because the column is - * used in the trigger's WHEN condition. The first case would - * not require any extra work, but the second case would - * require updating the WHEN expression, which will take a - * significant amount of new code. Since we can't easily tell - * which case applies, we punt for both. 
- */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type of a column used in a trigger definition"), - errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); - break; - - case OCLASS_DEFAULT: - - /* - * Ignore the column's default expression, since we will fix - * it below. - */ - break; - - case OCLASS_PROC: - case OCLASS_TYPE: - case OCLASS_CAST: - case OCLASS_COLLATION: - case OCLASS_CONVERSION: - case OCLASS_LANGUAGE: - case OCLASS_LARGEOBJECT: - case OCLASS_OPERATOR: - case OCLASS_OPCLASS: - case OCLASS_OPFAMILY: - case OCLASS_AMOP: - case OCLASS_AMPROC: - case OCLASS_SCHEMA: - case OCLASS_TSPARSER: - case OCLASS_TSDICT: - case OCLASS_TSTEMPLATE: - case OCLASS_TSCONFIG: - case OCLASS_ROLE: - case OCLASS_DATABASE: - case OCLASS_TBLSPACE: - case OCLASS_FDW: - case OCLASS_FOREIGN_SERVER: - case OCLASS_USER_MAPPING: - case OCLASS_DEFACL: - case OCLASS_EXTENSION: - case OCLASS_DATA_SOURCE: - case OCLASS_GLOBAL_SETTING_ARGS: - case OCLASS_GS_CL_PROC: - - /* - * We don't expect any of these sorts of objects to depend on - * a column. - */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unexpected object depending on column: %s", getObjectDescription(&foundObject)))); - break; - - default: - ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized object class: %u", foundObject.classId))); - } - } - - systable_endscan(scan); - - DelDependencONDataType(rel, depRel, attTup); - - heap_close(depRel, RowExclusiveLock); - - /* - * Here we go --- change the recorded column type and collation. (Note - * heapTup is a copy of the syscache entry, so okay to scribble on.) 
- */ - attTup->atttypid = targettype; - attTup->atttypmod = targettypmod; - attTup->attcollation = targetcollid; - attTup->attndims = list_length(typname->arrayBounds); - attTup->attlen = tform->typlen; - attTup->attbyval = tform->typbyval; - attTup->attalign = tform->typalign; - attTup->attstorage = tform->typstorage; - - ReleaseSysCache(typeTuple); - - simple_heap_update(attrelation, &heapTup->t_self, heapTup); - - /* keep system catalog indexes current */ - CatalogUpdateIndexes(attrelation, heapTup); - - heap_close(attrelation, RowExclusiveLock); - - /* Install dependencies on new datatype and collation */ - add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype); - add_column_collation_dependency(RelationGetRelid(rel), attnum, targetcollid); - - /* - * Drop any pg_statistic entry for the column, since it's now wrong type - */ - if (RELATION_IS_GLOBAL_TEMP(rel)) { - remove_gtt_att_statistic(RelationGetRelid(rel), attnum); - } else { - RemoveStatistics<'c'>(RelationGetRelid(rel), attnum); - } - - /* def->constraints maybe is null when execute sql(alter table x alter column type new_type) */ - if (cmd->subtype == AT_AlterColumnType && def->constraints && def->constraints->head) { - Constraint* temp_cons = (Constraint*)lfirst(def->constraints->head); - if (temp_cons->contype == CONSTR_DEFAULT && temp_cons->update_expr != NULL) { - update_expr = temp_cons->update_expr; - } - } - - /* when the default expr is NULL and on update expr exist, the defaultexpr should be NULL. - * because the build_column_default maybe return the on update expr. 
*/ - if (rel->rd_att->constr && rel->rd_att->constr->num_defval > 0) { - int ndef = rel->rd_att->constr->num_defval -1; - while (ndef >= 0 && rel->rd_att->constr->defval[ndef].adnum != attnum) { - /* modify column on update expr when the column don't at the end of table */ - --ndef; - } - if (ndef >= 0) { - if (pg_strcasecmp(rel->rd_att->constr->defval[ndef].adbin, "") == 0 && - rel->rd_att->constr->defval[ndef].has_on_update) { - existOnUpdateTimestamp = true; - if (update_expr == NULL) { - CommandCounterIncrement(); - RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true, true); - flagDropOnUpdateTimestamp = true; - } - } - } - } - - /* - * Update the default, if present, by brute force --- remove and re-add - * the default. Probably unsafe to take shortcuts, since the new version - * may well have additional dependencies. (It's okay to do this now, - * rather than after other ALTER TYPE commands, since the default won't - * depend on other column types.) - */ - if ((defaultexpr != NULL || update_expr != NULL) && !flagDropOnUpdateTimestamp) { - /* Must make new row visible since it will be updated again */ - CommandCounterIncrement(); - - /* - * We use RESTRICT here for safety, but at present we do not expect - * anything to depend on the default. 
- */ - if (defaultexpr != NULL || (update_expr != NULL && existOnUpdateTimestamp)) { - RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true, true); - } - if (update_expr != NULL) { - ParseState* pstate = make_parsestate(NULL); - RangeTblEntry* rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); - addRTEtoQuery(pstate, rte, true, true, true); - pstate->p_rawdefaultlist = NULL; - update_expr = cookDefault(pstate, update_expr, attTup->atttypid, attTup->atttypmod, - attTup->attcollation, NameStr(attTup->attname), def->generatedCol); - } - - StoreAttrDefault(rel, attnum, defaultexpr, generatedCol, update_expr); - } - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - /* Cleanup */ - tableam_tops_free_tuple(heapTup); - return address; -} - - /* - * Returns the address of the modified column - */ -static ObjectAddress ATExecAlterColumnGenericOptions(Relation rel, const char* colName, List* options, LOCKMODE lockmode) -{ - Relation ftrel; - Relation attrel; - ForeignServer* server = NULL; - ForeignDataWrapper* fdw = NULL; - HeapTuple tuple; - HeapTuple newtuple; - bool isnull = false; - Datum repl_val[Natts_pg_attribute]; - bool repl_null[Natts_pg_attribute]; - bool repl_repl[Natts_pg_attribute]; - Datum datum; - Form_pg_foreign_table fttableform; - Form_pg_attribute atttableform; - errno_t rc = EOK; - AttrNumber attnum; - ObjectAddress address; - - if (options == NIL) - return InvalidObjectAddress; - - /* First, determine FDW validator associated to the foreign table. 
*/ - ftrel = heap_open(ForeignTableRelationId, AccessShareLock); - tuple = SearchSysCache1(FOREIGNTABLEREL, rel->rd_id); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("foreign table \"%s\" does not exist", RelationGetRelationName(rel)))); - fttableform = (Form_pg_foreign_table)GETSTRUCT(tuple); - server = GetForeignServer(fttableform->ftserver); - fdw = GetForeignDataWrapper(server->fdwid); - - heap_close(ftrel, AccessShareLock); - ReleaseSysCache(tuple); - - attrel = heap_open(AttributeRelationId, RowExclusiveLock); - tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - - /* Prevent them from altering a system attribute */ - atttableform = (Form_pg_attribute)GETSTRUCT(tuple); - attnum = atttableform->attnum; - if (attnum <= 0) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); - - /* Initialize buffers for new tuple values */ - rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - /* Extract the current options */ - datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_attfdwoptions, &isnull); - if (isnull) - datum = PointerGetDatum(NULL); - - /* Transform the options */ - datum = transformGenericOptions(AttributeRelationId, datum, options, fdw->fdwvalidator); - - if (PointerIsValid(DatumGetPointer(datum))) - repl_val[Anum_pg_attribute_attfdwoptions - 1] = datum; - else - repl_null[Anum_pg_attribute_attfdwoptions - 1] = true; - - repl_repl[Anum_pg_attribute_attfdwoptions - 1] = true; - - /* 
Everything looks good - update the tuple */ - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(attrel), repl_val, repl_null, repl_repl); - - ObjectAddressSubSet(address, RelationRelationId, - RelationGetRelid(rel), attnum); - - ReleaseSysCache(tuple); - - simple_heap_update(attrel, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(attrel, newtuple); - - heap_close(attrel, RowExclusiveLock); - - tableam_tops_free_tuple(newtuple); - - return address; -} - -// delete record about psort oid and index oid from pg_depend, -// cut off their dependency and make psort independent. -// *oldIndexPassList* depends on AT_PASS_OLD_INDEX list. -static void ATCutOffPSortDependency(List* oldIndexPassList, LOCKMODE lockmode) -{ - ListCell* lcell = NULL; - long ndeleted = 0; - long nupdated = 0; - - foreach (lcell, oldIndexPassList) { - AlterTableCmd* atCmd = (AlterTableCmd*)lfirst(lcell); - Assert(atCmd); - if (atCmd->subtype != AT_ReAddIndex) - continue; - - IndexStmt* istmt = (IndexStmt*)atCmd->def; - Assert(istmt); - - // valid oldPSortOid implies that we want to reuse the - // existing psort index data, so we have to cut off their - // dependency and keep it from performing deleting. - if (!OidIsValid(istmt->oldPSortOid)) - continue; - - long num = deleteDependencyRecordsForClass(RelationRelationId, // class id - istmt->oldPSortOid, // depender oid - RelationRelationId, // referenece class id - DEPENDENCY_INTERNAL); // dependency type - if (num != 1) { - Assert(false); - ereport(ERROR, - (errcode(ERRCODE_OPERATE_RESULT_NOT_EXPECTED), - errmsg("PSort %u should depend on only one index relation but not %ld.", istmt->oldPSortOid, num))); - } - ++ndeleted; - - // if the index is a partition table, handle all the - // psort info for each partition. 
- if (istmt->isPartitioned && !istmt->isGlobal) { - Relation hrel = relation_openrv(istmt->relation, lockmode); - Relation irel = index_open(istmt->indexOid, lockmode); - List* partHeapOids = relationGetPartitionOidList(hrel); - - ListCell* cell = NULL; - Relation pg_part = heap_open(PartitionRelationId, RowExclusiveLock); - foreach (cell, partHeapOids) { - // step 1: find each part oid of this index tuple. - Oid partHeapOid = lfirst_oid(cell); - Oid partIdxOid = getPartitionIndexOid(istmt->indexOid, partHeapOid); - - // step 2: lookup the right index tuple according to part oid. - HeapTuple partIdxTup = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(partIdxOid)); - if (!HeapTupleIsValid(partIdxTup)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("Partition cache lookup failed for index partition %u", partIdxOid))); - } - - // step 3: reset psort oid to be 0. - // so psort index cannot be droppend later. - // refer to index_drop() and heapDropPartitionIndex(). - Form_pg_partition partForm = (Form_pg_partition)GETSTRUCT(partIdxTup); - partForm->relcudescrelid = InvalidOid; - - // step 4: update pg_partition relation and its index info. - simple_heap_update(pg_part, &partIdxTup->t_self, partIdxTup); - CatalogUpdateIndexes(pg_part, partIdxTup); - - tableam_tops_free_tuple(partIdxTup); - ++nupdated; - } - heap_close(pg_part, RowExclusiveLock); - - releasePartitionOidList(&partHeapOids); - index_close(irel, NoLock); - heap_close(hrel, NoLock); - } - } - - // Make deletion of dependency record visible - if ((ndeleted + nupdated) > 0) { - CommandCounterIncrement(); - } -} - -/* - * Cleanup after we've finished all the ALTER TYPE operations for a - * particular relation. We have to drop and recreate all the indexes - * and constraints that depend on the altered columns. 
- */ -static void ATPostAlterTypeCleanup(List** wqueue, AlteredTableInfo* tab, LOCKMODE lockmode) -{ - ObjectAddress obj; - ListCell* def_item = NULL; - ListCell* oid_item = NULL; - - /* - * Re-parse the index and constraint definitions, and attach them to the - * appropriate work queue entries. We do this before dropping because in - * the case of a FOREIGN KEY constraint, we might not yet have exclusive - * lock on the table the constraint is attached to, and we need to get - * that before dropping. It's safe because the parser won't actually look - * at the catalogs to detect the existing entry. - * - * We can't rely on the output of deparsing to tell us which relation - * to operate on, because concurrent activity might have made the name - * resolve differently. Instead, we've got to use the OID of the - * constraint or index we're processing to figure out which relation - * to operate on. - */ - forboth(oid_item, tab->changedConstraintOids, def_item, tab->changedConstraintDefs) - { - Oid oldId = lfirst_oid(oid_item); - Oid relid; - Oid confrelid; - - get_constraint_relation_oids(oldId, &relid, &confrelid); - ATPostAlterTypeParse(oldId, relid, confrelid, (char*)lfirst(def_item), wqueue, lockmode, tab->rewrite > 0); - } - forboth(oid_item, tab->changedIndexOids, def_item, tab->changedIndexDefs) - { - Oid oldId = lfirst_oid(oid_item); - Oid relid; - - relid = IndexGetRelation(oldId, false); - ATPostAlterTypeParse(oldId, relid, InvalidOid, (char*)lfirst(def_item), wqueue, lockmode, tab->rewrite > 0); - } - - /* - * Now we can drop the existing constraints and indexes --- constraints - * first, since some of them might depend on the indexes. In fact, we - * have to delete FOREIGN KEY constraints before UNIQUE constraints, but - * we already ordered the constraint list to ensure that would happen. It - * should be okay to use DROP_RESTRICT here, since nothing else should be - * depending on these objects. 
- */ - foreach (oid_item, tab->changedConstraintOids) { - obj.classId = ConstraintRelationId; - obj.objectId = lfirst_oid(oid_item); - obj.objectSubId = 0; - performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } - - if (tab->rewrite <= 0 && tab->subcmds[AT_PASS_OLD_INDEX]) { - /* - * existing indexes will be dropped. but rewrite is false, that - * means existing indexes should be reserved and reused. so we - * should remove the dependency between psort and its reference - * index relation. - */ - ATCutOffPSortDependency(tab->subcmds[AT_PASS_OLD_INDEX], lockmode); - } - - foreach (oid_item, tab->changedIndexOids) { - obj.classId = RelationRelationId; - obj.objectId = lfirst_oid(oid_item); - obj.objectSubId = 0; - performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } - - foreach (oid_item, tab->changedTriggerOids) { - obj.classId = TriggerRelationId; - obj.objectId = lfirst_oid(oid_item); - obj.objectSubId = 0; - performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } - - /* - * The objects will get recreated during subsequent passes over the work - * queue. 
- */ -} - -static void setPrimaryNotnull(Oid relid, IndexStmt *stmt, AlteredTableInfo* tab) -{ - if (stmt->primary && !stmt->internal_flag) { - ListCell* columns = NULL; - IndexElem* iparam = NULL; - - tab->is_modify_primary = true; - foreach (columns, stmt->indexParams) { - HeapTuple atttuple; - Form_pg_attribute attform; - - iparam = (IndexElem*)lfirst(columns); - - atttuple = SearchSysCacheAttName(relid, iparam->name); - if (!HeapTupleIsValid(atttuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %s of relation %u", iparam->name, relid))); - } - attform = (Form_pg_attribute)GETSTRUCT(atttuple); - - if (!attform->attnotnull) { - /* Add a subcommand to make this one NOT NULL */ - AlterTableCmd* cmd = makeNode(AlterTableCmd); - - cmd->subtype = AT_SetNotNull; - cmd->name = pstrdup(NameStr(attform->attname)); - tab->subcmds[AT_PASS_ADD_CONSTR] = - lappend(tab->subcmds[AT_PASS_ADD_CONSTR], cmd); - } - - ReleaseSysCache(atttuple); - } - } -} - -/* - * Attach each generated command to the proper place in the work queue. - * Note this could result in creation of entirely new work-queue entries. - * - * Also note that we have to tweak the command subtypes, because it turns - * out that re-creation of indexes and constraints has to act a bit - * differently from initial creation. 
- */ -static void AttachEachCommandInQueue( - Oid oldId, Oid refRelId, List** wqueue, bool rewrite, List* querytree_list, Relation rel) -{ - ListCell* list_item = NULL; - foreach (list_item, querytree_list) { - Node* stm = (Node*)lfirst(list_item); - AlteredTableInfo* tab = NULL; - - switch (nodeTag(stm)) { - case T_IndexStmt: { - IndexStmt* stmt = (IndexStmt*)stm; - SetPartionIndexType(stmt, rel, true); - AlterTableCmd* newcmd = NULL; - - if (!rewrite) { - if (!stmt->isPartitioned || stmt->isGlobal) { - TryReuseIndex(oldId, stmt); - } else { - tryReusePartedIndex(oldId, stmt, rel); - } - } - - tab = ATGetQueueEntry(wqueue, rel); - newcmd = makeNode(AlterTableCmd); - newcmd->subtype = AT_ReAddIndex; - newcmd->def = (Node*)stmt; - tab->subcmds[AT_PASS_OLD_INDEX] = lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); - break; - } - case T_AlterTableStmt: { - AlterTableStmt* stmt = (AlterTableStmt*)stm; - ListCell* lcmd = NULL; - - tab = ATGetQueueEntry(wqueue, rel); - foreach (lcmd, stmt->cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); - Constraint* con = NULL; - - switch (cmd->subtype) { - case AT_AddIndex: - Assert(IsA(cmd->def, IndexStmt)); - SetPartionIndexType((IndexStmt*)cmd->def, rel, true); - if (!rewrite) { - if (RelationIsPartitioned(rel) && !((IndexStmt*)cmd->def)->isGlobal) { - tryReusePartedIndex(get_constraint_index(oldId), (IndexStmt*)cmd->def, rel); - } else { - TryReuseIndex(get_constraint_index(oldId), (IndexStmt*)cmd->def); - } - } - - cmd->subtype = AT_ReAddIndex; - tab->subcmds[AT_PASS_OLD_INDEX] = lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); - - if (tab->is_first_after) { - setPrimaryNotnull(rel->rd_id, (IndexStmt*)cmd->def, tab); - } - break; - case AT_AddConstraint: - Assert(IsA(cmd->def, Constraint)); - con = (Constraint*)cmd->def; - con->old_pktable_oid = refRelId; - /* rewriting neither side of a FK */ - if (con->contype == CONSTR_FOREIGN && !rewrite && tab->rewrite <= 0) - TryReuseForeignKey(oldId, con); - cmd->subtype = 
AT_ReAddConstraint; - tab->subcmds[AT_PASS_OLD_CONSTR] = lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("unexpected statement type: %d", (int)cmd->subtype))); - } - } - break; - } - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unexpected statement type: %d", (int)nodeTag(stm)))); - } - } - } -} - -static void ATPostAlterTypeParse( - Oid oldId, Oid oldRelId, Oid refRelId, const char* cmd_str, List** wqueue, LOCKMODE lockmode, bool rewrite) -{ - ListCell* list_item = NULL; - List* raw_parsetree_list = raw_parser(cmd_str); - List* querytree_list = NIL; - Relation rel; - - /* - * We expect that we will get only ALTER TABLE and CREATE INDEX - * statements. Hence, there is no need to pass them through - * parse_analyze() or the rewriter, but instead we need to pass them - * through parse_utilcmd.c to make them ready for execution. - */ - foreach (list_item, raw_parsetree_list) { - Node* stmt = (Node*)lfirst(list_item); - - if (IsA(stmt, IndexStmt)) { - querytree_list = lappend(querytree_list, transformIndexStmt(oldRelId, (IndexStmt*)stmt, cmd_str)); - } else if (IsA(stmt, AlterTableStmt)) { - querytree_list = - list_concat(querytree_list, transformAlterTableStmt(oldRelId, (AlterTableStmt*)stmt, cmd_str)); - } else { - querytree_list = lappend(querytree_list, stmt); - } - } - - /* Caller should already have acquired whatever lock we need. */ - rel = relation_open(oldRelId, NoLock); - - AttachEachCommandInQueue(oldId, refRelId, wqueue, rewrite, querytree_list, rel); - - relation_close(rel, NoLock); -} - -/* - * Subroutine for ATPostAlterTypeParse(). Calls out to CheckIndexCompatible() - * for the real analysis, then mutates the IndexStmt based on that verdict. - * Global temporary table cannot reuse relfilenode because the relfilenode mapping is maintained in memory. 
- */ -void TryReuseIndex(Oid oldId, IndexStmt* stmt) -{ - if (CheckIndexCompatible(oldId, stmt->accessMethod, stmt->indexParams, stmt->excludeOpNames) && - !IsGlobalTempTable(oldId)) { - Relation irel = index_open(oldId, NoLock); - - /* - * try to reuse existing index storage. for row relation, only need *oldNode*. - *for column relation, also remember the oid of psort index. - */ - stmt->oldNode = irel->rd_node.relNode; - stmt->oldPSortOid = irel->rd_rel->relcudescrelid; - stmt->indexOid = oldId; - index_close(irel, NoLock); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Subroutine for ATPostAlterTypeParse(). Calls out to CheckIndexCompatible() - * for the real analysis, then mutates the IndexStmt based on that verdict. - * Description : - * Notes : - * Input : - * Output : - */ -void tryReusePartedIndex(Oid oldId, IndexStmt* stmt, Relation rel) -{ - List* partOids = NIL; - ListCell* cell = NULL; - Oid partOid = InvalidOid; - Oid partIndexOid = InvalidOid; - Partition partition = NULL; - - if (CheckIndexCompatible(oldId, stmt->accessMethod, stmt->indexParams, stmt->excludeOpNames)) { - Relation irel = index_open(oldId, NoLock); - - /* - * try to reuse existing index storage. for row relation, only need 'oldNode'. - * for column relation, also remember the oid of psort index. index partition - * relation oid is needed for each partition. - */ - stmt->oldNode = irel->rd_node.relNode; - stmt->oldPSortOid = irel->rd_rel->relcudescrelid; - stmt->indexOid = oldId; - - partOids = relationGetPartitionOidList(rel); - foreach (cell, partOids) { - partOid = lfirst_oid(cell); - partIndexOid = getPartitionIndexOid(oldId, partOid); - - partition = partitionOpen(irel, partIndexOid, NoLock); - /* remember the old relfilenodes and psort oids. 
*/ - stmt->partIndexOldNodes = lappend_oid(stmt->partIndexOldNodes, partition->pd_part->relfilenode); - stmt->partIndexOldPSortOid = lappend_oid(stmt->partIndexOldPSortOid, partition->pd_part->relcudescrelid); - partitionClose(irel, partition, NoLock); - } - releasePartitionOidList(&partOids); - - index_close(irel, NoLock); - } -} - -/* - * Subroutine for ATPostAlterTypeParse(). - * - * Stash the old P-F equality operator into the Constraint node, for possible - * use by ATAddForeignKeyConstraint() in determining whether revalidation of - * this constraint can be skipped. - */ -static void TryReuseForeignKey(Oid oldId, Constraint* con) -{ - HeapTuple tup; - Datum adatum; - bool isNull = false; - ArrayType* arr = NULL; - Oid* rawarr = NULL; - int numkeys; - int i; - - Assert(con->contype == CONSTR_FOREIGN); - Assert(con->old_conpfeqop == NIL); /* already prepared this node */ - - tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId)); - if (!HeapTupleIsValid(tup)) { - /* should not happen */ - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for constraint %u", oldId))); - } - - adatum = SysCacheGetAttr(CONSTROID, tup, Anum_pg_constraint_conpfeqop, &isNull); - if (isNull) - ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("null conpfeqop for constraint %u", oldId))); - arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */ - numkeys = ARR_DIMS(arr)[0]; - /* test follows the one in ri_FetchConstraintInfo() */ - if (ARR_NDIM(arr) != 1 || ARR_HASNULL(arr) || ARR_ELEMTYPE(arr) != OIDOID) - ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("conpfeqop is not a 1-D Oid array"))); - rawarr = (Oid*)ARR_DATA_PTR(arr); - - /* stash a List of the operator Oids in our Constraint node */ - for (i = 0; i < numkeys; i++) - con->old_conpfeqop = lcons_oid(rawarr[i], con->old_conpfeqop); - - ReleaseSysCache(tup); -} - -static void AlterTypeOwnerByTbl(Relation target_rel, Oid newOwnerId) -{ - TupleDesc 
tupDesc = target_rel->rd_att; - char* typnamespace = NULL; - char* typname = NULL; - HeapTuple tuple; - Form_pg_type typeForm; - - for (int i = 0; i < tupDesc->natts; i++) { - if (!tupDesc->attrs[i].attisdropped && type_is_set(tupDesc->attrs[i].atttypid)) { - Oid atttypid = tupDesc->attrs[i].atttypid; - - tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(atttypid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for type %u", atttypid))); - } - typeForm = (Form_pg_type)GETSTRUCT(tuple); - typnamespace = pstrdup(get_namespace_name(typeForm->typnamespace)); - typname = NameStr(typeForm->typname); - - List *typlist = list_make2(makeString(typnamespace), makeString(typname)); - AlterTypeOwner(typlist, newOwnerId, OBJECT_TYPE, false); - ReleaseSysCache(tuple); - } - } -} - -/* - * ALTER TABLE OWNER - * - * recursing is true if we are recursing from a table to its indexes, - * sequences, or toast table. We don't allow the ownership of those things to - * be changed separately from the parent table. Also, we can skip permission - * checks (this is necessary not just an optimization, else we'd fail to - * handle toast tables properly). - * - * recursing is also true if ALTER TYPE OWNER is calling us to fix up a - * free-standing composite type. - */ -void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lockmode) -{ - Relation target_rel; - Relation class_rel; - HeapTuple tuple; - Form_pg_class tuple_class; - - /* - * Get exclusive lock till end of transaction on the target table. Use - * relation_open so that we can work on indexes and sequences. 
- */ - target_rel = relation_open(relationOid, lockmode); - - /* Get its pg_class tuple, too */ - class_rel = heap_open(RelationRelationId, RowExclusiveLock); - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationOid)); - if (!HeapTupleIsValid(tuple)) - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relationOid))); - tuple_class = (Form_pg_class)GETSTRUCT(tuple); - - /* Can we change the ownership of this tuple? */ - switch (tuple_class->relkind) { - case RELKIND_RELATION: - case RELKIND_VIEW: - case RELKIND_CONTQUERY: - case RELKIND_MATVIEW: - case RELKIND_FOREIGN_TABLE: - case RELKIND_STREAM: - /* ok to change owner */ - if (IS_PGXC_COORDINATOR && in_logic_cluster()) { - Oid new_group_oid, group_oid; - bool is_admin = systemDBA_arg(newOwnerId) || superuser_arg(newOwnerId); - - /* check whether the group_oid of the two owners are same. - * Only allow changing owner to other group's role in redistribution. - * Or allow changing owner to sysadmin users. 
- */ - group_oid = get_pgxc_logic_groupoid(tuple_class->relowner); - if (tuple_class->relkind != RELKIND_CONTQUERY && tuple_class->relkind != RELKIND_VIEW - && !OidIsValid(group_oid) && is_pgxc_class_table(relationOid)) { - group_oid = get_pgxc_class_groupoid(relationOid); - } - - new_group_oid = get_pgxc_logic_groupoid(newOwnerId); - - if (group_oid != new_group_oid && OidIsValid(group_oid) && !is_admin) { - char in_redis_new = 'n'; - char in_redis = get_pgxc_group_redistributionstatus(group_oid); - if (OidIsValid(new_group_oid)) { - in_redis_new = get_pgxc_group_redistributionstatus(new_group_oid); - } - if (in_redis != 'y' && in_redis != 't' && in_redis_new != 'y' && in_redis_new != 't') { - /* Old group and new group are not redistribution group */ - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Can not change owner of \"%s\" to other logic cluster users.", - NameStr(tuple_class->relname)))); - } - } - } - break; - case RELKIND_INDEX: - case RELKIND_GLOBAL_INDEX: - if (!recursing) { - /* - * Because ALTER INDEX OWNER used to be allowed, and in fact - * is generated by old versions of pg_dump, we give a warning - * and do nothing rather than erroring out. Also, to avoid - * unnecessary chatter while restoring those old dumps, say - * nothing at all if the command would be a no-op anyway. 
- */ - if (tuple_class->relowner != newOwnerId) - ereport(WARNING, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot change owner of index \"%s\"", NameStr(tuple_class->relname)), - errhint("Change the ownership of the index's table, instead."))); - /* quick hack to exit via the no-op path */ - newOwnerId = tuple_class->relowner; - } - break; - case RELKIND_SEQUENCE: - case RELKIND_LARGE_SEQUENCE: - if (!recursing && tuple_class->relowner != newOwnerId) { - /* if it's an owned sequence, disallow changing it by itself */ - Oid tableId; - int32 colId; - - if (sequenceIsOwned(relationOid, &tableId, &colId)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot change owner of (large) sequence \"%s\"", NameStr(tuple_class->relname)), - errdetail("Sequence \"%s\" is linked to table \"%s\".", - NameStr(tuple_class->relname), - get_rel_name(tableId)))); - } - break; - case RELKIND_COMPOSITE_TYPE: - if (recursing) - break; - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a composite type", NameStr(tuple_class->relname)), - errhint("Use ALTER TYPE instead."))); - break; - case RELKIND_TOASTVALUE: - if (recursing) - break; - /* fall through */ - default: - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, sequence, or foreign table", NameStr(tuple_class->relname)))); - } - - /* - * If the new owner is the same as the existing owner, consider the - * command to have succeeded. This is for dump restoration purposes. - */ - if (tuple_class->relowner != newOwnerId) { - Datum repl_val[Natts_pg_class]; - bool repl_null[Natts_pg_class]; - bool repl_repl[Natts_pg_class]; - Acl* newAcl = NULL; - Datum aclDatum; - bool isNull = false; - HeapTuple newtuple; - errno_t rc = EOK; - - /* skip permission checks when recursing to index or toast table */ - if (!recursing) { - /* Superusers can always do it, except manipulate independent role's objects. 
*/ - if ((!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) || - is_role_independent(tuple_class->relowner)) { - Oid namespaceOid = tuple_class->relnamespace; - AclResult aclresult; - - /* Otherwise, must be owner or has privileges of the existing object's owner. */ - if (!has_privs_of_role(GetUserId(), tuple_class->relowner)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(target_rel)); - - /* Must be able to become new owner */ - check_is_member_of_role(GetUserId(), newOwnerId); - - /* New owner must have CREATE privilege on namespace */ - aclresult = pg_namespace_aclcheck(namespaceOid, newOwnerId, ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceOid)); - } - - /* check whether newOwner has enough space on CN, or translation tableSize from oldOwner to newOwner on DN*/ - if (g_instance.attr.attr_resource.enable_perm_space && !IS_SINGLE_NODE) { - UserData* newUserdata = GetUserDataFromHTab(newOwnerId, false); - - FunctionCallInfo fcinfo = - (FunctionCallInfo)palloc0(sizeof(FunctionCallInfoData) + sizeof(bool) + sizeof(Datum)); - - fcinfo->nargs = 1; - fcinfo->argnull = (bool*)((char*)fcinfo + sizeof(FunctionCallInfoData)); - fcinfo->argnull[0] = false; - fcinfo->arg = (Datum*)((char*)fcinfo->argnull + sizeof(bool)); - fcinfo->arg[0] = UInt32GetDatum(relationOid); - - int64 tableSize = DatumGetInt64(pg_total_relation_size(fcinfo)); - - pfree_ext(fcinfo); - - DataSpaceType type = RelationUsesSpaceType(target_rel->rd_rel->relpersistence); - - if (IS_PGXC_COORDINATOR) { - if (type == SP_PERM) { - if (newUserdata->spacelimit > 0 && - (newUserdata->totalspace + tableSize > newUserdata->spacelimit)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("no perm space is available for the targeted owner"))); - } - if (newUserdata->parent && newUserdata->parent->spacelimit > 0 && - (newUserdata->parent->totalspace 
+ tableSize > newUserdata->parent->spacelimit)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("no perm space is available for the targeted user group"))); - } - } else { - if (newUserdata->tmpSpaceLimit > 0 && - (newUserdata->tmpSpace + tableSize > newUserdata->tmpSpaceLimit)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("no temp space is available for the targeted owner"))); - } - if (newUserdata->parent && newUserdata->parent->tmpSpaceLimit > 0 && - (newUserdata->parent->tmpSpace + tableSize > newUserdata->parent->tmpSpaceLimit)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("no temp space is available for the targeted user group"))); - } - } - } - - if (IS_PGXC_DATANODE) { - if (newOwnerId != BOOTSTRAP_SUPERUSERID) { - perm_space_increase(newOwnerId, tableSize, type); - } - perm_space_decrease(tuple_class->relowner, tableSize, type); - } - } - } - - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - repl_repl[Anum_pg_class_relowner - 1] = true; - repl_val[Anum_pg_class_relowner - 1] = ObjectIdGetDatum(newOwnerId); - - /* - * Determine the modified ACL for the new owner. This is only - * necessary when the ACL is non-null. 
- */ - aclDatum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relacl, &isNull); - if (!isNull) { - newAcl = aclnewowner(DatumGetAclP(aclDatum), tuple_class->relowner, newOwnerId); - repl_repl[Anum_pg_class_relacl - 1] = true; - repl_val[Anum_pg_class_relacl - 1] = PointerGetDatum(newAcl); - } - - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(class_rel), repl_val, repl_null, repl_repl); - - simple_heap_update(class_rel, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(class_rel, newtuple); - - tableam_tops_free_tuple(newtuple); - - /* - * We must similarly update any per-column ACLs to reflect the new - * owner; for neatness reasons that's split out as a subroutine. - */ - change_owner_fix_column_acls(relationOid, tuple_class->relowner, newOwnerId); - - /* - * Update owner dependency reference, if any. A composite type has - * none, because it's tracked for the pg_type entry instead of here; - * indexes and TOAST tables don't have their own entries either. - * the table in CSTORE_NAMESPACE such like pg_cudesc_xxx, pg_delta_xxx - * don't have their own entries either. 
- */ - if (tuple_class->relkind != RELKIND_COMPOSITE_TYPE && tuple_class->relkind != RELKIND_INDEX && - tuple_class->relkind != RELKIND_GLOBAL_INDEX && tuple_class->relkind != RELKIND_TOASTVALUE && - tuple_class->relnamespace != CSTORE_NAMESPACE) - changeDependencyOnOwner(RelationRelationId, relationOid, newOwnerId); - - /* - * Also change the ownership of the table's row type, if it has one - */ - if (tuple_class->relkind != RELKIND_INDEX && tuple_class->relkind != RELKIND_GLOBAL_INDEX) - AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId, tuple_class->relkind == RELKIND_COMPOSITE_TYPE); - - /* - * If we are operating on a table, also change the ownership of any - * indexes and sequences that belong to the table, as well as the - * table's toast table (if it has one) - */ - if (tuple_class->relkind == RELKIND_RELATION || - tuple_class->relkind == RELKIND_MATVIEW || - tuple_class->relkind == RELKIND_TOASTVALUE) { - List* index_oid_list = NIL; - ListCell* i = NULL; - - /* Find all the indexes belonging to this relation */ - index_oid_list = RelationGetIndexList(target_rel); - - /* For each index, recursively change its ownership */ - foreach (i, index_oid_list) - ATExecChangeOwner(lfirst_oid(i), newOwnerId, true, lockmode); - - list_free_ext(index_oid_list); - } - - if (tuple_class->relkind == RELKIND_RELATION || - tuple_class->relkind == RELKIND_MATVIEW) { - /* If it has a toast table, recurse to change its ownership */ - if (tuple_class->reltoastrelid != InvalidOid) - ATExecChangeOwner(tuple_class->reltoastrelid, newOwnerId, true, lockmode); - - /* If it has a map table, recurse to change its ownership */ - if (tuple_class->relkind == RELKIND_MATVIEW) { - char matviewmap[NAMEDATALEN]; - errno_t rc = snprintf_s(matviewmap, sizeof(matviewmap), sizeof(matviewmap) - 1, - "matviewmap_%u", relationOid); - securec_check_ss(rc, "\0", "\0"); - - Oid mapOid = RelnameGetRelid(matviewmap); - if (mapOid != InvalidOid) { - ATExecChangeOwner(mapOid, newOwnerId, true, 
lockmode); - } - } - - /* If it has a mlog table, recurse to change its ownership */ - if (target_rel->rd_mlogoid != InvalidOid) { - char mlog[NAMEDATALEN]; - errno_t rc = snprintf_s(mlog, sizeof(mlog), sizeof(mlog) - 1, "mlog_%u", relationOid); - securec_check_ss(rc, "\0", "\0"); - - Oid mlogOid = RelnameGetRelid(mlog); - if (mlogOid != InvalidOid) { - ATExecChangeOwner(mlogOid, newOwnerId, true, lockmode); - } - } - - /* If it has a cudesc table, recurse to change its ownership */ - if (tuple_class->relcudescrelid != InvalidOid) - /* - * for timeseries cudesc table, its relcudescrelid is its partition oid, - * do not need to change partition's owner - * for column store table, the cudescrelid is the only one cudesc row table oid - */ - if (!isPartitionObject(tuple_class->relcudescrelid, PART_OBJ_TYPE_TABLE_PARTITION, true)) { - ATExecChangeOwner(tuple_class->relcudescrelid, newOwnerId, true, lockmode); - } - - /* If it has a delta table, recurse to change its ownership */ - if (tuple_class->reldeltarelid != InvalidOid) - ATExecChangeOwner(tuple_class->reldeltarelid, newOwnerId, true, lockmode); - /* the timeseries table should recurse to change its tag table */ -#ifdef ENABLE_MULTIPLE_NODES - /* if with reloption period or ttl, will need to update job owner */ - update_rel_job_owner(target_rel, newOwnerId, lockmode); - if (unlikely(RelationIsTsStore(target_rel))) { - Oid tag_relid = get_tag_relid(RelationGetRelationName(target_rel), - target_rel->rd_rel->relnamespace); - ATExecChangeOwner(tag_relid, newOwnerId, true, lockmode); - } -#endif - /* If it has dependent sequences, recurse to change them too */ - change_owner_recurse_to_sequences(relationOid, newOwnerId, lockmode); - } - - if ((tuple_class->relkind == RELKIND_INDEX || tuple_class->relkind == RELKIND_GLOBAL_INDEX) && - tuple_class->relam == PSORT_AM_OID) { - /* if it is PSORT index, recurse to change PSORT releateion's ownership */ - if (tuple_class->relcudescrelid != InvalidOid) - 
ATExecChangeOwner(tuple_class->relcudescrelid, newOwnerId, true, lockmode); - } - - if (tuple_class->relkind == RELKIND_FOREIGN_TABLE || tuple_class->relkind == RELKIND_STREAM) { - // if it is a foreign talbe or a stream, should change its errortable's ownership - DefElem* def = GetForeignTableOptionByName(relationOid, optErrorRel); - - if (def != NULL) { - char* relname = strVal(def->arg); - Oid errorOid = get_relname_relid(relname, get_rel_namespace(relationOid)); - ATExecChangeOwner(errorOid, newOwnerId, true, lockmode); - } - } - - /* also need change the owner of type if the attribute is set type */ - if (tuple_class->relkind == RELKIND_RELATION) { - AlterTypeOwnerByTbl(target_rel, newOwnerId); - } - - if (tuple_class->parttype == PARTTYPE_PARTITIONED_RELATION) { - List* partCacheList = NIL; - ListCell* cell = NULL; - HeapTuple partTuple = NULL; - Form_pg_partition partForm = NULL; - - if (tuple_class->relkind == RELKIND_RELATION) { - partCacheList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relationOid); - } else if (tuple_class->relkind == RELKIND_INDEX) { - partCacheList = searchPgPartitionByParentId(PART_OBJ_TYPE_INDEX_PARTITION, relationOid); - } else if (tuple_class->relkind == RELKIND_GLOBAL_INDEX) { - /* If it has a toast table, recurse to change its ownership */ - if (tuple_class->reltoastrelid != InvalidOid) - ATExecChangeOwner(tuple_class->reltoastrelid, newOwnerId, true, lockmode); - - /* If it has a cudesc table, recurse to change its ownership */ - if (tuple_class->relcudescrelid != InvalidOid) - ATExecChangeOwner(tuple_class->relcudescrelid, newOwnerId, true, lockmode); - - /* If it has a delta table, recurse to change its ownership */ - if (tuple_class->reldeltarelid != InvalidOid) - ATExecChangeOwner(tuple_class->reldeltarelid, newOwnerId, true, lockmode); - partCacheList = NIL; - } else { - partCacheList = NIL; - } - - if (partCacheList != NIL) { - foreach (cell, partCacheList) { - partTuple = (HeapTuple)lfirst(cell); - - if 
(PointerIsValid(partTuple)) { - partForm = (Form_pg_partition)GETSTRUCT(partTuple); - /* If it has a toast table, recurse to change its ownership */ - if (partForm->reltoastrelid != InvalidOid) - ATExecChangeOwner(partForm->reltoastrelid, newOwnerId, true, lockmode); - - /* If it has a cudesc table, recurse to change its ownership */ - if (partForm->relcudescrelid != InvalidOid) - ATExecChangeOwner(partForm->relcudescrelid, newOwnerId, true, lockmode); - - /* If it has a delta table, recurse to change its ownership */ - if (partForm->reldeltarelid != InvalidOid) - ATExecChangeOwner(partForm->reldeltarelid, newOwnerId, true, lockmode); - /* change the cudesc rel owner */ -#ifdef ENABLE_MULTIPLE_NODES - if (RelationIsTsStore(target_rel)) { - Oid part_oid = HeapTupleGetOid(partTuple); - List* cudesclist = search_cudesc(part_oid, true); - ListCell* lc; - foreach(lc, cudesclist) { - ATExecChangeOwner(lfirst_oid(lc), newOwnerId, true, lockmode); - } - list_free_ext(cudesclist); - } -#endif - } - } - freePartList(partCacheList); - } - } - } - - ReleaseSysCache(tuple); - heap_close(class_rel, RowExclusiveLock); - relation_close(target_rel, NoLock); -} - -/* - * change_owner_fix_column_acls - * - * Helper function for ATExecChangeOwner. Scan the columns of the table - * and fix any non-null column ACLs to reflect the new owner. 
- */ -static void change_owner_fix_column_acls(Oid relationOid, Oid oldOwnerId, Oid newOwnerId) -{ - Relation attRelation; - SysScanDesc scan; - ScanKeyData key[1]; - HeapTuple attributeTuple; - - attRelation = heap_open(AttributeRelationId, RowExclusiveLock); - ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationOid)); - scan = systable_beginscan(attRelation, AttributeRelidNumIndexId, true, NULL, 1, key); - while (HeapTupleIsValid(attributeTuple = systable_getnext(scan))) { - Form_pg_attribute att = (Form_pg_attribute)GETSTRUCT(attributeTuple); - Datum repl_val[Natts_pg_attribute]; - bool repl_null[Natts_pg_attribute]; - bool repl_repl[Natts_pg_attribute]; - Acl* newAcl = NULL; - Datum aclDatum; - bool isNull = false; - HeapTuple newtuple; - errno_t rc = EOK; - - /* Ignore dropped columns */ - if (att->attisdropped) - continue; - - aclDatum = heap_getattr(attributeTuple, Anum_pg_attribute_attacl, RelationGetDescr(attRelation), &isNull); - /* Null ACLs do not require changes */ - if (isNull) - continue; - - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - newAcl = aclnewowner(DatumGetAclP(aclDatum), oldOwnerId, newOwnerId); - repl_repl[Anum_pg_attribute_attacl - 1] = true; - repl_val[Anum_pg_attribute_attacl - 1] = PointerGetDatum(newAcl); - - newtuple = (HeapTuple) tableam_tops_modify_tuple(attributeTuple, RelationGetDescr(attRelation), repl_val, repl_null, repl_repl); - - simple_heap_update(attRelation, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(attRelation, newtuple); - - tableam_tops_free_tuple(newtuple); - } - systable_endscan(scan); - heap_close(attRelation, RowExclusiveLock); -} - -/* - * change_owner_recurse_to_sequences - * - * Helper function for ATExecChangeOwner. 
Examines pg_depend searching - * for sequences that are dependent on serial columns, and changes their - * ownership. - */ -static void change_owner_recurse_to_sequences(Oid relationOid, Oid newOwnerId, LOCKMODE lockmode) -{ - Relation depRel; - SysScanDesc scan; - ScanKeyData key[2]; - HeapTuple tup; - - /* - * SERIAL sequences are those having an auto dependency on one of the - * table's columns (we don't care *which* column, exactly). - */ - depRel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit( - &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationOid)); - /* we leave refobjsubid unspecified */ - scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tup); - Relation seqRel; - - /* skip dependencies other than auto dependencies on columns */ - if (depForm->refobjsubid == 0 || depForm->classid != RelationRelationId || depForm->objsubid != 0 || - depForm->deptype != DEPENDENCY_AUTO) - continue; - - /* Use relation_open just in case it's an index */ - seqRel = relation_open(depForm->objid, lockmode); - - /* skip non-sequence relations */ - if (!RELKIND_IS_SEQUENCE(RelationGetForm(seqRel)->relkind)) { - /* No need to keep the lock */ - relation_close(seqRel, lockmode); - continue; - } - - /* We don't need to close the sequence while we alter it. */ - ATExecChangeOwner(depForm->objid, newOwnerId, true, lockmode); - - /* Now we can close it. Keep the lock till end of transaction. */ - relation_close(seqRel, NoLock); - } - - systable_endscan(scan); - - relation_close(depRel, AccessShareLock); -} - -/* - * ALTER TABLE CLUSTER ON - * - * The only thing we have to do is to change the indisclustered bits. - * Return the address of the new clustering index. 
- */ -static ObjectAddress ATExecClusterOn(Relation rel, const char* indexName, LOCKMODE lockmode) -{ - Oid indexOid; - ObjectAddress address; - - indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace); - - if (!OidIsValid(indexOid)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("index \"%s\" for table \"%s\" does not exist", indexName, RelationGetRelationName(rel)))); - - /* Check index is valid to cluster on */ - check_index_is_clusterable(rel, indexOid, false, lockmode); - - /* And do the work */ - mark_index_clustered(rel, indexOid); - ObjectAddressSet(address, - RelationRelationId, indexOid); - - return address; -} - -/* - * ALTER TABLE SET WITHOUT CLUSTER - * - * We have to find any indexes on the table that have indisclustered bit - * set and turn it off. - */ -static void ATExecDropCluster(Relation rel, LOCKMODE lockmode) -{ - mark_index_clustered(rel, InvalidOid); -} - -/* - * ALTER TABLE SET TABLESPACE - */ -static void ATPrepSetTableSpace(AlteredTableInfo* tab, Relation rel, const char* tablespacename, LOCKMODE lockmode) -{ - Oid tablespaceId; - AclResult aclresult; - - /* Check that the tablespace exists */ - tablespaceId = get_tablespace_oid(tablespacename, false); - - /* Check its permissions */ - aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, tablespacename); - - /* Save info for Phase 3 to do the real work */ - if (OidIsValid(tab->newTableSpace)) - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("cannot have multiple SET TABLESPACE subcommands"))); - tab->newTableSpace = tablespaceId; -} - -// psort tuple is fetched from pg_class either normal or partition relation. -// for normal relation, the tuple oid is stored in FormData_pg_class.relcudescrelid, -// while partition relation, tuple oid is stored in Form_pg_partition.relcudescrelid. 
-// -static void ATExecSetPSortOption(Oid psortOid, List* defList, AlterTableType operation, LOCKMODE lockmode) -{ - HeapTuple newtuple; - Datum datum; - bool isnull = false; - Datum newOptions; - Datum repl_val[Natts_pg_class]; - bool repl_null[Natts_pg_class]; - bool repl_repl[Natts_pg_class]; - - Assert(InvalidOid != psortOid); - Relation pgclass = heap_open(RelationRelationId, RowExclusiveLock); - Relation psortRel = heap_open(psortOid, lockmode); - - HeapTuple psortTup = SearchSysCache1(RELOID, ObjectIdGetDatum(psortOid)); - if (!HeapTupleIsValid(psortTup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", psortOid))); - } - - // check that it's a Column-Store heap relation. - if (!RelationIsColStore(psortRel)) { - if (RELKIND_RELATION != psortRel->rd_rel->relkind) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relkind of psort tuple shouldn't be '%c'.", psortRel->rd_rel->relkind), - errdetail("psort tuple's relkind must be '%c'.", RELKIND_RELATION))); - } else { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("psort tuple doesn't have the correct ORIENTATION value."), - errdetail("ORIENTATION value within psort tuple must be COLUMN."))); - } - } - - ForbidUserToSetDefinedOptions(defList); - ForbidToSetOptionsForPSort(defList); - - if (operation == AT_ReplaceRelOptions) { - datum = (Datum)0; - isnull = true; - } else { - datum = SysCacheGetAttr(RELOID, psortTup, Anum_pg_class_reloptions, &isnull); - } - - newOptions = - transformRelOptions(isnull ? 
(Datum)0 : datum, defList, NULL, NULL, false, operation == AT_ResetRelOptions); - - bytea* heapRelOpt = heap_reloptions(RELKIND_RELATION, newOptions, true); - Assert(heapRelOpt != NULL); - CheckCStoreRelOption((StdRdOptions*)heapRelOpt); - - errno_t rc = 0; - rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val)); - securec_check_c(rc, "\0", "\0"); - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check_c(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check_c(rc, "\0", "\0"); - - if (newOptions != (Datum)0) - repl_val[Anum_pg_class_reloptions - 1] = newOptions; - else - repl_null[Anum_pg_class_reloptions - 1] = true; - repl_repl[Anum_pg_class_reloptions - 1] = true; - - newtuple = (HeapTuple) tableam_tops_modify_tuple(psortTup, RelationGetDescr(pgclass), repl_val, repl_null, repl_repl); - simple_heap_update(pgclass, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(pgclass, newtuple); - tableam_tops_free_tuple(newtuple); - - ReleaseSysCache(psortTup); - - heap_close(psortRel, NoLock); - heap_close(pgclass, RowExclusiveLock); -} - -static void ATExecSetRelOptionsToast(Oid toastid, List* defList, AlterTableType operation, LOCKMODE lockmode) -{ - Relation pgclass; - HeapTuple tuple; - HeapTuple newtuple; - Datum datum; - bool isnull = false; - Datum newOptions; - Datum repl_val[Natts_pg_class]; - bool repl_null[Natts_pg_class]; - bool repl_repl[Natts_pg_class]; - static const char* const validnsps[] = HEAP_RELOPT_NAMESPACES; - errno_t rc = EOK; - - Relation toastrel; - - pgclass = heap_open(RelationRelationId, RowExclusiveLock); - - toastrel = heap_open(toastid, lockmode); - - /* Fetch heap tuple */ - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(toastid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", toastid))); - } - - if (operation == AT_ReplaceRelOptions) { - /* - * If we're 
supposed to replace the reloptions list, we just - * pretend there were none before. - */ - datum = (Datum)0; - isnull = true; - } else { - /* Get the old reloptions */ - datum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - } - - newOptions = transformRelOptions( - isnull ? (Datum)0 : datum, defList, "toast", validnsps, false, operation == AT_ResetRelOptions); - - (void)heap_reloptions(RELKIND_TOASTVALUE, newOptions, true); - - rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - if (newOptions != (Datum)0) - repl_val[Anum_pg_class_reloptions - 1] = newOptions; - else - repl_null[Anum_pg_class_reloptions - 1] = true; - - repl_repl[Anum_pg_class_reloptions - 1] = true; - - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(pgclass), repl_val, repl_null, repl_repl); - - simple_heap_update(pgclass, &newtuple->t_self, newtuple); - - CatalogUpdateIndexes(pgclass, newtuple); - - tableam_tops_free_tuple(newtuple); - - ReleaseSysCache(tuple); - - heap_close(toastrel, NoLock); - heap_close(pgclass, RowExclusiveLock); -} - -/* Check if compressed options have changed. 
*/ -inline bool CheckIfModifyCompressedOptions(PageCompressOpts* newCompressOpt, RelFileCompressOption current) -{ - if (newCompressOpt->compressType != (int)current.compressAlgorithm || - newCompressOpt->compressLevel != (int)current.compressLevel || - newCompressOpt->compressChunkSize != CHUNK_SIZE_LIST[current.compressChunkSize] || - newCompressOpt->compressPreallocChunks != (int)current.compressPreallocChunks || - newCompressOpt->compressByteConvert != (int)current.byteConvert || - newCompressOpt->compressDiffConvert != (int)current.diffConvert) { - return true; - } - return false; -} - -/** - * Row compressed Options can not be used in segment table, - * column table, view, unlogged table or temp table. - */ -bool CheckTableSupportSetCompressedOptions(Relation rel) -{ - if (rel == NULL) { - return false; - } - - if (rel->rd_rel->relkind != RELKIND_RELATION) { - return false; - } - - if (!RelationIsPermanent(rel)) { - return false; - } - - if (RelationIsColStore(rel) || RelationIsTsStore(rel) || RelationIsSegmentTable(rel)) { - return false; - } - return true; -} - -bool CheckDefListContainsCompressedOptions(List* defList) -{ - if (defList == NULL) { - return false; - } - static const char *compressedOptions[] = {"compresstype", "compress_chunk_size", "compress_prealloc_chunks", - "compress_level", "compress_byte_convert", "compress_diff_convert"}; - static const int compressedOptionsNum = 6; - ListCell *opt = NULL; - for (int i = 0; i < compressedOptionsNum; ++i) { - foreach (opt, defList) { - DefElem *def = (DefElem *)lfirst(opt); - if (pg_strcasecmp(def->defname, compressedOptions[i]) == 0) { - return true; - } - } - } - return false; -} - -/** - * Set compressed options need to rebuild table, check whether the modification need to rebuild table, - * and check whether the new compressed options is valid. 
- */ -bool static transformTableCompressedOptions(Relation rel, bytea* relOption, List* defList) -{ - /* only row table can modify compressed options */ - if (!CheckTableSupportSetCompressedOptions(rel)) { - ForbidUserToSetCompressedOptions(defList); - return false; - } - - PageCompressOpts* newCompressOpt = &(((StdRdOptions*)relOption)->compress); - RelFileCompressOption currentCompressOpt; - TransCompressOptions(rel->rd_node, ¤tCompressOpt); - - if(!CheckIfModifyCompressedOptions(newCompressOpt, currentCompressOpt)) { - return false; - } - - /* check whether the new compression parameter is valid */ - if (newCompressOpt->compressType == COMPRESS_TYPE_NONE && - (newCompressOpt->compressLevel != 0 || newCompressOpt->compressChunkSize != BLCKSZ / 2 || - newCompressOpt->compressPreallocChunks != 0 || newCompressOpt->compressByteConvert != false || - newCompressOpt->compressDiffConvert != false)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_level=0, compress_chunk_size=4096, compress_prealloc_chunks=0, compress_byte_convert=false, compress_diff_convert=false should be set when compresstype=0"))); - } - - if (newCompressOpt->compressType == COMPRESS_TYPE_PGZSTD) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("row-compression feature current not support algorithm is PGZSTD."))); - } - - if (newCompressOpt->compressType != COMPRESS_TYPE_ZSTD && newCompressOpt->compressLevel != 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_level should be used with ZSTD algorithm."))); - } - - if (!newCompressOpt->compressByteConvert && newCompressOpt->compressDiffConvert) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_diff_convert should be used with compress_byte_convert."))); - } - - bool success = false; - ConvertChunkSize(newCompressOpt->compressChunkSize, &success); - if (!success) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("invalid compress_chunk_size %u, must be one 
of %d, %d, %d or %d", - newCompressOpt->compressChunkSize, BLCKSZ / 16, BLCKSZ / 8, BLCKSZ / 4, BLCKSZ / 2))); - } - if (newCompressOpt->compressPreallocChunks >= BLCKSZ / newCompressOpt->compressChunkSize) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("invalid compress_prealloc_chunks %u, must be less than %u for %s", - newCompressOpt->compressPreallocChunks, - BLCKSZ / newCompressOpt->compressChunkSize, - RelationGetRelationName(rel)))); - } - - return true; -} - -/** - * Do not modify compression parameters of index. - */ -void static transfromIndexCompressedOptions(Relation rel, bytea* relOoption, List* defList) -{ - if (rel->rd_node.opt == 0) { - ForbidUserToSetCompressedOptions(defList); - return; - } - - PageCompressOpts* newCompressOpt = &(((StdRdOptions*)relOoption)->compress); - RelFileCompressOption current; - TransCompressOptions(rel->rd_node, ¤t); - if (newCompressOpt) { - if (newCompressOpt->compressType != (int)current.compressAlgorithm) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("change compresstype OPTION is not supported"))); - } - if ((int)current.compressAlgorithm != COMPRESS_TYPE_NONE && - newCompressOpt->compressChunkSize != CHUNK_SIZE_LIST[current.compressChunkSize]) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change compress_chunk_size OPTION is not supported"))); - } - if (!newCompressOpt->compressByteConvert && newCompressOpt->compressDiffConvert) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_diff_convert should be used with compress_byte_convert."))); - } - if (current.compressAlgorithm == COMPRESS_TYPE_PGLZ) { - ListCell *opt = NULL; - foreach (opt, defList) { - DefElem *def = (DefElem *)lfirst(opt); - if (pg_strcasecmp(def->defname, "compress_level") == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_level should be used with ZSTD algorithm."))); - } - } - } - } else { - if ((int)current.compressAlgorithm != 
COMPRESS_TYPE_NONE) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("change compresstype OPTION is not supported"))); - } - } - - /* - * forbid modify partition CompressOption - */ - if (HEAP_IS_PARTITIONED(rel)) { - if ((int)current.compressLevel != newCompressOpt->compressLevel) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change partition compressLevel OPTION is not supported"))); - } - if ((int)current.compressPreallocChunks != newCompressOpt->compressPreallocChunks) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change partition compress_prealloc_chunks OPTION is not supported"))); - } - } -} - -/* - * Check whether the new compressed options are valid, and whether need to rewrite table. - * Modifying the compressed options of row table causes to rewrite. - */ -bool static transformCompressedOptions(Relation rel, bytea* relOption, List* defList, AlteredTableInfo* tab) { - /* If delist doesn't contains compressed options, return false. */ - if (!relOption || defList == NULL || !CheckDefListContainsCompressedOptions(defList)) { - return false; - } - - /* If the relkind doesn't support compressed options, check if delist contains compressed options. - * If does, throw exception. - */ - if (!REL_SUPPORT_COMPRESSED(rel)) { - ForbidUserToSetCompressedOptions(defList); - return false; - } - - /* Most compressed options can be modified only in row table */ - if (tab != NULL && RelationIsRelation(rel)) { - return transformTableCompressedOptions(rel, relOption, defList); - } else { - transfromIndexCompressedOptions(rel, relOption, defList); - } - return false; -} - -/* - * Set, reset, or replace reloptions. 
- */ -static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType operation, LOCKMODE lockmode, bool innerset, AlteredTableInfo* tab) -{ - Oid relid; - Relation pgclass; - HeapTuple tuple, newtuple, tmptuple; - Datum datum; - Datum oldOptions; - bool isnull = false; - Datum newOptions; - Datum repl_val[Natts_pg_class]; - bool repl_null[Natts_pg_class]; - bool repl_repl[Natts_pg_class]; - static const char* const validnsps[] = HEAP_RELOPT_NAMESPACES; - List* partCacheList = NIL; - ListCell* cell = NULL; - Oid toastOid = InvalidOid; - HeapTuple partTuple = NULL; - Form_pg_partition partForm = NULL; - errno_t rc = EOK; - Oid rel_cn_oid = InvalidOid; - RedisHtlAction redis_action = REDIS_REL_INVALID; - char* merge_list = NULL; - bool oldRelHasUids = RELATION_HAS_UIDS(rel); - bool newRelHasUids = false; - if (defList == NIL && operation != AT_ReplaceRelOptions) - return; /* nothing to do */ - - if (GttOncommitOption(defList) != ONCOMMIT_NOOP) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("table cannot add or modify on commit parameter by ALTER TABLE command."))); - } - - if (!innerset) { - /* forbid user to set or change inner options */ - ForbidOutUsersToSetInnerOptions(defList); - ForbidToAlterOptionsForTdeTbl(defList); - } - -#ifdef ENABLE_MULTIPLE_NODES - CheckRedistributeOption(defList, &rel_cn_oid, &redis_action, &merge_list, rel); -#else - CheckRedistributeOption(defList, &rel_cn_oid, &redis_action, rel); -#endif - - defList = list_delete_name(defList, "merge_list"); - pgclass = heap_open(RelationRelationId, RowExclusiveLock); - - /* Fetch heap tuple */ - relid = RelationGetRelid(rel); - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); - } - - // we have to handle psort tuple's options if this is an index relation using PSORT method. 
- // it's identifyed by access method whose oid is PSORT_AM_OID. - // and the psort tuple id is saved in index relation's relcudescrelid field. - - bool needSetPsortOpt = false; - Oid psortTid = InvalidOid; - Oid indexAmId = DatumGetObjectId(SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relam, &isnull)); - if (!isnull && indexAmId == PSORT_AM_OID) { - needSetPsortOpt = true; - Assert(RelationIsIndex(rel)); - psortTid = DatumGetObjectId(SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relcudescrelid, &isnull)); - } - - if (operation == AT_ReplaceRelOptions) { - /* - * If we're supposed to replace the reloptions list, we just pretend - * there were none before. - */ - datum = (Datum)0; - isnull = true; - } else { - /* Get the old reloptions */ - datum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - } - if (rel->rd_rel->relkind == RELKIND_RELATION) { - oldOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - } - - /* remove the redis reloptions. */ - if (redis_action == REDIS_REL_NORMAL) { - List* old_reloptions = untransformRelOptions(datum); - RemoveRedisRelOptionsFromList(&old_reloptions); - RemoveRedisRelOptionsFromList(&defList); - - /* defList should keep unchanged. 
*/ - if (old_reloptions != NIL) { - defList = list_concat(defList, old_reloptions); - } - /* Generate new proposed reloptions (text array) */ - newOptions = transformRelOptions((Datum)0, defList, NULL, validnsps, false, operation == AT_ResetRelOptions); - - if (old_reloptions != NIL && defList != NULL) { - defList = list_truncate(defList, list_length(defList) - list_length(old_reloptions)); - } - - list_free_ext(old_reloptions); - List *redis_reloptions = AlterTableSetRedistribute(rel, redis_action, merge_list); - list_free(redis_reloptions); - } else if (redis_action == REDIS_REL_READ_ONLY) { - List* redis_reloptions = AlterTableSetRedistribute(rel, redis_action, merge_list); - List* old_reloptions = untransformRelOptions(datum); - RemoveRedisRelOptionsFromList(&old_reloptions); - - if (old_reloptions != NIL) { - redis_reloptions = list_concat(redis_reloptions, old_reloptions); - } - - if (redis_reloptions != NIL) { - defList = list_concat(defList, redis_reloptions); - } - - newOptions = transformRelOptions((Datum)0, defList, NULL, validnsps, false, operation == AT_ResetRelOptions); - - if (redis_reloptions != NIL) { - defList = list_truncate(defList, list_length(defList) - list_length(redis_reloptions)); - } - - list_free_ext(redis_reloptions); - } else { - /* Generate new proposed reloptions (text array) */ - List* redis_reloptions = AlterTableSetRedistribute(rel, redis_action, merge_list); - - if (redis_reloptions != NIL) - defList = list_concat(defList, redis_reloptions); - - newOptions = transformRelOptions( - isnull ? (Datum)0 : datum, defList, NULL, validnsps, false, operation == AT_ResetRelOptions); - if (redis_reloptions != NIL) - defList = list_truncate(defList, list_length(defList) - list_length(redis_reloptions)); - - list_free_ext(redis_reloptions); - } - - /* Validate */ - bytea* relOpt = NULL; - switch (rel->rd_rel->relkind) { - case RELKIND_RELATION: { - /* this options only can be used when define a new relation. 
- * forbid to change or reset these options. - */ - ForbidUserToSetDefinedOptions(defList); - - bytea* heapRelOpt = heap_reloptions(rel->rd_rel->relkind, newOptions, true); - relOpt = heapRelOpt; - const char* algo = RelationGetAlgo(rel); - newRelHasUids = StdRdOptionsHasUids(heapRelOpt, RELKIND_RELATION); - if (rel->rd_rel->relhasoids && newRelHasUids) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("table with oids cannot add or modify hasuids by ALTER TABLE command."))); - } - if (ENABLE_DMS && newRelHasUids) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("table under Shared Storage cannot add or modify hasuids by ALTER TABLE command."))); - } - if (RelationIsColStore(rel)) { - /* un-supported options. dont care its values */ - ForbidToSetOptionsForColTbl(defList); - - if (NULL != heapRelOpt) { - /* validate the values of these options */ - CheckCStoreRelOption((StdRdOptions*)heapRelOpt); - } - } else if (RelationIsTsStore(rel)) { - forbid_to_set_options_for_timeseries_tbl(defList); - } else { - /* un-supported options. 
dont care its values */ - ForbidToSetOptionsForRowTbl(defList); - if (algo == NULL || *algo == '\0') { - ForbidToSetTdeOptionsForNonTdeTbl(defList); - } - if (RelationIsUstoreFormat(rel)) { - ForbidToSetOptionsForUstoreTbl(defList); - } - } - - /* validate the values of ttl and period for partition manager */ - if (NULL != heapRelOpt) { - check_partion_policy_rel_option(defList, (StdRdOptions*)heapRelOpt); - } - break; - } - case RELKIND_TOASTVALUE: - case RELKIND_MATVIEW: - case RELKIND_CONTQUERY: - case RELKIND_VIEW:{ - Assert(oldRelHasUids == false); - (void)heap_reloptions(rel->rd_rel->relkind, newOptions, true); - break; - } - case RELKIND_INDEX: - case RELKIND_GLOBAL_INDEX: { - ForbidUserToSetDefinedIndexOptions(rel, defList); - Assert(oldRelHasUids == false); - relOpt = index_reloptions(rel->rd_am->amoptions, newOptions, true); - break; - } - default: - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, materialized view, index, or TOAST table", RelationGetRelationName(rel)))); - break; - } - - if (rel->rd_options && REL_SUPPORT_COMPRESSED(rel)) { - SetupPageCompressForRelation(&rel->rd_node, &((StdRdOptions *)(void *)(rel->rd_options))->compress, - RelationGetRelationName(rel)); - } - - /* Only row table support rewriting table when modifying compressed options. */ - if (transformCompressedOptions(rel, relOpt, defList, tab)) { - /* - * The oldOptions will be used in phase 3 to delete the old data file correctly, - * so the tmpTuple will not be released manually, but be released by memory context. - */ - tmptuple = heap_copytuple(tuple); - oldOptions = SysCacheGetAttr(RELOID, tmptuple, Anum_pg_class_reloptions, &isnull); - - /* - * If modiying compressed options of row table, set tab->rewrite to - * AT_REWRITE_ALTER_COMPRESSION, which will case rewriting table during - * the phase 3 of altering reloptions. 
- */ - tab->rewrite = AT_REWRITE_ALTER_COMPRESSION; - tab->opt = rel->rd_node.opt; - tab->newOptions = newOptions; - tab->oldOptions = oldOptions; - } else if (tab != NULL) { - tab->rewrite = 0; - } - - /* Special-case validation of view options */ - if (rel->rd_rel->relkind == RELKIND_VIEW) { - Query* view_query = get_view_query(rel); - ListCell* cell = NULL; - bool check_option = false; - - foreach(cell, defList) { - DefElem* defel = (DefElem*)lfirst(cell); - - if (pg_strcasecmp(defel->defname, "check_option") == 0) { - check_option = true; - break; - } - } - - /* - * If the check option is specified, look to see if the view is - * actually auto-updatable or not. - */ - if (check_option) { - const char *view_updatable_error = view_query_is_auto_updatable(view_query, true); - - if (view_updatable_error) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("WITH CHECK OPTION is supported only on auto-updatable views"), - errhint("%s", view_updatable_error))); - - /* - * Views based on MySQL foreign table is not allowed to add check option, - * because returning clause which check option dependend on is not supported - * on MySQL. - */ - if (CheckMySQLFdwForWCO(view_query)) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("WITH CHECK OPTION is not supported on views that base on MySQL foreign table"))); - } - } - - /* - * All we need do here is update the pg_class row; the new options will be - * propagated into relcaches during post-commit cache inval. 
- */ - rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - if (newOptions != (Datum)0) - repl_val[Anum_pg_class_reloptions - 1] = newOptions; - else - repl_null[Anum_pg_class_reloptions - 1] = true; - - repl_repl[Anum_pg_class_reloptions - 1] = true; - - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(pgclass), repl_val, repl_null, repl_repl); - - simple_heap_update(pgclass, &newtuple->t_self, newtuple); - - CatalogUpdateIndexes(pgclass, newtuple); - - tableam_tops_free_tuple(newtuple); - - ReleaseSysCache(tuple); - - AddOrDropUidsAttr(relid, oldRelHasUids, newRelHasUids); - - /* repeat the whole exercise for the toast table, if there's one */ - if (RELATION_IS_PARTITIONED(rel)) { - partCacheList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relid); - foreach (cell, partCacheList) { - partTuple = (HeapTuple)lfirst(cell); - - if (PointerIsValid(partTuple)) { - partForm = (Form_pg_partition)GETSTRUCT(partTuple); - toastOid = partForm->reltoastrelid; - if (OidIsValid(toastOid)) { - ATExecSetRelOptionsToast(toastOid, defList, operation, lockmode); - } - } - } - } else if (RelationIsPartitioned(rel) && needSetPsortOpt) { - // fetch index-partition tuples from pg_partition table, which has the same parentid, - // then set the relation-options for psort tuple in pg_class. 
- // - partCacheList = searchPgPartitionByParentId(PART_OBJ_TYPE_INDEX_PARTITION, relid); - foreach (cell, partCacheList) { - partTuple = (HeapTuple)lfirst(cell); - if (PointerIsValid(partTuple)) { - partForm = (Form_pg_partition)GETSTRUCT(partTuple); - ATExecSetPSortOption(partForm->relcudescrelid, defList, operation, lockmode); - } - } - } else { - if (OidIsValid(rel->rd_rel->reltoastrelid)) { - ATExecSetRelOptionsToast(rel->rd_rel->reltoastrelid, defList, operation, lockmode); - } - - if (needSetPsortOpt) { - ATExecSetPSortOption(psortTid, defList, operation, lockmode); - } - } - heap_close(pgclass, RowExclusiveLock); - - if (RELATION_IS_PARTITIONED(rel)) { - AlterTableSetPartRelOptions(rel, defList, operation, lockmode, merge_list, redis_action); -#ifdef ENABLE_MULTIPLE_NODES - if (IS_MAIN_COORDINATOR) { - UpdatePartPolicyWhenATRelOptions(rel, defList); - } - /* we need to call ATDeltaReloptions on each node for table creating, dropping, deltamerge */ - if (RelationIsTsStore(rel)) { - Tsdb::ATDeltaReloptions(rel, defList); - } -#endif - } - if (merge_list != NULL) { - pfree(merge_list); - merge_list = NULL; - } - list_free_deep(partCacheList); -} - -/* - * Target : data partition - * Brief : - * Description : get oid of target partition from Node *partition, and - * : save it in AlteredTableInfo->partid - * Notes : - */ -static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation rel, Node* partition) -{ - RangePartitionDefState* rangePartDef = NULL; - Oid partOid = InvalidOid; - - if (!RelationIsPartitioned(rel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg( - "\"%s\" must be a partitioned table for 'MOVE PARTITION CLAUSE'", RelationGetRelationName(rel)))); - } - - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, modifying tablespace is not yet supported."), - errcause("The function is not 
implemented."), erraction("Use other actions instead.")))); - } - - if (!RelationIsRelation(rel) && !RelationIsIndex(rel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("can not set tablespace for partition of neither table nor index"))); - } - - if (OidIsValid(tab->partid)) { - ereport( - ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("cannot have multiple MOVE TABLESPACE subcommands"))); - } - - /* check partition node type, it can be RangeVar or RangePartitionDefState */ - switch (nodeTag(partition)) { - case T_RangeVar: { - char objectType = RelationIsRelation(rel) ? PART_OBJ_TYPE_TABLE_PARTITION : PART_OBJ_TYPE_INDEX_PARTITION; - - partOid = PartitionNameGetPartitionOid(rel->rd_id, - ((RangeVar*)partition)->relname, - objectType, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - break; - } - case T_RangePartitionDefState: { - if (RelationIsIndex(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("can not specify 'PARTITION FOR (value,,,)' for 'MOVE PARTITION CLAUSE'"))); - } - rangePartDef = (RangePartitionDefState*)partition; - transformPartitionValue(make_parsestate(NULL), (Node*)rangePartDef, false); - rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, - rangePartDef->boundary); - partOid = - PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, false, false); - break; - } - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("invalid partition node type in 'MOVE PARTITION CLAUSE'"))); - break; - } - } - - /* cehck oid validity of found partition */ - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(rel)) { - 
LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - tab->partid = partOid; -} - -/* - * Target : data partition - * Brief : - * Description : move a partition to new tablespace - * Notes : - */ -static void ATExecSetTableSpaceForPartitionP3(Oid tableOid, Oid partOid, Oid newTableSpace, LOCKMODE lockmode) -{ - Relation rel; - Relation partRel; - Partition part; - Oid oldTableSpace; - Oid reltoastrelid; - Oid reltoastidxid; - Oid newrelfilenode; - Relation pg_partition; - HeapTuple tuple; - Form_pg_partition pd_part; - bool isbucket; - bool newcbi = false; - - /* - * Need lock here in case we are recursing to toast table or index - */ - rel = relation_open(tableOid, NoLock); - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, modifying tablespace is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - part = partitionOpen(rel, partOid, lockmode); - - /* - * No work if no change in tablespace. 
- */ - oldTableSpace = part->pd_part->reltablespace; - if (newTableSpace == oldTableSpace || - (newTableSpace == u_sess->proc_cxt.MyDatabaseTableSpace && oldTableSpace == 0)) { - partitionClose(rel, part, NoLock); - relation_close(rel, NoLock); - return; - } - - /* Can't move a non-shared relation into pg_global */ - if (newTableSpace == GLOBALTABLESPACE_OID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("only shared relations can be placed in pg_global tablespace"))); - - reltoastrelid = part->pd_part->reltoastrelid; - reltoastidxid = part->pd_part->reltoastidxid; - - /* Get a modifiable copy of the relation's pg_class row */ - pg_partition = heap_open(PartitionRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(partOid)); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for partition %u", partOid))); - - pd_part = (Form_pg_partition)GETSTRUCT(tuple); - - /* - * Relfilenodes are not unique across tablespaces, so we need to allocate - * a new one in the new tablespace. - */ - if (RelationGetStorageType(rel) == (uint4)HEAP_DISK) { - newrelfilenode = GetNewRelFileNode(newTableSpace, NULL, rel->rd_rel->relpersistence); - } else { - newcbi = RelationIsCrossBucketIndex(rel); - isbucket = BUCKET_OID_IS_VALID(rel->rd_bucketoid) && !newcbi; - Oid database_id = (ConvertToRelfilenodeTblspcOid(newTableSpace) == GLOBALTABLESPACE_OID) ? 
- InvalidOid : u_sess->proc_cxt.MyDatabaseId; - newrelfilenode = seg_alloc_segment(ConvertToRelfilenodeTblspcOid(newTableSpace), - database_id, isbucket, InvalidBlockNumber); - } - partRel = partitionGetRelation(rel, part); - /* make sure we create the right underlying storage for cross-bucket index */ - partRel->newcbi = newcbi; - atexecset_table_space(partRel, newTableSpace, newrelfilenode); - - elog(LOG, - "Row Partition: %s(%u) tblspc %u/%u/%u => %u/%u/%u", - RelationGetRelationName(partRel), - RelationGetRelid(partRel), - partRel->rd_node.spcNode, - partRel->rd_node.dbNode, - partRel->rd_node.relNode, - newTableSpace, - partRel->rd_node.dbNode, - newrelfilenode); - - /* update the pg_class row */ - pd_part->reltablespace = ConvertToPgclassRelTablespaceOid(newTableSpace); - pd_part->relfilenode = newrelfilenode; - - simple_heap_update(pg_partition, &tuple->t_self, tuple); - CatalogUpdateIndexes(pg_partition, tuple); - - tableam_tops_free_tuple(tuple); - - heap_close(pg_partition, RowExclusiveLock); - - partitionClose(rel, part, NoLock); - releaseDummyRelation(&partRel); - relation_close(rel, NoLock); - - /* Make sure the reltablespace change is visible */ - CommandCounterIncrement(); - - /* Move associated toast relation and/or index, too */ - if (OidIsValid(reltoastrelid)) - ATExecSetTableSpace(reltoastrelid, newTableSpace, lockmode); - if (OidIsValid(reltoastidxid)) - ATExecSetTableSpace(reltoastidxid, newTableSpace, lockmode); -} - -static void atexecset_table_space_internal(Relation rel, RelFileNode& newrnode, Oid newTableSpace, Oid newrelfilenode) -{ - ForkNumber forkNum; - SMgrRelation dstrel; - - /* Open old and new relation */ - dstrel = smgropen(newrnode, rel->rd_backend); - - /* open rel storage avoid relcache invalided*/ - RelationOpenSmgr(rel); - - /* copy main fork */ - copy_relation_data(rel, &dstrel, MAIN_FORKNUM, rel->rd_rel->relpersistence); - - /* copy those extra forks that exist */ - for (forkNum = (ForkNumber)(MAIN_FORKNUM + 1); forkNum <= 
MAX_FORKNUM; forkNum = (ForkNumber)(forkNum + 1)) { - /* it's meaningless to copy BCM files, so ignore it */ - if ((BCM_FORKNUM != forkNum) && smgrexists(rel->rd_smgr, forkNum)) { - smgrcreate(dstrel, forkNum, false); - /* - * WAL log creation if the relation is persistent, or this is the - * init fork of an unlogged relation. - */ - if (RelationNeedsWAL(rel) || - (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED && forkNum == INIT_FORKNUM)) - log_smgrcreate(&newrnode, forkNum); - - copy_relation_data(rel, &dstrel, forkNum, rel->rd_rel->relpersistence); - } - } - - /* close new one */ - smgrclose(dstrel); -} - -static void atexecset_table_space(Relation rel, Oid newTableSpace, Oid newrelfilenode) -{ - /* - * Since we copy the file directly without looking at the shared buffers, - * we'd better first flush out any pages of the source relation that are - * in shared buffers. We assume no new changes will be made while we are - * holding exclusive lock on the rel. - */ - FlushRelationBuffers(rel); - - /* - * Create and copy all forks of the relation, and schedule unlinking of - * old physical files. - * - * NOTE: any conflict in relfilenode value will be caught in - * RelationCreateStorage(). - */ - RelFileNode newrnode; - newrnode = rel->rd_node; - newrnode.relNode = newrelfilenode; - newrnode.spcNode = newTableSpace; - RelationCreateStorage(newrnode, rel->rd_rel->relpersistence, rel->rd_rel->relowner, rel->rd_bucketoid, rel); - - if (RELATION_CREATE_BUCKET(rel)) { - oidvector* bucketlist = searchHashBucketByOid(rel->rd_bucketoid); - for (int i = 0; i < bucketlist->dim1; i++) { - Relation bucket = bucketGetRelation(rel, NULL, bucketlist->values[i]); - - newrnode.bucketNode = bucket->rd_node.bucketNode; - atexecset_table_space_internal(bucket, newrnode, newTableSpace, newrelfilenode); - - bucketCloseRelation(bucket); - } - } else { - // Set base relation tablespace with no hashbucket. 
- atexecset_table_space_internal(rel, newrnode, newTableSpace, newrelfilenode); - } - - // We must open smgr again, because once deal with invalid syscache msg, - // smgr maybe is closed and removed from smgr hash table, thus dst and - // rel->smgr are dangling pointer. If this memory area are reused, it is very - // dangerous if we still use dst and rel->smgr. - RelationOpenSmgr(rel); - - /* drop old relation */ - RelationDropStorage(rel); -} - -static bool NeedToSetTableSpace(Relation oldRelation, Oid targetTableSpace) -{ - /* - * No work if no change in tablespace. - */ - Oid oldTableSpace = oldRelation->rd_rel->reltablespace; - if (targetTableSpace == oldTableSpace || - (targetTableSpace == u_sess->proc_cxt.MyDatabaseTableSpace && oldTableSpace == InvalidOid)) { - return false; - } - - /* - * We cannot support moving mapped relations into different tablespaces. - * (In particular this eliminates all shared catalogs.) - */ - if (RelationIsMapped(oldRelation)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move system relation \"%s\"", RelationGetRelationName(oldRelation)))); - - /* Can't move a non-shared relation into pg_global */ - if (targetTableSpace == GLOBALTABLESPACE_OID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("only shared relations can be placed in pg_global tablespace"))); - - /* - * Don't allow moving temp tables of other backends ... their local buffer - * manager is not going to cope. - * If the top relation is a temp relation in this session, then we treat its - * all subordinate relations as temp. 
- */ - if (!u_sess->cmd_cxt.topRelatationIsInMyTempSession && RELATION_IS_OTHER_TEMP(oldRelation)) - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot move temporary tables of other sessions"))); - - return true; -} - -/* - * Execute ALTER TABLE SET TABLESPACE for cases where there is no tuple - * rewriting to be done, so we just want to copy the data as fast as possible. - */ -static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) -{ - Relation rel; - Oid reltoastrelid; - Oid reltoastidxid; - Oid newrelfilenode; - Relation pg_class; - HeapTuple tuple; - Form_pg_class rd_rel; - bool isbucket; - - /* require that newTableSpace is valid */ - Assert(OidIsValid(newTableSpace)); - - /* - * Need lock here in case we are recursing to toast table or index - */ - rel = relation_open(tableOid, lockmode); - - if (RELATION_IS_GLOBAL_TEMP(rel)) { - const char* objType = RelationIsIndex(rel) ? "index" : "table"; - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("not support alter %s set tablespace on global temp table.", objType))); - } - - /* - * No work if no change in tablespace. - */ - if (!NeedToSetTableSpace(rel, newTableSpace)) { - relation_close(rel, NoLock); - return; - } - - reltoastrelid = rel->rd_rel->reltoastrelid; - reltoastidxid = rel->rd_rel->reltoastidxid; - - /* Get a modifiable copy of the relation's pg_class row */ - pg_class = heap_open(RelationRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(tableOid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", tableOid))); - } - rd_rel = (Form_pg_class)GETSTRUCT(tuple); - - /* - * Relfilenodes are not unique across tablespaces, so we need to allocate - * a new one in the new tablespace. 
- */ - if (rel->storage_type == HEAP_DISK) { - newrelfilenode = GetNewRelFileNode(newTableSpace, NULL, rel->rd_rel->relpersistence); - } else { - Assert(rel->storage_type == SEGMENT_PAGE); - bool newcbi = RelationIsCrossBucketIndex(rel); - isbucket = BUCKET_OID_IS_VALID(rel->rd_bucketoid) && !newcbi; - newrelfilenode = seg_alloc_segment(newTableSpace, rel->rd_node.dbNode, isbucket, InvalidBlockNumber); - /* make sure we create the right underlying storage for cross-bucket index */ - rel->newcbi = newcbi; - } - - atexecset_table_space(rel, newTableSpace, newrelfilenode); - - elog(LOG, "Row Relation: %s(%u) tblspc %u/%u/%u => %u/%u/%u", - RelationGetRelationName(rel), RelationGetRelid(rel), rel->rd_node.spcNode, rel->rd_node.dbNode, - rel->rd_node.relNode, newTableSpace, rel->rd_node.dbNode, newrelfilenode); - - /* update the pg_class row */ - rd_rel->reltablespace = ConvertToPgclassRelTablespaceOid(newTableSpace); - rd_rel->relfilenode = newrelfilenode; - simple_heap_update(pg_class, &tuple->t_self, tuple); - CatalogUpdateIndexes(pg_class, tuple); - - tableam_tops_free_tuple(tuple); - - heap_close(pg_class, RowExclusiveLock); - UpdatePgObjectChangecsn(tableOid, rel->rd_rel->relkind); - - relation_close(rel, NoLock); - - /* Make sure the reltablespace change is visible */ - CommandCounterIncrement(); - - /* Move associated toast relation and/or index, too */ - if (OidIsValid(reltoastrelid)) - ATExecSetTableSpace(reltoastrelid, newTableSpace, lockmode); - if (OidIsValid(reltoastidxid)) - ATExecSetTableSpace(reltoastidxid, newTableSpace, lockmode); -} - - /* - * If the rel isn't temp, we must fsync it down to disk before it's safe - * to commit the transaction. (For a temp rel we don't care since the rel - * will be uninteresting after a crash anyway.) - * - * It's obvious that we must do this when not WAL-logging the copy. It's - * less obvious that we have to do it even if we did WAL-log the copied - * pages. 
The reason is that since we're copying outside shared buffers, a - * CHECKPOINT occurring during the copy has no way to flush the previously - * written data to disk (indeed it won't know the new rel even exists). A - * crash later on would replay WAL from the checkpoint, therefore it - * wouldn't replay our earlier WAL entries. If we do not fsync those pages - * here, they might still not be on disk when the crash occurs. - */ -static void JudgeSmgrDsync(char relpersistence, bool copying_initfork, SMgrRelation dst, ForkNumber forkNum) -{ - if (relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork || - ((relpersistence == RELPERSISTENCE_TEMP) && STMT_RETRY_ENABLED)) { - smgrimmedsync(dst, forkNum); - } -} - -/* - * Copy data, block by block - */ -static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber forkNum, char relpersistence) -{ - char* buf = NULL; - char* unalign_buffer = NULL; - Page page; - bool use_wal = false; - bool copying_initfork = false; - char *bufToWrite = NULL; - BlockNumber nblocks; - BlockNumber blkno; - TdeInfo tde_info = {0}; - SMgrRelation src = rel->rd_smgr; - SMgrRelation dst = *dstptr; - RelFileNode newFileNode = dst->smgr_rnode.node; - BackendId backendId = dst->smgr_rnode.backend; - - /* - * palloc the buffer so that it's MAXALIGN'd. If it were just a local - * char[] array, the compiler might align it on any byte boundary, which - * can seriously hurt transfer speed to and from the kernel; not to - * mention possibly making log_newpage's accesses to the page header fail. 
- */ - ADIO_RUN() - { - buf = (char*)adio_align_alloc(BLCKSZ); - } - ADIO_ELSE() - { - if (ENABLE_DSS) { - unalign_buffer = (char*)palloc(BLCKSZ + ALIGNOF_BUFFER); - buf = (char*)BUFFERALIGN(unalign_buffer); - } else { - buf = (char*)palloc(BLCKSZ); - } - } - ADIO_END(); - page = (Page)buf; - - /* - * The init fork for an unlogged relation in many respects has to be - * treated the same as normal relation, changes need to be WAL logged and - * it needs to be synced to disk. - */ - copying_initfork = relpersistence == RELPERSISTENCE_UNLOGGED && forkNum == INIT_FORKNUM; - - /* - * We need to log the copied data in WAL iff WAL archiving/streaming is - * enabled AND it's a permanent relation. - */ - use_wal = XLogIsNeeded() && (relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork || - ((relpersistence == RELPERSISTENCE_TEMP) && STMT_RETRY_ENABLED)); - - nblocks = smgrnblocks(src, forkNum); - - /* - * check tablespace size limitation when extending new relation file. - * Segment-page storage will check limitation in smgrextend. But we have to get tablespace limit - * infomation here to avoid accessing system table in smgrextend. - */ - if (IsSegmentFileNode(newFileNode)) { - TableSpaceUsageManager::IsExceedMaxsize(newFileNode.spcNode, 0, true); - } else { - TableSpaceUsageManager::IsExceedMaxsize(newFileNode.spcNode, ((uint64)BLCKSZ) * nblocks, false); - } - - // We must open smgr again, because once deal with invalid syscache msg, - // smgr maybe is closed and removed from smgr hash table, thus dst and - // rel->smgr are dangling pointer. If this memory area are reused, it is very - // dangerous if we still use dst and rel->smgr. 
- // - RelationOpenSmgr(rel); - *dstptr = dst = smgropen(newFileNode, backendId); - src = rel->rd_smgr; - - /* maybe can add prefetch here */ - for (blkno = 0; blkno < nblocks; blkno++) { - /* If we got a cancel signal during the copy of the data, quit */ - CHECK_FOR_INTERRUPTS(); - - SMGR_READ_STATUS rdStatus = smgrread(src, forkNum, blkno, buf); - - if (rdStatus == SMGR_RD_CRC_ERROR) { - addBadBlockStat(&src->smgr_rnode.node, forkNum); - addGlobalRepairBadBlockStat(src->smgr_rnode, forkNum, blkno); - - if (RelationNeedsWAL(rel) && CanRemoteRead() && !IsSegmentFileNode(src->smgr_rnode.node)) { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid page in block %u of relation %s, try to remote read", - blkno, - relpath(src->smgr_rnode, forkNum)), - handle_in_client(true))); - - RemoteReadBlock(src->smgr_rnode, forkNum, blkno, buf, NULL); - - /* - * segment-page storage may fail here, because it use logic blocknumber while CRC - * use physical block number - */ - if (PageIsVerified(page, blkno)) { - smgrwrite(src, forkNum, blkno, buf, false); - UpdateRepairTime(src->smgr_rnode.node, forkNum, blkno); - } else { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - (errmsg("fail to remote read page, data corrupted in network")))); - } - } else { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid page in block %u of relation %s", - blkno, - relpathbackend(src->smgr_rnode.node, src->smgr_rnode.backend, forkNum)))); - } - } - - PageDataDecryptIfNeed(page); - - if (RelationisEncryptEnable(rel)) { - GetTdeInfoFromRel(rel, &tde_info); - } - - if (IsSegmentFileNode(dst->smgr_rnode.node)) { - /* - * Segment-page storage requiress extending data through buffer to set LSN correctly, - * and avoid half-page write. DataEncrypt and SetChecksum is done during buffer flush. 
- */ - Assert(use_wal); - - Buffer buf = ReadBufferWithoutRelcache(dst->smgr_rnode.node, forkNum, P_NEW, RBM_NORMAL, NULL, NULL); -#ifdef USE_ASSERT_CHECKING - BufferDesc *buf_desc = GetBufferDescriptor(buf - 1); - Assert(buf_desc->tag.blockNum == blkno); -#endif - LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); - XLogRecPtr xlog_ptr = log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false, &tde_info); - errno_t rc = memcpy_s(BufferGetBlock(buf), BLCKSZ, page, BLCKSZ); - securec_check(rc, "\0", "\0"); - PageSetLSN(BufferGetPage(buf), xlog_ptr); - MarkBufferDirty(buf); - - UnlockReleaseBuffer(buf); - } else { - /* - * WAL-log the copied page. Unfortunately we don't know what kind of a - * page this is, so we have to log the full page including any unused - * space. - */ - if (use_wal) { - log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false, &tde_info); - } - - if (RelationisEncryptEnable(rel)) { - bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); - } else { - bufToWrite = page; - } - - PageSetChecksumInplace((Page)bufToWrite, blkno); - - smgrextend(dst, forkNum, blkno, bufToWrite, true); - } - } - - ADIO_RUN() - { - adio_align_free(buf); - } - ADIO_ELSE() - { - if (ENABLE_DSS) { - pfree_ext(unalign_buffer); - } else { - pfree_ext(buf); - } - } - ADIO_END(); - - JudgeSmgrDsync(relpersistence, copying_initfork, dst, forkNum); -} - -static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char relpersistence, BlockNumber srcBlocks, - BlockNumber destBlocks, TupleDesc srcTupleDesc, Oid srcToastOid, Oid destToastOid, HTAB* chunkIdHashTable, - bool destHasFSM) -{ - char* buf = NULL; - char* unaligned_buffer = NULL; - char* bufToWrite = NULL; - Page page = NULL; - bool use_wal = false; - BlockNumber src_blkno = 0; - BlockNumber dest_blkno = 0; - HeapTupleData tuple; - TdeInfo tde_info = {0}; - errno_t rc = EOK; - - if (srcBlocks == 0) { - return; - } - - // check tablespace size limitation when extending new files. 
- STORAGE_SPACE_OPERATION(dest, ((uint64)BLCKSZ) * srcBlocks); - - /* - * palloc the buffer so that it's MAXALIGN'd. If it were just a local - * char[] array, the compiler might align it on any byte boundary, which - * can seriously hurt transfer speed to and from the kernel; not to - * mention possibly making log_newpage's accesses to the page header fail. - */ - ADIO_RUN() - { - buf = (char*)adio_align_alloc(BLCKSZ); - } - ADIO_ELSE() - { - if (ENABLE_DSS) { - unaligned_buffer = (char*)palloc(BLCKSZ + ALIGNOF_BUFFER); - buf = (char*)BUFFERALIGN(unaligned_buffer); - } else { - buf = (char*)palloc(BLCKSZ); - } - } - ADIO_END(); - page = (Page)buf; - - /* - * We need to log the copied data in WAL iff WAL archiving/streaming is - * enabled AND it's a permanent relation. - */ - use_wal = XLogIsNeeded() && (relpersistence == RELPERSISTENCE_PERMANENT || - ((relpersistence == RELPERSISTENCE_TEMP) && STMT_RETRY_ENABLED)); - - for (src_blkno = 0; src_blkno < srcBlocks; src_blkno++) { - OffsetNumber tupleNum = 0; - OffsetNumber tupleNo = 0; - - /* If we got a cancel signal during the copy of the data, quit */ - CHECK_FOR_INTERRUPTS(); - - RelationOpenSmgr(src); - SMGR_READ_STATUS rdStatus = smgrread(src->rd_smgr, forkNum, src_blkno, buf); - - if (rdStatus == SMGR_RD_CRC_ERROR) { - addBadBlockStat(&src->rd_node, forkNum); - addGlobalRepairBadBlockStat(src->rd_smgr->smgr_rnode, forkNum, src_blkno); - - if (RelationNeedsWAL(src) && CanRemoteRead() && !IsSegmentFileNode(src->rd_node)) { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid page in block %u of relation %s, try to remote read", - src_blkno, - relpath(src->rd_smgr->smgr_rnode, forkNum)), - handle_in_client(true))); - - RemoteReadBlock(src->rd_smgr->smgr_rnode, forkNum, src_blkno, buf, NULL); - - if (PageIsVerified(page, src_blkno)) { - smgrwrite(src->rd_smgr, forkNum, src_blkno, buf, false); - UpdateRepairTime(src->rd_node, forkNum, src_blkno); - } else { - ereport(ERROR, - 
(errcode(ERRCODE_DATA_CORRUPTED), - (errmsg("fail to remote read page, data corrupted in network")))); - } - } else { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid page in block %u of relation %s", - src_blkno, - relpathbackend(src->rd_smgr->smgr_rnode.node, src->rd_smgr->smgr_rnode.backend, forkNum)))); - } - } - - PageDataDecryptIfNeed(page); - - dest_blkno = destBlocks + src_blkno; - tupleNum = tableam_tops_page_get_max_offsetnumber(src, page); - - for (tupleNo = FirstOffsetNumber; tupleNo <= tupleNum; tupleNo++) { - - if (!tableam_tops_page_get_item(src, &tuple, page, tupleNo, destBlocks)) { - continue; - } - ChunkIdHashKey hashkey; - OldToNewChunkIdMapping mapping = NULL; - - /* If toast storage, modify va_toastrelid and va_valueid. */ - if (OidIsValid(destToastOid)) { - int numAttrs = srcTupleDesc->natts; - Datum values[numAttrs]; - bool isNull[numAttrs]; - int i = 0; - - /* Ustore not support compress yet */ - if (RelationIsUstoreFormat(src) || !HEAP_TUPLE_IS_COMPRESSED(tuple.t_data)) { - tableam_tops_deform_tuple(&tuple, srcTupleDesc, values, isNull); - } else { - Assert(page != NULL); - Assert(PageIsCompressed(page)); - tableam_tops_deform_cmprs_tuple(&tuple, srcTupleDesc, values, isNull, (char*)getPageDict(page)); - } - for (i = 0; i < numAttrs; i++) { - struct varlena* value = NULL; - - value = (struct varlena*)DatumGetPointer(values[i]); - if (srcTupleDesc->attrs[i].attlen == -1 && !isNull[i] && VARATT_IS_EXTERNAL(value)) { - struct varatt_external* toastPointer = NULL; - - toastPointer = (varatt_external*)(VARDATA_EXTERNAL((varattrib_1b_e*)(value))); - toastPointer->va_toastrelid = destToastOid; - - rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey)); - securec_check(rc, "\0", "\0"); - hashkey.toastTableOid = srcToastOid; - hashkey.oldChunkId = toastPointer->va_valueid; - - mapping = (OldToNewChunkIdMapping)hash_search(chunkIdHashTable, &hashkey, HASH_FIND, NULL); - - if (PointerIsValid(mapping)) { - 
toastPointer->va_valueid = mapping->newChunkId; - } - } - } - } else if (RelationIsToast(dest)) { - /* for toast, more than 1GB CLOB/BLOB the first chunk chunk_data */ - Datum values[3]; - bool isNull[3]; - tableam_tops_deform_tuple(&tuple, src->rd_att, values, isNull); - struct varlena* value = (struct varlena*)DatumGetPointer(values[2]); - if (!isNull[2] && VARATT_IS_EXTERNAL_ONDISK_B(value)) { - struct varatt_external* toastPointer = NULL; - - toastPointer = (varatt_external*)(VARDATA_EXTERNAL((varattrib_1b_e*)(value))); - Assert(toastPointer->va_toastrelid == src->rd_id); - toastPointer->va_toastrelid = dest->rd_id; - - rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey)); - securec_check(rc, "\0", "\0"); - hashkey.toastTableOid = src->rd_id; - hashkey.oldChunkId = toastPointer->va_valueid; - - mapping = (OldToNewChunkIdMapping)hash_search(chunkIdHashTable, &hashkey, HASH_FIND, NULL); - - if (PointerIsValid(mapping)) { - toastPointer->va_valueid = mapping->newChunkId; - } - } - } - } - - /* merge FSM */ - if (RelationisEncryptEnable(src)) { - GetTdeInfoFromRel(src, &tde_info); - } - if (destHasFSM) { - Size freespace = tableam_tops_page_get_freespace(src, page); - - RecordPageWithFreeSpace(dest, dest_blkno, freespace); - } - - if (IsSegmentFileNode(dest->rd_node)) { - Assert(use_wal); - Buffer buf = ReadBufferExtended(dest, forkNum, P_NEW, RBM_NORMAL, NULL); -#ifdef USE_ASSERT_CHECKING - BufferDesc *buf_desc = GetBufferDescriptor(buf - 1); - Assert(buf_desc->tag.blockNum == dest_blkno); -#endif - LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); - XLogRecPtr xlog_ptr = log_newpage(&dest->rd_node, forkNum, dest_blkno, page, true, &tde_info); - errno_t rc = memcpy_s(BufferGetBlock(buf), BLCKSZ, page, BLCKSZ); - securec_check(rc, "\0", "\0"); - PageSetLSN(BufferGetPage(buf), xlog_ptr); - MarkBufferDirty(buf); - UnlockReleaseBuffer(buf); - } else { - /* - * XLOG stuff - * Retry to open smgr in case it is cloesd when we process SI messages - */ - 
RelationOpenSmgr(dest); - if (use_wal) { - log_newpage(&dest->rd_smgr->smgr_rnode.node, forkNum, dest_blkno, page, true, &tde_info); - } - - if (RelationisEncryptEnable(src)) { - bufToWrite = PageDataEncryptIfNeed(page, &tde_info, true); - } else { - bufToWrite = page; - } - - /* heap block mix in the block number to checksum. need recalculate */ - PageSetChecksumInplace((Page)bufToWrite, dest_blkno); - - smgrextend(dest->rd_smgr, forkNum, dest_blkno, bufToWrite, true); - } - } - - ADIO_RUN() - { - adio_align_free(buf); - } - ADIO_ELSE() - { - if (ENABLE_DSS) { - pfree_ext(unaligned_buffer); - } else { - pfree_ext(buf); - } - } - ADIO_END(); - - /* - * If the rel isn't temp, we must fsync it down to disk before it's safe - * to commit the transaction. (For a temp rel we don't care since the rel - * will be uninteresting after a crash anyway.) - * - * It's obvious that we must do this when not WAL-logging the copy. It's - * less obvious that we have to do it even if we did WAL-log the copied - * pages. The reason is that since we're copying outside shared buffers, a - * CHECKPOINT occurring during the copy has no way to flush the previously - * written data to disk (indeed it won't know the new rel even exists). A - * crash later on would replay WAL from the checkpoint, therefore it - * wouldn't replay our earlier WAL entries. If we do not fsync those pages - * here, they might still not be on disk when the crash occurs. 
- */ - if (relpersistence == RELPERSISTENCE_PERMANENT || ((relpersistence == RELPERSISTENCE_TEMP) && STMT_RETRY_ENABLED)) - smgrimmedsync(dest->rd_smgr, forkNum); -} - -static void mergeVMBlock(Relation src, Relation dest, BlockNumber srcHeapBlocks, BlockNumber destHeapBlocks) -{ - Buffer srcVMBuffer = InvalidBuffer; - Buffer destVMBuffer = InvalidBuffer; - BlockNumber srcBlockNum = 0; - - for (srcBlockNum = 0; srcBlockNum < srcHeapBlocks; srcBlockNum++) { - bool VMValue = false; - - visibilitymap_pin(dest, destHeapBlocks + srcBlockNum, &destVMBuffer); - VMValue = visibilitymap_test(src, srcBlockNum, &srcVMBuffer); - - if (VMValue) { - visibilitymap_set(dest, - destHeapBlocks + srcBlockNum, - InvalidBuffer, - InvalidXLogRecPtr, - destVMBuffer, - InvalidTransactionId, - false); - } - - } - - if (BufferIsValid(srcVMBuffer)) { - ReleaseBuffer(srcVMBuffer); - } - - if (BufferIsValid(destVMBuffer)) { - ReleaseBuffer(destVMBuffer); - } -} - -/* - * ALTER TABLE ENABLE/DISABLE TRIGGER - * - * We just pass this off to trigger.c. - */ -static void ATExecEnableDisableTrigger( - Relation rel, const char* trigname, char fires_when, bool skip_system, LOCKMODE lockmode) -{ - EnableDisableTrigger(rel, trigname, fires_when, skip_system); -} - -/* - * ALTER TABLE ENABLE/DISABLE RULE - * - * We just pass this off to rewriteDefine.c. - */ -static void ATExecEnableDisableRule(Relation rel, const char* trigname, char fires_when, LOCKMODE lockmode) -{ - EnableDisableRule(rel, trigname, fires_when); -} - -/* - * ATExecEnableDisableRls - * - * ALTER TABLE table_name ENABLE/DISABLE ROW LEVEL SECURITY. - * ALTER TABLE table_name FORCE/NO FORCE ROW LEVEL SECURITY. 
- * - * @param (in) rel: the relation information - * @param (in) changeType: ENABLE, DISABLE, FORCE, NO FORCE R.L.S - * @param (in) lockmode: Lock mode for relation open - * @return: NULL - */ -void ATExecEnableDisableRls(Relation rel, RelationRlsStatus changeType, LOCKMODE lockmode) -{ - /* Check whether need to change on current node */ - if (SupportRlsOnCurrentNode() == false) { - return; - } - /* Check license whether support this feature */ - LicenseSupportRls(); - - /* Check whether support RLS for this relation */ - SupportRlsForRel(rel); - - List* roption = NULL; - /* Enable or Disable Rls */ - switch (changeType) { - case RELATION_RLS_ENABLE: - /* update the column data when RLS is disabled */ - roption = list_make1(makeDefElem("enable_rowsecurity", (Node*)makeString("true"))); - ATExecSetRelOptions(rel, roption, AT_SetRelOptions, lockmode); - break; - case RELATION_RLS_DISABLE: - /* update the column data when RLS is disabled */ - roption = list_make1(makeDefElem("enable_rowsecurity", NULL)); - ATExecSetRelOptions(rel, roption, AT_ResetRelOptions, lockmode); - break; - case RELATION_RLS_FORCE_ENABLE: - /* update the column data when RLS is not forced */ - roption = list_make1(makeDefElem("force_rowsecurity", (Node*)makeString("true"))); - ATExecSetRelOptions(rel, roption, AT_SetRelOptions, lockmode); - break; - case RELATION_RLS_FORCE_DISABLE: - /* only update the column data when RLS is forced */ - roption = list_make1(makeDefElem("force_rowsecurity", NULL)); - ATExecSetRelOptions(rel, roption, AT_ResetRelOptions, lockmode); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("unknown type %u for ALTER TABLE ROW LEVEL SECURITY", changeType))); - } -} - -/* - * ATExecEncryptionKeyRotation - * - * ALTER TABLE table_name ENCRYPTION KEY ROTATION. 
- * - * @param (in) rel: the relation information - * @param (in) lockmode: Lock mode for relation open - * @return: NULL - */ -static void ATExecEncryptionKeyRotation(Relation rel, LOCKMODE lockmode) -{ - List* tde_reloption = NULL; - char* dek_cipher = NULL; - const TDEData* tde_data = NULL; - errno_t rc = EOK; - - /* get new tde info */ - TDEKeyManager *tde_key_manager= New(CurrentMemoryContext) TDEKeyManager(); - tde_key_manager->init(); - tde_data = tde_key_manager->create_dek(); - - dek_cipher = (char*)palloc0(strlen(tde_data->dek_cipher) + 1); - rc = memcpy_s(dek_cipher, (strlen(tde_data->dek_cipher) + 1), tde_data->dek_cipher, - (strlen(tde_data->dek_cipher) + 1)); - securec_check(rc, "\0", "\0"); - - /* encryption key rotation */ - tde_reloption = list_make1(makeDefElem("dek_cipher", (Node*)makeString(dek_cipher))); - ATExecSetRelOptions(rel, tde_reloption, AT_SetRelOptions, lockmode, true); - ereport(LOG, (errmsg("TDE key rotation success"), errdetail("check TDE table info dek_cipher is changed"))); - /* sync TDE storage hash table */ - if (IS_PGXC_DATANODE) { - tde_key_manager->save_key(tde_data); - } - DELETE_EX2(tde_key_manager); - pfree_ext(dek_cipher); -} - -/* - * ALTER TABLE INHERIT - * - * Add a parent to the child's parents. This verifies that all the columns and - * check constraints of the parent appear in the child and that they have the - * same data types and expressions. - */ -static void ATPrepAddInherit(Relation child_rel) -{ - if (child_rel->rd_rel->reloftype) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot change inheritance of typed table"))); -} - /* - * Return the address of the new parent relation. 
- */ -static ObjectAddress ATExecAddInherit(Relation child_rel, RangeVar* parent, LOCKMODE lockmode) -{ - Relation parent_rel, catalogRelation; - SysScanDesc scan; - ScanKeyData key; - HeapTuple inheritsTuple; - int32 inhseqno; - List* children = NIL; - ObjectAddress address; - - if (RELATION_IS_PARTITIONED(child_rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("relation \"%s\" is a partitioned table", RelationGetRelationName(child_rel)), - errdetail("can not add inheritance for a partitioned table"))); - } - - /* - * A self-exclusive lock is needed here. See the similar case in - * MergeAttributes() for a full explanation. - */ - parent_rel = heap_openrv(parent, ShareUpdateExclusiveLock); - - if (RELATION_IS_PARTITIONED(parent_rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("inherited relation \"%s\" is a partitioned table", parent->relname), - errdetail("can not inherit from partitioned table"))); - } - - /* - * Must be owner of both parent and child -- child was checked by - * ATSimplePermissions call in ATPrepCmd - */ - ATSimplePermissions(parent_rel, ATT_TABLE); - - /* Permanent rels cannot inherit from temporary ones */ - if (parent_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP && - child_rel->rd_rel->relpersistence != RELPERSISTENCE_TEMP) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot inherit from temporary relation \"%s\"", RelationGetRelationName(parent_rel)))); - - /* If parent rel is temp, it must belong to this session */ - if (parent_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP && !RelationIsLocalTemp(parent_rel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot inherit from temporary relation of another session"))); - - /* Ditto for the child */ - if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP && !RelationIsLocalTemp(child_rel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot inherit to temporary relation 
of another session"))); - - /* - * Check for duplicates in the list of parents, and determine the highest - * inhseqno already present; we'll use the next one for the new parent. - * (Note: get RowExclusiveLock because we will write pg_inherits below.) - * - * Note: we do not reject the case where the child already inherits from - * the parent indirectly; CREATE TABLE doesn't reject comparable cases. - */ - catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock); - ScanKeyInit( - &key, Anum_pg_inherits_inhrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(child_rel))); - scan = systable_beginscan(catalogRelation, InheritsRelidSeqnoIndexId, true, NULL, 1, &key); - - /* inhseqno sequences start at 1 */ - inhseqno = 0; - while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan))) { - Form_pg_inherits inh = (Form_pg_inherits)GETSTRUCT(inheritsTuple); - - if (inh->inhparent == RelationGetRelid(parent_rel)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_TABLE), - errmsg("relation \"%s\" would be inherited from more than once", - RelationGetRelationName(parent_rel)))); - if (inh->inhseqno > inhseqno) - inhseqno = inh->inhseqno; - } - systable_endscan(scan); - - /* - * Prevent circularity by seeing if proposed parent inherits from child. - * (In particular, this disallows making a rel inherit from itself.) - * - * This is not completely bulletproof because of race conditions: in - * multi-level inheritance trees, someone else could concurrently be - * making another inheritance link that closes the loop but does not join - * either of the rels we have locked. Preventing that seems to require - * exclusive locks on the entire inheritance tree, which is a cure worse - * than the disease. find_all_inheritors() will cope with circularity - * anyway, so don't sweat it too much. - * - * We use weakest lock we can on child's children, namely AccessShareLock. 
- */ - children = find_all_inheritors(RelationGetRelid(child_rel), AccessShareLock, NULL); - - if (list_member_oid(children, RelationGetRelid(parent_rel))) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_TABLE), - errmsg("circular inheritance not allowed"), - errdetail( - "\"%s\" is already a child of \"%s\".", parent->relname, RelationGetRelationName(child_rel)))); - - /* If parent has OIDs then child must have OIDs */ - if (parent_rel->rd_rel->relhasoids && !child_rel->rd_rel->relhasoids) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs", - RelationGetRelationName(child_rel), - RelationGetRelationName(parent_rel)))); - - /* Match up the columns and bump attinhcount as needed */ - MergeAttributesIntoExisting(child_rel, parent_rel); - - /* Match up the constraints and bump coninhcount as needed */ - MergeConstraintsIntoExisting(child_rel, parent_rel); - - /* - * OK, it looks valid. Make the catalog entries that show inheritance. 
- */ - StoreCatalogInheritance1(RelationGetRelid(child_rel), RelationGetRelid(parent_rel), inhseqno + 1, catalogRelation); - - ObjectAddressSet(address, RelationRelationId, - RelationGetRelid(parent_rel)); - - /* Now we're done with pg_inherits */ - heap_close(catalogRelation, RowExclusiveLock); - - /* keep our lock on the parent relation until commit */ - heap_close(parent_rel, NoLock); - - return address; -} - -/* - * Obtain the source-text form of the constraint expression for a check - * constraint, given its pg_constraint tuple - */ -static char* decompile_conbin(HeapTuple contup, TupleDesc tupdesc) -{ - Form_pg_constraint con; - bool isnull = false; - Datum attr; - Datum expr; - - con = (Form_pg_constraint)GETSTRUCT(contup); - attr = heap_getattr(contup, Anum_pg_constraint_conbin, tupdesc, &isnull); - if (isnull) - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("null conbin for constraint %u", HeapTupleGetOid(contup)))); - - expr = DirectFunctionCall2(pg_get_expr, attr, ObjectIdGetDatum(con->conrelid)); - return TextDatumGetCString(expr); -} - -/* - * Determine whether two check constraints are functionally equivalent - * - * The test we apply is to see whether they reverse-compile to the same - * source string. This insulates us from issues like whether attributes - * have the same physical column numbers in parent and child relations. - */ -static bool constraints_equivalent(HeapTuple a, HeapTuple b, TupleDesc tupleDesc) -{ - Form_pg_constraint acon = (Form_pg_constraint)GETSTRUCT(a); - Form_pg_constraint bcon = (Form_pg_constraint)GETSTRUCT(b); - - if (acon->condeferrable != bcon->condeferrable || acon->condeferred != bcon->condeferred || - strcmp(decompile_conbin(a, tupleDesc), decompile_conbin(b, tupleDesc)) != 0) - return false; - else - return true; -} - -/* - * Check columns in child table match up with columns in parent, and increment - * their attinhcount. 
- * - * Called by ATExecAddInherit - * - * Currently all parent columns must be found in child. Missing columns are an - * error. One day we might consider creating new columns like CREATE TABLE - * does. However, that is widely unpopular --- in the common use case of - * partitioned tables it's a foot-gun. - * - * The data type must match exactly. If the parent column is NOT NULL then - * the child must be as well. Defaults are not compared, however. - */ -static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel) -{ - Relation attrrel; - AttrNumber parent_attno; - int parent_natts; - TupleDesc tupleDesc; - HeapTuple tuple; - - attrrel = heap_open(AttributeRelationId, RowExclusiveLock); - - tupleDesc = RelationGetDescr(parent_rel); - parent_natts = tupleDesc->natts; - - for (parent_attno = 1; parent_attno <= parent_natts; parent_attno++) { - Form_pg_attribute attribute = &tupleDesc->attrs[parent_attno - 1]; - char* attributeName = NameStr(attribute->attname); - - /* Ignore dropped columns in the parent. */ - if (attribute->attisdropped) - continue; - - /* Find same column in child (matching on column name). */ - tuple = SearchSysCacheCopyAttName(RelationGetRelid(child_rel), attributeName); - if (HeapTupleIsValid(tuple)) { - /* Check they are same type, typmod, and collation */ - Form_pg_attribute childatt = (Form_pg_attribute)GETSTRUCT(tuple); - - if (attribute->atttypid != childatt->atttypid || attribute->atttypmod != childatt->atttypmod) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("child table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(child_rel), - attributeName))); - - if (attribute->attcollation != childatt->attcollation) - ereport(ERROR, - (errcode(ERRCODE_COLLATION_MISMATCH), - errmsg("child table \"%s\" has different collation for column \"%s\"", - RelationGetRelationName(child_rel), - attributeName))); - - /* - * Check child doesn't discard NOT NULL property. 
(Other - * constraints are checked elsewhere.) - */ - if (attribute->attnotnull && !childatt->attnotnull) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" in child table must be marked NOT NULL", attributeName))); - - /* - * OK, bump the child column's inheritance count. (If we fail - * later on, this change will just roll back.) - */ - childatt->attinhcount++; - simple_heap_update(attrrel, &tuple->t_self, tuple); - CatalogUpdateIndexes(attrrel, tuple); - tableam_tops_free_tuple(tuple); - } else { - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("child table is missing column \"%s\"", attributeName))); - } - } - - heap_close(attrrel, RowExclusiveLock); -} - -/* - * Check constraints in child table match up with constraints in parent, - * and increment their coninhcount. - * - * Constraints that are marked ONLY in the parent are ignored. - * - * Called by ATExecAddInherit - * - * Currently all constraints in parent must be present in the child. One day we - * may consider adding new constraints like CREATE TABLE does. - * - * XXX This is O(N^2) which may be an issue with tables with hundreds of - * constraints. As long as tables have more like 10 constraints it shouldn't be - * a problem though. Even 100 constraints ought not be the end of the world. - * - * XXX See MergeWithExistingConstraint too if you change this code. 
- */ -static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel) -{ - Relation catalog_relation; - TupleDesc tuple_desc; - SysScanDesc parent_scan; - ScanKeyData parent_key; - HeapTuple parent_tuple; - - catalog_relation = heap_open(ConstraintRelationId, RowExclusiveLock); - tuple_desc = RelationGetDescr(catalog_relation); - - /* Outer loop scans through the parent's constraint definitions */ - ScanKeyInit(&parent_key, - Anum_pg_constraint_conrelid, - BTEqualStrategyNumber, - F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(parent_rel))); - parent_scan = systable_beginscan(catalog_relation, ConstraintRelidIndexId, true, NULL, 1, &parent_key); - - while (HeapTupleIsValid(parent_tuple = systable_getnext(parent_scan))) { - Form_pg_constraint parent_con = (Form_pg_constraint)GETSTRUCT(parent_tuple); - SysScanDesc child_scan; - ScanKeyData child_key; - HeapTuple child_tuple; - bool found = false; - - if (parent_con->contype != CONSTRAINT_CHECK) - continue; - - /* if the parent's constraint is marked NO INHERIT, it's not inherited */ - if (parent_con->connoinherit) - continue; - - /* Search for a child constraint matching this one */ - ScanKeyInit(&child_key, - Anum_pg_constraint_conrelid, - BTEqualStrategyNumber, - F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(child_rel))); - child_scan = systable_beginscan(catalog_relation, ConstraintRelidIndexId, true, NULL, 1, &child_key); - - while (HeapTupleIsValid(child_tuple = systable_getnext(child_scan))) { - Form_pg_constraint child_con = (Form_pg_constraint)GETSTRUCT(child_tuple); - HeapTuple child_copy; - - if (child_con->contype != CONSTRAINT_CHECK) - continue; - - if (strcmp(NameStr(parent_con->conname), NameStr(child_con->conname)) != 0) - continue; - - if (!constraints_equivalent(parent_tuple, child_tuple, tuple_desc)) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("child table \"%s\" has different definition for check constraint \"%s\"", - RelationGetRelationName(child_rel), - 
NameStr(parent_con->conname)))); - - /* If the constraint is "no inherit" then cannot merge */ - if (child_con->connoinherit) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("constraint \"%s\" conflicts with non-inherited constraint on child table \"%s\"", - NameStr(child_con->conname), - RelationGetRelationName(child_rel)))); - - /* - * OK, bump the child constraint's inheritance count. (If we fail - * later on, this change will just roll back.) - */ - child_copy = (HeapTuple)tableam_tops_copy_tuple(child_tuple); - child_con = (Form_pg_constraint)GETSTRUCT(child_copy); - child_con->coninhcount++; - simple_heap_update(catalog_relation, &child_copy->t_self, child_copy); - CatalogUpdateIndexes(catalog_relation, child_copy); - tableam_tops_free_tuple(child_copy); - - found = true; - break; - } - - systable_endscan(child_scan); - - if (!found) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("child table is missing constraint \"%s\"", NameStr(parent_con->conname)))); - } - - systable_endscan(parent_scan); - heap_close(catalog_relation, RowExclusiveLock); -} - -/* - * ALTER TABLE NO INHERIT - * - * Drop a parent from the child's parents. This just adjusts the attinhcount - * and attislocal of the columns and removes the pg_inherit and pg_depend - * entries. - * - * If attinhcount goes to 0 then attislocal gets set to true. If it goes back - * up attislocal stays true, which means if a child is ever removed from a - * parent then its columns will never be automatically dropped which may - * surprise. But at least we'll never surprise by dropping columns someone - * isn't expecting to be dropped which would actually mean data loss. - * - * coninhcount and conislocal for inherited constraints are adjusted in - * exactly the same way. 
- */ -static ObjectAddress ATExecDropInherit(Relation rel, RangeVar* parent, LOCKMODE lockmode) -{ - Relation parent_rel; - Oid parent_oid; - Relation catalogRelation; - SysScanDesc scan; - ScanKeyData key[3]; - HeapTuple inheritsTuple, attributeTuple, constraintTuple; - List* connames = NIL; - bool found = false; - ObjectAddress address; - - /* - * AccessShareLock on the parent is probably enough, seeing that DROP - * TABLE doesn't lock parent tables at all. We need some lock since we'll - * be inspecting the parent's schema. - */ - parent_rel = heap_openrv(parent, AccessShareLock); - - /* - * We don't bother to check ownership of the parent table --- ownership of - * the child is presumed enough rights. - */ - /* - * Find and destroy the pg_inherits entry linking the two, or error out if - * there is none. - */ - catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock); - ScanKeyInit( - &key[0], Anum_pg_inherits_inhrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(catalogRelation, InheritsRelidSeqnoIndexId, true, NULL, 1, key); - - while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan))) { - Oid inhparent; - - inhparent = ((Form_pg_inherits)GETSTRUCT(inheritsTuple))->inhparent; - if (inhparent == RelationGetRelid(parent_rel)) { - simple_heap_delete(catalogRelation, &inheritsTuple->t_self); - found = true; - break; - } - } - parent_oid = RelationGetRelid(parent_rel); - systable_endscan(scan); - heap_close(catalogRelation, RowExclusiveLock); - - if (!found) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("relation \"%s\" is not a parent of relation \"%s\"", - RelationGetRelationName(parent_rel), - RelationGetRelationName(rel)))); - - /* - * Search through child columns looking for ones matching parent rel - */ - catalogRelation = heap_open(AttributeRelationId, RowExclusiveLock); - ScanKeyInit( - &key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, 
ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(catalogRelation, AttributeRelidNumIndexId, true, NULL, 1, key); - while (HeapTupleIsValid(attributeTuple = systable_getnext(scan))) { - Form_pg_attribute att = (Form_pg_attribute)GETSTRUCT(attributeTuple); - - /* Ignore if dropped or not inherited */ - if (att->attisdropped) - continue; - if (att->attinhcount <= 0) - continue; - - if (SearchSysCacheExistsAttName(RelationGetRelid(parent_rel), NameStr(att->attname))) { - /* Decrement inhcount and possibly set islocal to true */ - HeapTuple copyTuple = (HeapTuple)tableam_tops_copy_tuple(attributeTuple); - Form_pg_attribute copy_att = (Form_pg_attribute)GETSTRUCT(copyTuple); - - copy_att->attinhcount--; - if (copy_att->attinhcount == 0) - copy_att->attislocal = true; - - simple_heap_update(catalogRelation, ©Tuple->t_self, copyTuple); - CatalogUpdateIndexes(catalogRelation, copyTuple); - tableam_tops_free_tuple(copyTuple); - } - } - systable_endscan(scan); - heap_close(catalogRelation, RowExclusiveLock); - - /* - * Likewise, find inherited check constraints and disinherit them. To do - * this, we first need a list of the names of the parent's check - * constraints. (We cheat a bit by only checking for name matches, - * assuming that the expressions will match.) 
- */ - catalogRelation = heap_open(ConstraintRelationId, RowExclusiveLock); - ScanKeyInit(&key[0], - Anum_pg_constraint_conrelid, - BTEqualStrategyNumber, - F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(parent_rel))); - scan = systable_beginscan(catalogRelation, ConstraintRelidIndexId, true, NULL, 1, key); - - connames = NIL; - - while (HeapTupleIsValid(constraintTuple = systable_getnext(scan))) { - Form_pg_constraint con = (Form_pg_constraint)GETSTRUCT(constraintTuple); - - if (con->contype == CONSTRAINT_CHECK) - connames = lappend(connames, pstrdup(NameStr(con->conname))); - } - - systable_endscan(scan); - - /* Now scan the child's constraints */ - ScanKeyInit( - &key[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(catalogRelation, ConstraintRelidIndexId, true, NULL, 1, key); - - while (HeapTupleIsValid(constraintTuple = systable_getnext(scan))) { - Form_pg_constraint con = (Form_pg_constraint)GETSTRUCT(constraintTuple); - - if (con->contype != CONSTRAINT_CHECK) - continue; - - bool match = false; - ListCell* lc = NULL; - - foreach (lc, connames) { - if (strcmp(NameStr(con->conname), (char*)lfirst(lc)) == 0) { - match = true; - break; - } - } - - if (match) { - /* Decrement inhcount and possibly set islocal to true */ - HeapTuple copyTuple = (HeapTuple) tableam_tops_copy_tuple(constraintTuple); - Form_pg_constraint copy_con = (Form_pg_constraint)GETSTRUCT(copyTuple); - - if (copy_con->coninhcount <= 0) /* shouldn't happen */ - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("relation %u has non-inherited constraint \"%s\"", - RelationGetRelid(rel), - NameStr(copy_con->conname)))); - - copy_con->coninhcount--; - if (copy_con->coninhcount == 0) - copy_con->conislocal = true; - - simple_heap_update(catalogRelation, ©Tuple->t_self, copyTuple); - CatalogUpdateIndexes(catalogRelation, copyTuple); - tableam_tops_free_tuple(copyTuple); - } - } - 
list_free_deep(connames); - systable_endscan(scan); - heap_close(catalogRelation, RowExclusiveLock); - - drop_parent_dependency(RelationGetRelid(rel), RelationRelationId, RelationGetRelid(parent_rel)); - - /* keep our lock on the parent relation until commit */ - heap_close(parent_rel, NoLock); - - ObjectAddressSet(address, RelationRelationId, parent_oid); - - return address; -} - -/* - * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE - * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or - * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will - * be TypeRelationId). There's no convenient way to do this, so go trawling - * through pg_depend. - */ -static void drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid) -{ - Relation catalogRelation; - SysScanDesc scan; - ScanKeyData key[3]; - HeapTuple depTuple; - - catalogRelation = heap_open(DependRelationId, RowExclusiveLock); - - ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(0)); - - scan = systable_beginscan(catalogRelation, DependDependerIndexId, true, NULL, 3, key); - - while (HeapTupleIsValid(depTuple = systable_getnext(scan))) { - Form_pg_depend dep = (Form_pg_depend)GETSTRUCT(depTuple); - - if (dep->refclassid == refclassid && dep->refobjid == refobjid && dep->refobjsubid == 0 && - dep->deptype == DEPENDENCY_NORMAL) - simple_heap_delete(catalogRelation, &depTuple->t_self); - } - - systable_endscan(scan); - heap_close(catalogRelation, RowExclusiveLock); -} - -/* - * ALTER TABLE OF - * - * Attach a table to a composite type, as though it had been created with CREATE - * TABLE OF. All attname, atttypid, atttypmod and attcollation must match. 
The - * subject table must not have inheritance parents. These restrictions ensure - * that you cannot create a configuration impossible with CREATE TABLE OF alone. - */ -static ObjectAddress ATExecAddOf(Relation rel, const TypeName* ofTypename, LOCKMODE lockmode) -{ - Oid relid = RelationGetRelid(rel); - Type typetuple; - Oid typid; - Relation inheritsRelation, relationRelation; - SysScanDesc scan; - ScanKeyData key; - AttrNumber table_attno, type_attno; - TupleDesc typeTupleDesc, tableTupleDesc; - ObjectAddress tableobj, typeobj; - HeapTuple classtuple; - - if (RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation \"%s\" is a partitioned table", RelationGetRelationName(rel)), - errdetail("can not add of_type for partitioned table"))); - } - - /* Validate the type. */ - typetuple = typenameType(NULL, ofTypename, NULL); - check_of_type(typetuple); - typid = HeapTupleGetOid(typetuple); - - /* Fail if the table has any inheritance parents. */ - inheritsRelation = heap_open(InheritsRelationId, AccessShareLock); - ScanKeyInit(&key, Anum_pg_inherits_inhrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - scan = systable_beginscan(inheritsRelation, InheritsRelidSeqnoIndexId, true, NULL, 1, &key); - if (HeapTupleIsValid(systable_getnext(scan))) - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("typed tables cannot inherit"))); - systable_endscan(scan); - heap_close(inheritsRelation, AccessShareLock); - - /* - * Check the tuple descriptors for compatibility. Unlike inheritance, we - * require that the order also match. However, attnotnull need not match. - * Also unlike inheritance, we do not require matching relhasoids. 
- */ - typeTupleDesc = lookup_rowtype_tupdesc(typid, -1); - tableTupleDesc = RelationGetDescr(rel); - table_attno = 1; - for (type_attno = 1; type_attno <= typeTupleDesc->natts; type_attno++) { - Form_pg_attribute type_attr, table_attr; - const char* type_attname = NULL; - const char* table_attname = NULL; - - /* Get the next non-dropped type attribute. */ - type_attr = &typeTupleDesc->attrs[type_attno - 1]; - if (type_attr->attisdropped) - continue; - type_attname = NameStr(type_attr->attname); - - /* Get the next non-dropped table attribute. */ - do { - if (table_attno > tableTupleDesc->natts) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table is missing column \"%s\"", type_attname))); - table_attr = &tableTupleDesc->attrs[table_attno++ - 1]; - } while (table_attr->attisdropped); - table_attname = NameStr(table_attr->attname); - - /* Compare name. */ - if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table has column \"%s\" where type requires \"%s\"", table_attname, type_attname))); - - /* Compare type. */ - if (table_attr->atttypid != type_attr->atttypid || table_attr->atttypmod != type_attr->atttypmod || - table_attr->attcollation != type_attr->attcollation) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(rel), - type_attname))); - } - DecrTupleDescRefCount(typeTupleDesc); - - /* Any remaining columns at the end of the table had better be dropped. */ - for (; table_attno <= tableTupleDesc->natts; table_attno++) { - Form_pg_attribute table_attr = &tableTupleDesc->attrs[table_attno - 1]; - - if (!table_attr->attisdropped) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table has extra column \"%s\"", NameStr(table_attr->attname)))); - } - - /* If the table was already typed, drop the existing dependency. 
*/ - if (rel->rd_rel->reloftype) - drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype); - - /* Record a dependency on the new type. */ - tableobj.classId = RelationRelationId; - tableobj.objectId = relid; - tableobj.objectSubId = 0; - typeobj.classId = TypeRelationId; - typeobj.objectId = typid; - typeobj.objectSubId = 0; - recordDependencyOn(&tableobj, &typeobj, DEPENDENCY_NORMAL); - - /* Update pg_class.reloftype */ - relationRelation = heap_open(RelationRelationId, RowExclusiveLock); - classtuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(classtuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); - } - ((Form_pg_class)GETSTRUCT(classtuple))->reloftype = typid; - simple_heap_update(relationRelation, &classtuple->t_self, classtuple); - CatalogUpdateIndexes(relationRelation, classtuple); - tableam_tops_free_tuple(classtuple); - heap_close(relationRelation, RowExclusiveLock); - - ReleaseSysCache(typetuple); - - return typeobj; -} - -/* - * ALTER TABLE NOT OF - * - * Detach a typed table from its originating type. Just clear reloftype and - * remove the dependency. - */ -static void ATExecDropOf(Relation rel, LOCKMODE lockmode) -{ - Oid relid = RelationGetRelid(rel); - Relation relationRelation; - HeapTuple tuple; - - if (!OidIsValid(rel->rd_rel->reloftype)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a typed table", RelationGetRelationName(rel)))); - - if (RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation \"%s\" is a partitioned table", RelationGetRelationName(rel)), - errdetail("drop of_type for partitioned table, this is a could not happening event"))); - } - - /* - * We don't bother to check ownership of the type --- ownership of the - * table is presumed enough rights. No lock required on the type, either. 
- */ - drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype); - - /* Clear pg_class.reloftype */ - relationRelation = heap_open(RelationRelationId, RowExclusiveLock); - tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); - } - - ((Form_pg_class)GETSTRUCT(tuple))->reloftype = InvalidOid; - simple_heap_update(relationRelation, &tuple->t_self, tuple); - CatalogUpdateIndexes(relationRelation, tuple); - tableam_tops_free_tuple(tuple); - heap_close(relationRelation, RowExclusiveLock); -} -/* - * relation_mark_replica_identity: Update a table's replica identity - * - * Iff ri_type = REPLICA_IDENTITY_INDEX, indexOid must be the Oid of a suitable - * index. Otherwise, it should be InvalidOid. - */ -static void relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, bool is_internal) -{ - Relation pg_index; - Relation pg_class; - HeapTuple pg_class_tuple; - HeapTuple pg_index_tuple; - Form_pg_index pg_index_form; - ListCell* index = NULL; - bool isNull = false; - Datum replident; - Datum indisreplident; - char relreplident = '\0'; - bool isreplident = false; - - /* - * Check whether relreplident has changed, and update it if so. 
- */ - pg_class = heap_open(RelationRelationId, RowExclusiveLock); - pg_class_tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(RelationGetRelid(rel))); - if (!HeapTupleIsValid(pg_class_tuple)) - ereport(ERROR, - ((errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for relation \"%s\"", RelationGetRelationName(rel))))); - replident = heap_getattr(pg_class_tuple, Anum_pg_class_relreplident, RelationGetDescr(pg_class), &isNull); - if (!isNull) - relreplident = CharGetDatum(replident); - - if (relreplident == '\0' || relreplident != ri_type) { - HeapTuple newctup = NULL; - Datum values[Natts_pg_class]; - bool nulls[Natts_pg_class]; - bool replaces[Natts_pg_class]; - errno_t rc; - rc = memset_s(values, sizeof(values), 0, sizeof(values)); - securec_check(rc, "\0", "\0"); - rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replaces, sizeof(replaces), false, sizeof(replaces)); - securec_check(rc, "\0", "\0"); - - replaces[Anum_pg_class_relreplident - 1] = true; - values[Anum_pg_class_relreplident - 1] = ri_type; - - newctup = (HeapTuple) tableam_tops_modify_tuple(pg_class_tuple, RelationGetDescr(pg_class), values, nulls, replaces); - - simple_heap_update(pg_class, &pg_class_tuple->t_self, newctup); - CatalogUpdateIndexes(pg_class, newctup); - tableam_tops_free_tuple(newctup); - } - heap_close(pg_class, RowExclusiveLock); - tableam_tops_free_tuple(pg_class_tuple); - - /* - * Check whether the correct index is marked indisreplident; if so, we're - * done. 
- */ - if (OidIsValid(indexOid)) { - Assert(ri_type == REPLICA_IDENTITY_INDEX); - - Relation reltmp = heap_open(IndexRelationId, AccessShareLock); - pg_index_tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexOid)); - if (!HeapTupleIsValid(pg_index_tuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", indexOid))); - } - pg_index_form = (Form_pg_index)GETSTRUCT(pg_index_tuple); - isNull = false; - isreplident = false; - indisreplident = heap_getattr(pg_index_tuple, Anum_pg_index_indisreplident, RelationGetDescr(reltmp), &isNull); - heap_close(reltmp, AccessShareLock); - - if (!isNull) - isreplident = BoolGetDatum(indisreplident); - - if (isreplident) { - ReleaseSysCache(pg_index_tuple); - return; - } - ReleaseSysCache(pg_index_tuple); - } - - /* - * Clear the indisreplident flag from any index that had it previously, and - * set it for any index that should have it now. - */ - pg_index = heap_open(IndexRelationId, RowExclusiveLock); - foreach (index, RelationGetIndexList(rel)) { - Oid thisIndexOid = lfirst_oid(index); - bool dirty = false; - - pg_index_tuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(thisIndexOid)); - if (!HeapTupleIsValid(pg_index_tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", thisIndexOid))); - } - pg_index_form = (Form_pg_index)GETSTRUCT(pg_index_tuple); - isNull = false; - isreplident = false; - indisreplident = - heap_getattr(pg_index_tuple, Anum_pg_index_indisreplident, RelationGetDescr(pg_index), &isNull); - if (!isNull) - isreplident = BoolGetDatum(indisreplident); - /* - * Unset the bit if set. We know it's wrong because we checked this - * earlier. 
- */ - if (isreplident) { - dirty = true; - isreplident = false; - } else if (thisIndexOid == indexOid) { - dirty = true; - isreplident = true; - } - - if (dirty) { - HeapTuple newitup = NULL; - Datum values[Natts_pg_class]; - bool nulls[Natts_pg_class]; - bool replaces[Natts_pg_class]; - errno_t rc; - rc = memset_s(values, sizeof(values), 0, sizeof(values)); - securec_check(rc, "\0", "\0"); - rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replaces, sizeof(replaces), false, sizeof(replaces)); - securec_check(rc, "\0", "\0"); - - replaces[Anum_pg_index_indisreplident - 1] = true; - values[Anum_pg_index_indisreplident - 1] = isreplident; - newitup = (HeapTuple) tableam_tops_modify_tuple(pg_index_tuple, RelationGetDescr(pg_index), values, nulls, replaces); - simple_heap_update(pg_index, &pg_index_tuple->t_self, newitup); - CatalogUpdateIndexes(pg_index, newitup); - tableam_tops_free_tuple(newitup); - } - tableam_tops_free_tuple(pg_index_tuple); - } - - heap_close(pg_index, RowExclusiveLock); -} - -/* - * ALTER TABLE REPLICA IDENTITY ... 
- */ -static void ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt* stmt, LOCKMODE lockmode) -{ - Oid indexOid; - Relation indexRel; - int key; - - if (stmt->identity_type == REPLICA_IDENTITY_DEFAULT) { - relation_mark_replica_identity(rel, stmt->identity_type, InvalidOid, true); - return; - } else if (stmt->identity_type == REPLICA_IDENTITY_FULL) { - relation_mark_replica_identity(rel, stmt->identity_type, InvalidOid, true); - return; - } else if (stmt->identity_type == REPLICA_IDENTITY_NOTHING) { - relation_mark_replica_identity(rel, stmt->identity_type, InvalidOid, true); - return; - } else if (stmt->identity_type == REPLICA_IDENTITY_INDEX) { - /* fallthrough */; - } else - ereport(ERROR, - ((errcode(ERRCODE_UNEXPECTED_NODE_STATE), - errmsg("unexpected identity type %u", (uint)stmt->identity_type)))); - - /* Check that the index exists */ - indexOid = get_relname_relid(stmt->name, rel->rd_rel->relnamespace); - if (!OidIsValid(indexOid)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("index \"%s\" for table \"%s\" does not exist", stmt->name, RelationGetRelationName(rel)))); - - indexRel = index_open(indexOid, ShareLock); - - /* Check that the index is on the relation we're altering. */ - if (indexRel->rd_index == NULL || indexRel->rd_index->indrelid != RelationGetRelid(rel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not an index for table \"%s\"", - RelationGetRelationName(indexRel), - RelationGetRelationName(rel)))); - /* The AM must support uniqueness, and the index must in fact be unique. */ - if (!indexRel->rd_am->amcanunique || !indexRel->rd_index->indisunique) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot use non-unique index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - /* Deferred indexes are not guaranteed to be always unique. 
*/ - if (!indexRel->rd_index->indimmediate) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg( - "cannot use non-immediate index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - /* Expression indexes aren't supported. */ - if (RelationGetIndexExpressions(indexRel) != NIL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use expression index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - /* Predicate indexes aren't supported. */ - if (RelationGetIndexPredicate(indexRel) != NIL) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use partial index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - /* And neither are invalid indexes. */ - if (!IndexIsValid(indexRel->rd_index)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use invalid index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - - /* Check index for nullable columns. */ - for (key = 0; key < IndexRelationGetNumberOfKeyAttributes(indexRel); key++) { - int16 attno = indexRel->rd_index->indkey.values[key]; - Form_pg_attribute attr; - - /* Of the system columns, only oid is indexable. */ - if (attno <= 0 && attno != ObjectIdAttributeNumber) - ereport(ERROR, - ((errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("internal column %d in unique index \"%s\"", attno, RelationGetRelationName(indexRel))))); - - attr = &rel->rd_att->attrs[attno - 1]; - if (!attr->attnotnull) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("index \"%s\" cannot be used as replica identity because column \"%s\" is nullable", - RelationGetRelationName(indexRel), - NameStr(attr->attname)))); - } - - /* This index is suitable for use as a replica identity. Mark it. */ - relation_mark_replica_identity(rel, stmt->identity_type, indexOid, true); - - index_close(indexRel, NoLock); -} - -/* - * ALTER FOREIGN TABLE OPTIONS (...) 
- */ -static void ATExecGenericOptions(Relation rel, List* options) -{ - Relation ftrel; - ForeignServer* server = NULL; - ForeignDataWrapper* fdw = NULL; - HeapTuple tuple; - bool isnull = false; - Datum repl_val[Natts_pg_foreign_table]; - bool repl_null[Natts_pg_foreign_table]; - bool repl_repl[Natts_pg_foreign_table]; - Datum datum; - Form_pg_foreign_table tableform; - errno_t rc; - DefElemAction actions; - double num_rows = 0.0; - char* total_rows = NULL; - - if (options == NIL) - return; - - ftrel = heap_open(ForeignTableRelationId, RowExclusiveLock); - - tuple = SearchSysCacheCopy1(FOREIGNTABLEREL, rel->rd_id); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("foreign table \"%s\" does not exist", RelationGetRelationName(rel)))); - tableform = (Form_pg_foreign_table)GETSTRUCT(tuple); - server = GetForeignServer(tableform->ftserver); - fdw = GetForeignDataWrapper(server->fdwid); - - rc = memset_s(repl_val, sizeof(repl_val), 0, sizeof(repl_val)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check(rc, "\0", "\0"); - rc = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check(rc, "\0", "\0"); - - /* Extract the current options */ - datum = SysCacheGetAttr(FOREIGNTABLEREL, tuple, Anum_pg_foreign_table_ftoptions, &isnull); - if (isnull) - datum = PointerGetDatum(NULL); - - /* add write_only to options */ - if (tableform->ftwriteonly) { - List* resultOptions = untransformRelOptions(datum); - Node* val = (Node*)makeString(pstrdup("true")); - resultOptions = lappend(resultOptions, makeDefElem(pstrdup("write_only"), val)); - datum = optionListToArray(resultOptions); - list_free_ext(resultOptions); - } - - options = regularizeObsLocationInfo(options); - - /* - * we insert type information into option in order to distinguish server type - * hdfs_fdw_validator function. 
- */ - char* optValue = getServerOptionValue(server->serverid, "type"); - DefElem* defElem = NULL; - if (NULL != optValue) { - defElem = makeDefElem("type", (Node*)makeString(optValue)); - options = lappend(options, defElem); - } - - /* Transform the options */ - datum = transformGenericOptions(ForeignTableRelationId, datum, options, fdw->fdwvalidator); - // update obs foreign table options totalrows in pg_class - // - if (IS_PGXC_COORDINATOR) { - /* As for DEFELEM_DROP status, we do not deal with toltal rows in pg_class. */ - actions = getFTAlterAction(options, "totalrows"); - if ((actions == DEFELEM_SET) || (actions == DEFELEM_ADD)) { - total_rows = getFTOptionValue(options, "totalrows"); - num_rows = convertFTOptionValue(total_rows); - updateTotalRows(rel->rd_id, num_rows); - } - } - - List* resultOptions = untransformRelOptions(datum); - - if (NULL != optValue) { - options = list_delete(options, defElem); - resultOptions = list_delete(resultOptions, defElem); - pfree_ext(defElem); - } - - /* remove write_only from datum */ - if (tableform->ftwriteonly) { - bool found = false; - resultOptions = FindOrRemoveForeignTableOption(resultOptions, "write_only", true, &found); - Assert(found); - } - - datum = optionListToArray(resultOptions); - - if (PointerIsValid(DatumGetPointer(datum))) - repl_val[Anum_pg_foreign_table_ftoptions - 1] = datum; - else - repl_null[Anum_pg_foreign_table_ftoptions - 1] = true; - - repl_repl[Anum_pg_foreign_table_ftoptions - 1] = true; - - /* Everything looks good - update the tuple */ - tuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(ftrel), repl_val, repl_null, repl_repl); - - simple_heap_update(ftrel, &tuple->t_self, tuple); - CatalogUpdateIndexes(ftrel, tuple); - - /* - * Invalidate relcache so that all sessions will refresh any cached plans - * that might depend on the old options. 
- */ - CacheInvalidateRelcache(rel); - - heap_close(ftrel, RowExclusiveLock); - - tableam_tops_free_tuple(tuple); -} - -#ifdef PGXC -/* - * ALTER TABLE DISTRIBUTE BY ... - */ -static void AtExecDistributeBy(Relation rel, DistributeBy* options) -{ - Oid relid; - char locatortype; - int hashalgorithm = 0; - int hashbuckets = 0; - AttrNumber* attnum = NULL; - int distributeKeyNum = 0; - - /* Nothing to do on Datanodes */ - if (IS_PGXC_DATANODE || options == NULL) - return; - - relid = RelationGetRelid(rel); - - if (options->colname) { - distributeKeyNum = list_length(options->colname); - attnum = (int2*)palloc(distributeKeyNum * sizeof(AttrNumber)); - } else { - distributeKeyNum = 1; - attnum = (int2*)palloc(1 * sizeof(AttrNumber)); - } - - /* Get necessary distribution information */ - GetRelationDistributionItems( - relid, options, RelationGetDescr(rel), &locatortype, &hashalgorithm, &hashbuckets, attnum); - - /* - * It is not checked if the distribution type list is the same as the old one, - * user might define a different sub-cluster at the same time. - */ - /* Update pgxc_class entry */ - PgxcClassAlter(relid, - locatortype, - attnum, - distributeKeyNum, - hashalgorithm, - hashbuckets, - 0, - NULL, - '\0', - PGXC_CLASS_ALTER_DISTRIBUTION, - NULL); - pfree_ext(attnum); - - /* Make the additional catalog changes visible */ - CommandCounterIncrement(); -} - -/* - * ALTER TABLE TO [ NODE nodelist | GROUP groupname ] - */ -static void AtExecSubCluster(Relation rel, PGXCSubCluster* options) -{ - Oid* nodeoids = NULL; - int numnodes; - ListCell* lc = NULL; - char* group_name = NULL; - - if (!u_sess->attr.attr_sql.enable_cluster_resize) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("The alter table to group(or node) can be used only for expansion.")))); - } - - /* Nothing to do on Datanodes */ - if (IS_PGXC_DATANODE || options == NULL) - return; - - /* Get node group name for ALTER TABLE ... 
TO Group */ - if (options->clustertype == SUBCLUSTER_GROUP) { - Assert(list_length(options->members) == 1); - - foreach (lc, options->members) { - group_name = strVal(lfirst(lc)); - } - } - - /* - * It is not checked if the new subcluster list is the same as the old one, - * user might define a different distribution type. - */ - /* Obtain new node information */ - nodeoids = GetRelationDistributionNodes(options, &numnodes); - - /* Update pgxc_class entry */ - PgxcClassAlter( - RelationGetRelid(rel), '\0', NULL, 0, 0, 0, numnodes, nodeoids, 'y', PGXC_CLASS_ALTER_NODES, group_name); - - /* Make the additional catalog changes visible */ - CommandCounterIncrement(); -} - -/* - * copy tabOid2 slice tuples (info from slicelist) to tabOid1 (destination) - */ -static void AtExecCopySlice(CatCList* sliceList, Oid tabOid, Relation pgxcSliceRel) -{ - HeapTuple oldTup, newTup; - Datum values[Natts_pgxc_slice] = {0}; - bool nulls[Natts_pgxc_slice] = {false}; - bool replaces[Natts_pgxc_slice] = {false}; - - for (int i = 0; i < sliceList->n_members; i++) { - oldTup = t_thrd.lsc_cxt.FetchTupleFromCatCList(sliceList, i); - bool isnull = false; - Datum val = fastgetattr(oldTup, Anum_pgxc_slice_type, RelationGetDescr(pgxcSliceRel), &isnull); - if (DatumGetChar(val) == PGXC_SLICE_TYPE_TABLE) { - continue; - } - values[Anum_pgxc_slice_relid - 1] = tabOid; - replaces[Anum_pgxc_slice_relid - 1] = true; - newTup = heap_modify_tuple(oldTup, RelationGetDescr(pgxcSliceRel), values, nulls, replaces); - (void)simple_heap_insert(pgxcSliceRel, newTup); - CatalogUpdateIndexes(pgxcSliceRel, newTup); - heap_freetuple_ext(newTup); - } -} -/* - * ALTER TABLE UPDATE SLICE LIKE - * Update pgxc_slice entry only used for range/list table redistribution - */ -static void AtExecUpdateSliceLike(Relation rel, const RangeVar* refTableName) -{ - int i; - HeapTuple oldTup; - Relation pgxcSliceRel; - Relation invalRel; - Oid tabOid1, tabOid2; - CatCList* sliceList1 = NULL; - CatCList* sliceList2 = NULL; - /* 
Nothing to do on Datanodes or input is null */ - if (IS_PGXC_DATANODE || rel == NULL || refTableName == NULL) { - return; - } - /* 'Update slice like' can be only used by redistribution tool */ - if (!u_sess->attr.attr_sql.enable_cluster_resize) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Update slice like can be only used by redistribution tool."))); - } - /* 'Update slice like' should only be used for range/list table */ - if (rel->rd_locator_info != NULL && IsLocatorDistributedBySlice(rel->rd_locator_info->locatorType)) { - tabOid1 = RelationGetRelid(rel); - tabOid2 = RangeVarGetRelidExtended(refTableName, NoLock, true, false, false, true, NULL, NULL); - invalRel = relation_open(tabOid1, AccessExclusiveLock); - pgxcSliceRel = heap_open(PgxcSliceRelationId, RowExclusiveLock); - sliceList1 = SearchSysCacheList2(PGXCSLICERELID, ObjectIdGetDatum(tabOid1), - CharGetDatum(PGXC_SLICE_TYPE_SLICE)); - sliceList2 = SearchSysCacheList2(PGXCSLICERELID, ObjectIdGetDatum(tabOid2), - CharGetDatum(PGXC_SLICE_TYPE_SLICE)); - if (sliceList1->n_members == 0 || sliceList2->n_members == 0 || - sliceList1->n_members != sliceList2->n_members) { - ReleaseSysCacheList(sliceList1); - ReleaseSysCacheList(sliceList2); - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Both table should have the same slice count."))); - } - /* drop tabOid1 slice tuples except table entry tuple */ - for (i = 0; i < sliceList1->n_members; i++) { - oldTup = t_thrd.lsc_cxt.FetchTupleFromCatCList(sliceList1, i); - simple_heap_delete(pgxcSliceRel, &oldTup->t_self); - } - ReleaseSysCacheList(sliceList1); - - /* copy tabOid2 slice tuples to tabOid1 */ - AtExecCopySlice(sliceList2, tabOid1, pgxcSliceRel); - ReleaseSysCacheList(sliceList2); - heap_close(pgxcSliceRel, RowExclusiveLock); - /* inval relation cache of tabOid1 */ - CacheInvalidateRelcache(invalRel); - relation_close(invalRel, NoLock); - CommandCounterIncrement(); - } -} - -/* - * ALTER TABLE ADD NODE 
nodelist - */ -static void AtExecAddNode(Relation rel, List* options) -{ - Oid* add_oids = NULL; - Oid* old_oids = NULL; - int add_num, old_num; - - /* Nothing to do on Datanodes */ - if (IS_PGXC_DATANODE || options == NIL) - return; - - /* - * Build a new array of sorted node Oids given the list of name nodes - * to be added. - */ - add_oids = BuildRelationDistributionNodes(options, &add_num); - - /* - * Then check if nodes to be added are not in existing node - * list and build updated list of nodes. - */ - old_num = get_pgxc_classnodes(RelationGetRelid(rel), &old_oids); - - /* Add elements to array */ - old_oids = add_node_list(old_oids, old_num, add_oids, add_num, &old_num); - - /* Sort once again the newly-created array of node Oids to maintain consistency */ - old_oids = SortRelationDistributionNodes(old_oids, old_num); - - /* Update pgxc_class entry */ - PgxcClassAlter(RelationGetRelid(rel), '\0', NULL, 0, 0, 0, old_num, old_oids, 'y', PGXC_CLASS_ALTER_NODES, NULL); - - /* Make the additional catalog changes visible */ - CommandCounterIncrement(); -} - -/* - * ALTER TABLE DELETE NODE nodelist - */ -static void AtExecDeleteNode(Relation rel, List* options) -{ - Oid* del_oids = NULL; - Oid* old_oids = NULL; - int del_num; - int old_num; - - /* Nothing to do on Datanodes */ - if (IS_PGXC_DATANODE || options == NIL) - return; - - /* - * Build a new array of sorted node Oids given the list of name nodes - * to be deleted. - */ - del_oids = BuildRelationDistributionNodes(options, &del_num); - - /* - * Check if nodes to be deleted are really included in existing - * node list and get updated list of nodes. 
- */ - old_num = get_pgxc_classnodes(RelationGetRelid(rel), &old_oids); - - /* Delete elements on array */ - old_oids = delete_node_list(old_oids, old_num, del_oids, del_num, &old_num); - - /* Update pgxc_class entry */ - PgxcClassAlter(RelationGetRelid(rel), '\0', NULL, 0, 0, 0, old_num, old_oids, '\0', PGXC_CLASS_ALTER_NODES, NULL); - - /* Make the additional catalog changes visible */ - CommandCounterIncrement(); -} - -static void ATCheckCmdGenericOptions(Relation rel, AlterTableCmd* cmd) -{ - DFSFileType fileType = DFS_INVALID; - DFSFileType formatType = DFS_INVALID; - Oid relid = InvalidOid; - ForeignTable* table = NULL; - ForeignServer* server = NULL; - ForeignDataWrapper* wrapper = NULL; - char* fdwnamestr = NULL; - char* format = NULL; - - if (RelationIsForeignTable(rel) || RelationIsStream(rel)) { - relid = RelationGetRelid(rel); - table = GetForeignTable(relid); - server = GetForeignServer(table->serverid); - wrapper = GetForeignDataWrapper(server->fdwid); - fdwnamestr = wrapper->fdwname; - if (0 == strncasecmp(fdwnamestr, HDFS_FDW, NAMEDATALEN) || - 0 == strncasecmp(fdwnamestr, DFS_FDW, NAMEDATALEN)) { - formatType = getSetFormatNewFormat(cmd); - if (RelationIsPartitioned(rel)) { - if (DFS_TEXT == formatType || DFS_CSV == formatType || DFS_CARBONDATA == formatType) { - ereport(ERROR, - (errmsg("The obs foreign partition table cannot support on text, csv, carbondata " - "format."))); - } - } - - format = HdfsGetOptionValue(relid, OPTION_NAME_FORMAT); - fileType = getFormatByName(format); - if (checkColumnTypeIsBytea(rel) && - (DFS_CARBONDATA == fileType || DFS_TEXT == fileType || DFS_CSV == fileType)) { - if (DFS_ORC == formatType || DFS_PARQUET == formatType) { - ereport(ERROR, - (errmsg("The obs foreign table has column type bytea, cannot support on orc format."))); - } - } - } - } - -} - -/* - * ATCheckCmd - * - * Check ALTER TABLE restrictions in openGauss - */ -static void ATCheckCmd(Relation rel, AlterTableCmd* cmd) -{ - /* Do nothing in the case of 
a remote node */ - if (u_sess->attr.attr_sql.enable_parallel_ddl) { - if (IS_PGXC_DATANODE) - return; - } else { - if (IS_PGXC_DATANODE || IsConnFromCoord()) - return; - } - - switch (cmd->subtype) { - case AT_DropColumn: { - AttrNumber attnum = get_attnum(RelationGetRelid(rel), cmd->name); - - /* Distribution column cannot be dropped */ - if (IsDistribColumn(RelationGetRelid(rel), attnum)) - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Distribution column cannot be dropped"))); - - break; - } - case AT_DistributeBy: - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Distribution mode cannot be altered"))); - break; - case AT_SubCluster: - break; - case AT_GenericOptions: { - ATCheckCmdGenericOptions(rel, cmd); - break; - } - default: - break; - } -} - -/* - * Check the Valid of set format option to Foreign Partition Table - */ -static DFSFileType getSetFormatNewFormat(AlterTableCmd* cmd) -{ - if (nodeTag(cmd->def) == T_List) { - List* defs = (List*)cmd->def; - ListCell* lc = NULL; - - foreach (lc, defs) { - DefElem* def = (DefElem*)lfirst(lc); - if (def->defaction == DEFELEM_SET && pg_strcasecmp(def->defname, OPTION_NAME_FORMAT) == 0) { - return getFormatByDefElem(def); - } - } - } - return DFS_INVALID; -} - -static bool checkColumnTypeIsBytea(Relation rel) -{ - int tupIndex = 0; - TupleDesc tupdesc = RelationGetDescr(rel); - - for (tupIndex = 0; tupIndex < tupdesc->natts; tupIndex++) { - Form_pg_attribute attr = &tupdesc->attrs[tupIndex]; - if (BYTEAOID == attr->atttypid) { - return true; - } - } - return false; -} - -/* - * brief: Get format option. - * input param @DefElem: the option information struct pointer - */ - -static DFSFileType getFormatByDefElem(DefElem* opt) -{ - char* format = NULL; - - if (0 == pg_strcasecmp(opt->defname, OPTION_NAME_FORMAT)) { - format = defGetString(opt); - return getFormatByName(format); - } - return DFS_INVALID; -} - -/* - * brief: Get format option. 
- * input param @format: the format(ORC/TEXT/CSV/Parquet/Carbondata) - */ -static DFSFileType getFormatByName(char* format) -{ - /* - * Currently, the orc format is supported for hdfs foreign table, but - * the parquet and csv formats will be supported in the future. - */ - DFSFileType formatType = DFS_INVALID; - - if (format != NULL) { - if (0 == pg_strcasecmp(format, DFS_FORMAT_ORC)) { - formatType = DFS_ORC; - } else if (0 == pg_strcasecmp(format, DFS_FORMAT_TEXT)) { - formatType = DFS_TEXT; - } else if (0 == pg_strcasecmp(format, DFS_FORMAT_CSV)) { - formatType = DFS_CSV; - } else if (0 == pg_strcasecmp(format, DFS_FORMAT_PARQUET)) { - formatType = DFS_PARQUET; - } else if (0 == pg_strcasecmp(format, DFS_FORMAT_CARBONDATA)) { - formatType = DFS_CARBONDATA; - } else { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmodule(MOD_DFS), - errmsg("Invalid option \"%s\"", format), - errhint("Valid options in this context are: orc, parquet, carbondata, text, csv"))); - } - } - - return formatType; -} - -/* - * BuildRedistribCommands - * Evaluate new and old distribution and build the list of operations - * necessary to perform table redistribution. - */ -static RedistribState* BuildRedistribCommands(Oid relid, List* subCmds) -{ - RedistribState* redistribState = makeRedistribState(relid); - RelationLocInfo* oldLocInfo = NULL; - RelationLocInfo* newLocInfo = NULL; /* Former locator info */ - Relation rel; - Oid* new_oid_array = NULL; /* Modified list of Oids */ - int new_num, i; /* Modified number of Oids */ - ListCell* item = NULL; - - /* Get necessary information about relation */ - rel = relation_open(redistribState->relid, NoLock); - oldLocInfo = RelationGetLocInfo(rel); - Assert(oldLocInfo); - - /* - * Get a copy of the locator information that will be modified by - * successive ALTER TABLE commands. 
- */ - newLocInfo = CopyRelationLocInfo(oldLocInfo); - /* The node list of this locator information will be rebuilt after command scan */ - list_free_ext(newLocInfo->nodeList); - list_free_ext(newLocInfo->partAttrNum); - newLocInfo->nodeList = NULL; - newLocInfo->partAttrNum = NULL; - - /* Get the list to be modified */ - new_num = get_pgxc_classnodes(RelationGetRelid(rel), &new_oid_array); - - foreach (item, subCmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(item); - DistributeBy* distributeby = NULL; - AttrNumber* attnum = NULL; - int distributeKeyNum; - - switch (cmd->subtype) { - case AT_DistributeBy: - /* - * Get necessary distribution information and update to new - * distribution type. - */ - distributeby = (DistributeBy*)cmd->def; - if (distributeby != NULL && distributeby->colname != NIL) { - distributeKeyNum = list_length(distributeby->colname); - attnum = (AttrNumber*)palloc(distributeKeyNum * sizeof(AttrNumber)); - } else { - distributeKeyNum = 1; - attnum = (AttrNumber*)palloc(1 * sizeof(AttrNumber)); - } - GetRelationDistributionItems(redistribState->relid, - (DistributeBy*)cmd->def, - RelationGetDescr(rel), - &(newLocInfo->locatorType), - NULL, - NULL, - attnum); - for (i = 0; i < distributeKeyNum; i++) - newLocInfo->partAttrNum = lappend_int(newLocInfo->partAttrNum, attnum[i]); - pfree_ext(attnum); - break; - case AT_SubCluster: - /* Update new list of nodes */ - new_oid_array = GetRelationDistributionNodes((PGXCSubCluster*)cmd->def, &new_num); - break; - case AT_AddNodeList: { - Oid* add_oids = NULL; - int add_num; - add_oids = BuildRelationDistributionNodes((List*)cmd->def, &add_num); - /* Add elements to array */ - new_oid_array = add_node_list(new_oid_array, new_num, add_oids, add_num, &new_num); - } break; - case AT_DeleteNodeList: { - Oid* del_oids = NULL; - int del_num; - del_oids = BuildRelationDistributionNodes((List*)cmd->def, &del_num); - /* Delete elements from array */ - new_oid_array = delete_node_list(new_oid_array, new_num, 
del_oids, del_num, &new_num); - } break; - case AT_UpdateSliceLike: { - /* - * Update slice like the temptable in pgxc_slice. - * Nothing need to do here, because 'update slice' only follows - * 'add/delete node' when redistribution is working, - * and the other preparations have been done by 'add/delete node'. - */ - } break; - default: - Assert(0); /* Should not happen */ - } - } - - /* Build relation node list for new locator info */ - for (i = 0; i < new_num; i++) - newLocInfo->nodeList = - lappend_int(newLocInfo->nodeList, PGXCNodeGetNodeId(new_oid_array[i], PGXC_NODE_DATANODE)); - - /* Build the command tree for table redistribution */ - PGXCRedistribCreateCommandList(redistribState, newLocInfo); - - if (ISMATMAP(rel->rd_rel->relname.data)) { - FreeRelationLocInfo(newLocInfo); - pfree_ext(new_oid_array); - relation_close(rel, NoLock); - - return redistribState; - } - - /* - * Using the new locator info already available, check if constraints on - * relation are compatible with the new distribution. - */ - foreach (item, RelationGetIndexList(rel)) { - Oid indid = lfirst_oid(item); - Relation indexRel = index_open(indid, AccessShareLock); - List* indexColNums = NIL; - int2vector* colIdsPtr = &indexRel->rd_index->indkey; - - /* - * Prepare call to shippability check. Attributes set to 0 correspond - * to index expressions and are evaluated internally, so they are not - * appended in given list. 
- */ - for (i = 0; i < colIdsPtr->dim1; i++) { - if (colIdsPtr->values[i] > 0) - indexColNums = lappend_int(indexColNums, colIdsPtr->values[i]); - } - - if (!pgxc_check_index_shippability(newLocInfo, - indexRel->rd_index->indisprimary, - indexRel->rd_index->indisunique, - indexRel->rd_index->indisexclusion, - indexColNums, - indexRel->rd_indexprs)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Cannot alter table to distribution incompatible " - "with existing constraints"))); - - index_close(indexRel, AccessShareLock); - } - - /* Clean up */ - FreeRelationLocInfo(newLocInfo); - pfree_ext(new_oid_array); - relation_close(rel, NoLock); - - return redistribState; -} - -/* - * Delete from given Oid array old_oids the given oid list del_oids - * and build a new one. - */ -static Oid* delete_node_list(Oid* old_oids, int old_num, Oid* del_oids, int del_num, int* new_num) -{ - /* Allocate former array and data */ - Oid* new_oids = old_oids; - int loc_new_num = old_num; - int i; - - /* - * Delete from existing node Oid array the elements to be removed. - * An error is returned if an element to be deleted is not in existing array. - * It is not necessary to sort once again the result array of node Oids - * as here only a deletion of elements is done. 
- */ - for (i = 0; i < del_num; i++) { - Oid nodeoid = del_oids[i]; - int j, position; - bool is_listed = false; - NameData nodename = {{0}}; - position = 0; - - for (j = 0; j < loc_new_num; j++) { - /* Check if element can be removed */ - if (PgxcNodeCheckDnMatric(nodeoid, new_oids[j])) { - is_listed = true; - position = j; - } - } - - /* Move all the elements from [j+1, n-1] to [j, n-2] */ - if (is_listed) { - for (j = position + 1; j < loc_new_num; j++) - new_oids[j - 1] = new_oids[j]; - - loc_new_num--; - - /* Not possible to have an empty list */ - if (loc_new_num == 0) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("Node list is empty: one node at least is mandatory"))); - - new_oids = (Oid*)repalloc(new_oids, loc_new_num * sizeof(Oid)); - } else - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("PGXC Node %s: object not in relation node list", get_pgxc_nodename(nodeoid, &nodename)))); - } - - /* Save new number of nodes */ - *new_num = loc_new_num; - return new_oids; -} - -/* - * Add to given Oid array old_oids the given oid list add_oids - * and build a new one. - */ -static Oid* add_node_list(Oid* old_oids, int old_num, Oid* add_oids, int add_num, int* new_num) -{ - /* Allocate former array and data */ - Oid* new_oids = old_oids; - int loc_new_num = old_num; - int i; - - /* - * Build new Oid list, both addition and old list are already sorted. - * The idea here is to go through the list of nodes to be added and - * add the elements one-by-one on the existing list. - * An error is returned if an element to be added already exists - * in relation node array. - * Here we do O(n^2) scan to avoid a dependency with the way - * oids are sorted by heap APIs. They are sorted once again once - * the addition operation is completed. 
- */ - for (i = 0; i < add_num; i++) { - Oid nodeoid = add_oids[i]; - int j; - NameData nodename = {{0}}; - - /* Check if element is already a part of array */ - for (j = 0; j < loc_new_num; j++) { - /* Item is already in node list */ - if (PgxcNodeCheckDnMatric(nodeoid, new_oids[j])) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("PGXC Node %s: object already in relation node list", - get_pgxc_nodename(nodeoid, &nodename)))); - } - - /* If we are here, element can be added safely in node array */ - loc_new_num++; - new_oids = (Oid*)repalloc(new_oids, loc_new_num * sizeof(Oid)); - new_oids[loc_new_num - 1] = nodeoid; - } - - /* Sort once again the newly-created array of node Oids to maintain consistency */ - new_oids = SortRelationDistributionNodes(new_oids, loc_new_num); - - /* Save new number of nodes */ - *new_num = loc_new_num; - return new_oids; -} -#endif - -/* - * Execute ALTER TABLE SET SCHEMA - */ -ObjectAddress AlterTableNamespace(AlterObjectSchemaStmt* stmt, Oid *oldschema) -{ - Relation rel; - Oid relid; - Oid oldNspOid; - Oid nspOid; - RangeVar* newrv = NULL; - ObjectAddresses* objsMoved = NULL; - ObjectAddress myself; - - relid = RangeVarGetRelidExtended(stmt->relation, - AccessExclusiveLock, - stmt->missing_ok, - false, - false, - false, - RangeVarCallbackForAlterRelation, - (void*)stmt); - - if (!OidIsValid(relid)) { - ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", stmt->relation->relname))); - return InvalidObjectAddress; - } - -#ifdef ENABLE_MOT - if (IsMOTForeignTable(relid)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Un-support feature"), - errdetail("target table is a mot table"))); - } -#endif - - TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); - - rel = relation_open(relid, NoLock); - - if (rel->rd_mlogoid != InvalidOid) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("table owning matview 
doesn't support this ALTER yet.")))); - } - - if (rel->rd_rel->relkind == RELKIND_MATVIEW) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER MATERIALIZED VIEW is not yet supported."))); - } - - /* Permission check */ - if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(rel)); - } - - oldNspOid = RelationGetNamespace(rel); - - /* We allow to alter global temp table only this session use it */ - CheckGttTableInUse(rel); - - /* If it's an owned sequence, disallow moving it by itself. */ - if (RELKIND_IS_SEQUENCE(rel->rd_rel->relkind)) { - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ALTER SEQUENCE SET SCHEMA is not yet supported."))); - - Oid tableId; - int32 colId; - - if (sequenceIsOwned(relid, &tableId, &colId)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move an owned (large) sequence into another schema"), - errdetail("Sequence \"%s\" is linked to table \"%s\".", - RelationGetRelationName(rel), - get_rel_name(tableId)))); - } - - /* Get and lock schema OID and check its permissions. 
*/ - newrv = makeRangeVar(stmt->newschema, RelationGetRelationName(rel), -1); - nspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL, '\0'); - - /* common checks on switching namespaces */ - CheckSetNamespace(oldNspOid, nspOid, RelationRelationId, relid); - ledger_check_switch_schema(oldNspOid, nspOid); - objsMoved = new_object_addresses(); - AlterTableNamespaceInternal(rel, oldNspOid, nspOid, objsMoved); - if (rel->rd_isblockchain) { - rename_hist_by_newnsp(RelationGetRelid(rel), stmt->newschema); - } - free_object_addresses(objsMoved); - - ObjectAddressSet(myself, RelationRelationId, relid); - - if (oldschema) - *oldschema = oldNspOid; - - /* close rel, but keep lock until commit */ - relation_close(rel, NoLock); - return myself; -} - -/* - * The guts of relocating a table or materialized view to another namespace: - * besides moving the relation itself, its dependent objects are relocated to - * the new schema. - */ -void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, ObjectAddresses* objsMoved) -{ - Relation classRel; - - Assert(objsMoved != NULL); - - /* OK, modify the pg_class row and pg_depend entry */ - classRel = heap_open(RelationRelationId, RowExclusiveLock); - - AlterRelationNamespaceInternal(classRel, RelationGetRelid(rel), oldNspOid, nspOid, true, objsMoved); - - /* Fix the table's row type too */ - (void)AlterTypeNamespaceInternal(rel->rd_rel->reltype, nspOid, false, false, objsMoved); - - /* Change the table's set type too */ - TupleDesc tupDesc = rel->rd_att; - for (int i = 0; i < tupDesc->natts; i++) { - if (!tupDesc->attrs[i].attisdropped && type_is_set(tupDesc->attrs[i].atttypid)) { - (void)AlterTypeNamespaceInternal(tupDesc->attrs[i].atttypid, nspOid, false, false, objsMoved); - } - } - - /* Fix other dependent stuff */ - if (rel->rd_rel->relkind == RELKIND_RELATION || - rel->rd_rel->relkind == RELKIND_MATVIEW) { - AlterIndexNamespaces(classRel, rel, oldNspOid, nspOid, objsMoved); - 
AlterSeqNamespaces(classRel, rel, oldNspOid, nspOid, objsMoved, AccessExclusiveLock); - AlterConstraintNamespaces(RelationGetRelid(rel), oldNspOid, nspOid, false, objsMoved); - } - - heap_close(classRel, RowExclusiveLock); -} - -/* - * The guts of relocating a relation to another namespace: fix the pg_class - * entry, and the pg_depend entry if any. Caller must already have - * opened and write-locked pg_class. - */ -void AlterRelationNamespaceInternal( - Relation classRel, Oid relOid, Oid oldNspOid, Oid newNspOid, bool hasDependEntry, ObjectAddresses* objsMoved) -{ - HeapTuple classTup; - Form_pg_class classForm; - ObjectAddress thisobj; - - classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); - if (!HeapTupleIsValid(classTup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relOid))); - } - classForm = (Form_pg_class)GETSTRUCT(classTup); - - Assert(classForm->relnamespace == oldNspOid); - - thisobj.classId = RelationRelationId; - thisobj.objectId = relOid; - thisobj.objectSubId = 0; - - /* - * Do nothing when there's nothing to do. - */ - if (!object_address_present(&thisobj, objsMoved)) { - /* - * Check relation name to ensure that it doesn't conflict with existing synonym. 
- */ - if (!IsInitdb && GetSynonymOid(NameStr(classForm->relname), newNspOid, true) != InvalidOid) { - ereport(ERROR, - (errmsg("relation name is already used by an existing synonym in schema \"%s\"", - get_namespace_name(newNspOid)))); - } - /* check for duplicate name (more friendly than unique-index failure) */ - if (get_relname_relid(NameStr(classForm->relname), newNspOid) != InvalidOid) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_TABLE), - errmsg("relation \"%s\" already exists in schema \"%s\"", - NameStr(classForm->relname), - get_namespace_name(newNspOid)))); - - /* classTup is a copy, so OK to scribble on */ - classForm->relnamespace = newNspOid; - - simple_heap_update(classRel, &classTup->t_self, classTup); - CatalogUpdateIndexes(classRel, classTup); - - /* Update dependency on schema if caller said so */ - if (hasDependEntry && - changeDependencyFor(RelationRelationId, relOid, NamespaceRelationId, oldNspOid, newNspOid) != 1) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("failed to change schema dependency for relation \"%s\"", NameStr(classForm->relname)))); - add_exact_object_address(&thisobj, objsMoved); - - /* Recode time of alter relation namespace. */ - PgObjectType objectType = GetPgObjectTypePgClass(classForm->relkind); - if (objectType != OBJECT_TYPE_INVALID) { - UpdatePgObjectMtime(relOid, objectType); - } - } - - tableam_tops_free_tuple(classTup); -} - -/* - * Move all indexes for the specified relation to another namespace. - * - * Note: we assume adequate permission checking was done by the caller, - * and that the caller has a suitable lock on the owning relation. 
- */ -static void AlterIndexNamespaces( - Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses* objsMoved) -{ - List* indexList = NIL; - ListCell* l = NULL; - - indexList = RelationGetIndexList(rel, true); - - foreach (l, indexList) { - Oid indexOid = lfirst_oid(l); - ObjectAddress thisobj; - - thisobj.classId = RelationRelationId; - thisobj.objectId = indexOid; - thisobj.objectSubId = 0; - - /* - * Note: currently, the index will not have its own dependency on the - * namespace, so we don't need to do changeDependencyFor(). There's no - * row type in pg_type, either. - * - * XXX this objsMoved test may be pointless -- surely we have a single - * dependency link from a relation to each index? - */ - if (!object_address_present(&thisobj, objsMoved)) { - AlterRelationNamespaceInternal(classRel, indexOid, oldNspOid, newNspOid, false, objsMoved); - add_exact_object_address(&thisobj, objsMoved); - } - } - - list_free_ext(indexList); -} - -/* - * Move all SERIAL-column sequences of the specified relation to another - * namespace. - * - * Note: we assume adequate permission checking was done by the caller, - * and that the caller has a suitable lock on the owning relation. - */ -static void AlterSeqNamespaces( - Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses* objsMoved, LOCKMODE lockmode) -{ - Relation depRel; - SysScanDesc scan; - ScanKeyData key[2]; - HeapTuple tup; - - /* - * SERIAL sequences are those having an auto dependency on one of the - * table's columns (we don't care *which* column, exactly). 
- */ - depRel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit( - &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit( - &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - /* we leave refobjsubid unspecified */ - scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 2, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tup); - Relation seqRel; - - /* skip dependencies other than auto dependencies on columns */ - if (depForm->refobjsubid == 0 || depForm->classid != RelationRelationId || depForm->objsubid != 0 || - depForm->deptype != DEPENDENCY_AUTO) - continue; - - /* Use relation_open just in case it's an index */ - seqRel = relation_open(depForm->objid, lockmode); - - /* skip non-sequence relations */ - if (!RELKIND_IS_SEQUENCE(RelationGetForm(seqRel)->relkind)) { - /* No need to keep the lock */ - relation_close(seqRel, lockmode); - continue; - } else { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("There's dependent sequence, but ALTER SEQUENCE SET SCHEMA is not yet supported."))); - } - /* Fix the pg_class and pg_depend entries */ - AlterRelationNamespaceInternal(classRel, depForm->objid, oldNspOid, newNspOid, true, objsMoved); - - /* - * Sequences have entries in pg_type. We need to be careful to move - * them to the new namespace, too. - */ - (void)AlterTypeNamespaceInternal(RelationGetForm(seqRel)->reltype, newNspOid, false, false, objsMoved); - - /* Now we can close it. Keep the lock till end of transaction. */ - relation_close(seqRel, NoLock); - } - - systable_endscan(scan); - - relation_close(depRel, AccessShareLock); -} - -/* - * This code supports - * CREATE TEMP TABLE ... 
ON COMMIT { DROP | PRESERVE ROWS | DELETE ROWS } - * - * Because we only support this for TEMP tables, it's sufficient to remember - * the state in a backend-local data structure. - */ -/* - * Register a newly-created relation's ON COMMIT action. - */ -void register_on_commit_action(Oid relid, OnCommitAction action) -{ - OnCommitItem* oc = NULL; - MemoryContext oldcxt; - - /* - * We needn't bother registering the relation unless there is an ON COMMIT - * action we need to take. - */ - if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS) - return; - - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); - - oc = (OnCommitItem*)palloc(sizeof(OnCommitItem)); - oc->relid = relid; - oc->oncommit = action; - oc->creating_subid = GetCurrentSubTransactionId(); - oc->deleting_subid = InvalidSubTransactionId; - - u_sess->cmd_cxt.on_commits = lcons(oc, u_sess->cmd_cxt.on_commits); - - (void)MemoryContextSwitchTo(oldcxt); -} - -/* - * Unregister any ON COMMIT action when a relation is deleted. - * - * Actually, we only mark the OnCommitItem entry as to be deleted after commit. - */ -void remove_on_commit_action(Oid relid) -{ - ListCell* l = NULL; - - foreach (l, u_sess->cmd_cxt.on_commits) { - OnCommitItem* oc = (OnCommitItem*)lfirst(l); - - if (oc->relid == relid) { - oc->deleting_subid = GetCurrentSubTransactionId(); - break; - } - } -} - -/* - * Perform ON COMMIT actions. - * - * This is invoked just before actually committing, since it's possible - * to encounter errors. 
- */ -void PreCommit_on_commit_actions(void) -{ - ListCell* l = NULL; - List* oids_to_truncate = NIL; - - foreach (l, u_sess->cmd_cxt.on_commits) { - OnCommitItem* oc = (OnCommitItem*)lfirst(l); - - /* Ignore entry if already dropped in this xact */ - if (oc->deleting_subid != InvalidSubTransactionId) - continue; - - switch (oc->oncommit) { - case ONCOMMIT_NOOP: - case ONCOMMIT_PRESERVE_ROWS: - /* Do nothing (there shouldn't be such entries, actually) */ - break; - case ONCOMMIT_DELETE_ROWS: - /* - * If this transaction hasn't accessed any temporary - * relations, we can skip truncating ON COMMIT DELETE ROWS - * tables, as they must still be empty. - */ - if (t_thrd.xact_cxt.MyXactAccessedTempRel) - oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid); - break; - case ONCOMMIT_DROP: { - ObjectAddress object; - - object.classId = RelationRelationId; - object.objectId = oc->relid; - object.objectSubId = 0; - - /* - * Since this is an automatic drop, rather than one - * directly initiated by the user, we pass the - * PERFORM_DELETION_INTERNAL flag. - */ - performDeletion(&object, DROP_CASCADE, PERFORM_DELETION_INTERNAL); - - /* - * Note that table deletion will call - * remove_on_commit_action, so the entry should get marked - * as deleted. - */ - Assert(oc->deleting_subid != InvalidSubTransactionId); - break; - } - } - } - if (oids_to_truncate != NIL) { - heap_truncate(oids_to_truncate); - CommandCounterIncrement(); /* XXX needed? */ - } -} - -/* - * Post-commit or post-abort cleanup for ON COMMIT management. - * - * All we do here is remove no-longer-needed OnCommitItem entries. - * - * During commit, remove entries that were deleted during this transaction; - * during abort, remove those created during this transaction. 
- */ -void AtEOXact_on_commit_actions(bool isCommit) -{ - ListCell* cur_item = NULL; - ListCell* prev_item = NULL; - - prev_item = NULL; - cur_item = list_head(u_sess->cmd_cxt.on_commits); - - while (cur_item != NULL) { - OnCommitItem* oc = (OnCommitItem*)lfirst(cur_item); - - if (isCommit ? oc->deleting_subid != InvalidSubTransactionId : oc->creating_subid != InvalidSubTransactionId) { - /* cur_item must be removed */ - u_sess->cmd_cxt.on_commits = list_delete_cell(u_sess->cmd_cxt.on_commits, cur_item, prev_item); - pfree_ext(oc); - if (prev_item != NULL) - cur_item = lnext(prev_item); - else - cur_item = list_head(u_sess->cmd_cxt.on_commits); - } else { - /* cur_item must be preserved */ - oc->creating_subid = InvalidSubTransactionId; - oc->deleting_subid = InvalidSubTransactionId; - prev_item = cur_item; - cur_item = lnext(prev_item); - } - } -} - -/* - * Post-subcommit or post-subabort cleanup for ON COMMIT management. - * - * During subabort, we can immediately remove entries created during this - * subtransaction. During subcommit, just relabel entries marked during - * this subtransaction as being the parent's responsibility. - */ -void AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid) -{ - ListCell* cur_item = NULL; - ListCell* prev_item = NULL; - - prev_item = NULL; - cur_item = list_head(u_sess->cmd_cxt.on_commits); - - while (cur_item != NULL) { - OnCommitItem* oc = (OnCommitItem*)lfirst(cur_item); - - if (!isCommit && oc->creating_subid == mySubid) { - /* cur_item must be removed */ - u_sess->cmd_cxt.on_commits = list_delete_cell(u_sess->cmd_cxt.on_commits, cur_item, prev_item); - pfree_ext(oc); - if (prev_item != NULL) - cur_item = lnext(prev_item); - else - cur_item = list_head(u_sess->cmd_cxt.on_commits); - } else { - /* cur_item must be preserved */ - if (oc->creating_subid == mySubid) - oc->creating_subid = parentSubid; - if (oc->deleting_subid == mySubid) - oc->deleting_subid = isCommit ? 
parentSubid : InvalidSubTransactionId; - prev_item = cur_item; - cur_item = lnext(prev_item); - } - } -} - -/* - * This is intended as a callback for RangeVarGetRelidExtended(). It allows - * the relation to be locked only if (1) it's a plain table, materialized - * view, or TOAST table and (2) the current user is the owner (or the - * superuser). This meets the permission-checking needs of CLUSTER, REINDEX - * TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be - * used by all. - */ -void RangeVarCallbackOwnsTable(const RangeVar* relation, Oid relId, Oid oldRelId, bool target_is_partition, void* arg) -{ - char relkind; - - /* Nothing to do if the relation was not found. */ - if (!OidIsValid(relId)) { - return; - } - - /* - * If the relation does exist, check whether it's an index. But note that - * the relation might have been dropped between the time we did the name - * lookup and now. In that case, there's nothing to do. - */ - relkind = get_rel_relkind(relId); - if (!relkind) { - return; - } - if (relkind != RELKIND_RELATION && - relkind != RELKIND_TOASTVALUE && - relkind != RELKIND_MATVIEW) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table or materialized view", relation->relname))); - } - /* Check permissions */ - AclResult aclresult = pg_class_aclcheck(relId, GetUserId(), ACL_INDEX); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relId, GetUserId())) { - aclcheck_error(aclresult, ACL_KIND_CLASS, relation->relname); - } -} - -/* - * Callback to RangeVarGetRelidExtended(), similar to - * RangeVarCallbackOwnsTable() but without checks on the type of the relation. - */ -void RangeVarCallbackOwnsRelation( - const RangeVar* relation, Oid relId, Oid oldRelId, bool target_is_partition, void* arg) -{ - HeapTuple tuple; - - /* Nothing to do if the relation was not found. 
*/ - if (!OidIsValid(relId)) { - return; - } - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relId)); - if (!HeapTupleIsValid(tuple)) { - /* should not happen */ - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relId))); - } - - AclResult aclresult = pg_class_aclcheck(relId, GetUserId(), ACL_INDEX); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relId, GetUserId()) && - !HasSpecAnyPriv(GetUserId(), CREATE_ANY_INDEX, false)) { - aclcheck_error(aclresult, ACL_KIND_CLASS, relation->relname); - } - if (!g_instance.attr.attr_common.allowSystemTableMods && !u_sess->attr.attr_common.IsInplaceUpgrade && - IsSystemClass((Form_pg_class)GETSTRUCT(tuple))) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", relation->relname))); - } - - ReleaseSysCache(tuple); -} - -/* - * Common RangeVarGetRelid callback for rename, set schema, and alter table - * processing. - */ -static void RangeVarCallbackForAlterRelation( - const RangeVar* rv, Oid relid, Oid oldrelid, bool target_is_partition, void* arg) -{ - Node* stmt = (Node*)arg; - ObjectType reltype; - HeapTuple tuple; - Form_pg_class classform; - AclResult aclresult; - char relkind; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - return; /* concurrently dropped */ - } - classform = (Form_pg_class)GETSTRUCT(tuple); - relkind = classform->relkind; - - /* - * Extract the specified relation type from the statement parse tree. 
- */ - if (IsA(stmt, RenameStmt)) { - reltype = ((RenameStmt*)stmt)->renameType; - } else if (IsA(stmt, AlterObjectSchemaStmt)) { - reltype = ((AlterObjectSchemaStmt*)stmt)->objectType; - } else if (IsA(stmt, AlterTableStmt)) { - reltype = ((AlterTableStmt*)stmt)->relkind; - } else { - reltype = OBJECT_TABLE; /* placate compiler */ - ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized node type: %d", (int)nodeTag(stmt)))); - } - - /* - * Must own relation or have the ALTER privilege of the relation. - * But if we are truncating a partition, we will not check owner. - */ - if (reltype == OBJECT_TYPE) { - Oid typeoid = get_typeoid(classform->relnamespace, NameStr(classform->relname)); - aclresult = pg_type_aclcheck(typeoid, GetUserId(), ACL_ALTER); - } else if (reltype == OBJECT_INDEX || reltype == OBJECT_PARTITION_INDEX) { - Oid tableoid = IndexGetRelation(relid, false); - aclresult = pg_class_aclcheck(tableoid, GetUserId(), ACL_INDEX); - bool anyResult = false; - if (aclresult != ACLCHECK_OK && !IsSysSchema(GetNamespaceIdbyRelId(tableoid))) { - anyResult = HasSpecAnyPriv(GetUserId(), ALTER_ANY_INDEX, false); - } - aclresult = anyResult ? ACLCHECK_OK : aclresult; - } else { - aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_ALTER); - } - - if (!target_is_partition && aclresult != ACLCHECK_OK && !pg_class_ownercheck(relid, GetUserId()) && - !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rv->relname); - } - - /* No system table modifications unless explicitly allowed or during inplace upgrade. 
*/ - if (!(g_instance.attr.attr_common.allowSystemTableMods && relid >= FirstBootstrapObjectId) && - !u_sess->attr.attr_common.IsInplaceUpgrade && IsSystemClass(classform)) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", rv->relname))); - } - - switch (relid) { - case DatabaseRelationId: - case AuthIdRelationId: - case AuthMemRelationId: - case RelationRelationId: - case AttributeRelationId: - case ProcedureRelationId: - case TypeRelationId: - case UserStatusRelationId: - /* - * Schema change of these nailed-in system catalogs is very dangerous!!! - * Later, we may need an exclusive GUC variable to enable such change. - */ - if (!u_sess->attr.attr_common.IsInplaceUpgrade) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: system catalog \"%s\" can not be altered", rv->relname))); - break; - default: - break; - } - /* - * For ALTER .. RENAME, check permissions: the user must (still) - * have CREATE rights on the containing namespace. - */ - if (IsA(stmt, RenameStmt)) { - aclresult = pg_namespace_aclcheck(classform->relnamespace, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) { - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(classform->relnamespace)); - } - } - - /* - * For compatibility with prior releases, we allow ALTER TABLE to be used - * with most other types of relations (but not composite types). We allow - * similar flexibility for ALTER INDEX in the case of RENAME, but not - * otherwise. Otherwise, the user must select the correct form of the - * command for the relation at issue. 
- */ - if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a sequence", rv->relname))); - } - - if (reltype == OBJECT_LARGE_SEQUENCE && relkind != RELKIND_LARGE_SEQUENCE) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a large sequence", rv->relname))); - } - - if (reltype == OBJECT_VIEW && relkind != RELKIND_VIEW && relkind != RELKIND_CONTQUERY) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a view", rv->relname))); - } - - if (reltype == OBJECT_CONTQUERY && relkind != RELKIND_CONTQUERY) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a contview", rv->relname))); - } - - if (reltype == OBJECT_MATVIEW && relkind != RELKIND_MATVIEW) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a materialized view", rv->relname))); - } - - if (reltype == OBJECT_FOREIGN_TABLE && relkind != RELKIND_FOREIGN_TABLE && relkind != RELKIND_STREAM) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a foreign table", rv->relname))); - } - - if (reltype == OBJECT_STREAM && relkind != RELKIND_STREAM) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a stream", rv->relname))); - } - - if (reltype == OBJECT_TYPE && relkind != RELKIND_COMPOSITE_TYPE) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a composite type", rv->relname))); - } - - if (reltype == OBJECT_INDEX && relkind != RELKIND_INDEX && relkind != RELKIND_GLOBAL_INDEX && - !IsA(stmt, RenameStmt)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not an index", rv->relname))); - } - /* - * Don't allow ALTER TABLE on composite types. We want people to use ALTER - * TYPE for that. 
- */ - if (reltype != OBJECT_TYPE && relkind == RELKIND_COMPOSITE_TYPE) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a composite type", rv->relname), - errhint("Use ALTER TYPE instead."))); - } - - if (reltype != OBJECT_FOREIGN_TABLE && relkind == RELKIND_FOREIGN_TABLE) { -#ifdef ENABLE_MOT - if (isMOTFromTblOid(relid)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table", rv->relname), - errhint("Use ALTER FOREIGN TABLE to alter a foreign table."))); - } else { -#endif - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a foreign table, which does not support column constraints.", rv->relname))); -#ifdef ENABLE_MOT - } -#endif - } - -#ifdef ENABLE_MULTIPLE_NODES - if (reltype != OBJECT_STREAM && relkind == RELKIND_STREAM) { - if (!range_var_is_stream(rv, true)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a stream, which does not support column constraints.", rv->relname))); - } - } -#endif - - /* - * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be moved - * to a different schema, such as indexes and TOAST tables. 
- */ - if (IsA(stmt, AlterObjectSchemaStmt) && - relkind != RELKIND_RELATION && - relkind != RELKIND_VIEW && - relkind != RELKIND_CONTQUERY && - !RELKIND_IS_SEQUENCE(relkind) && - relkind != RELKIND_STREAM && - relkind != RELKIND_FOREIGN_TABLE && - relkind != RELKIND_MATVIEW) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, sequence, or foreign table", rv->relname))); - } - - ReleaseSysCache(tuple); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : check if the partition is in use - * Description : - * Notes : - */ -void checkPartNotInUse(Partition part, const char* stmt) -{ - const int expected_refcnt = 1; - - if (part->pd_refcnt != expected_refcnt) { - ereport(ERROR, - (errcode(ERRCODE_OBJECT_IN_USE), - /* translator: first %s is a SQL command, eg ALTER TABLE */ - errmsg("cannot %s \"%s\" because it is in use", stmt, PartitionGetPartitionName(part)))); - } -} - -extern Node* GetColumnRef(Node* key, bool* isExpr, bool* isFunc); -/* - * @@GaussDB@@ - * Target : data partition - * Brief : get paritionkey's sequence in column definition - * Description : - * Notes : invoker to free the return list - */ -List* GetPartitionkeyPos(List* partitionkeys, List* schema, bool* partkeyIsFunc) -{ - ListCell* partitionkey_cell = NULL; - ListCell* schema_cell = NULL; - int column_count = 0; - List* pos = NULL; - - if (schema == NIL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("there is no column for a partitioned table!"))); - } - - int len = schema->length; - if (partitionkeys == NIL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("there is no partition key!"))); - } - - bool* is_exist = (bool*)palloc(len * sizeof(bool)); - errno_t rc = EOK; - rc = memset_s(is_exist, len * sizeof(bool), 0, len * sizeof(bool)); - securec_check(rc, "\0", "\0"); - bool isExpr = false; - foreach (partitionkey_cell, partitionkeys) { - ColumnRef* partitionkey_ref = 
(ColumnRef*)GetColumnRef((Node*)lfirst(partitionkey_cell), &isExpr, partkeyIsFunc); - if (!partitionkey_ref) - ereport(ERROR,(errcode(ERRCODE_UNDEFINED_COLUMN),(errmsg("The partition key doesn't have any column.")))); - if (isExpr && partitionkeys->length > 1) - ereport(ERROR,(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),(errmsg("The multi partition expr keys are not supported.")))); - char* partitonkey_name = ((Value*)linitial(partitionkey_ref->fields))->val.str; - - foreach (schema_cell, schema) { - ColumnDef* schema_def = (ColumnDef*)lfirst(schema_cell); - - /* find the column that has the same name as the partitionkey */ - if (!strcmp(partitonkey_name, schema_def->colname)) { - - /* - * Generated columns cannot work: They are computed after BEFORE - * triggers, but partition routing is done before all triggers. - */ - if (schema_def->generatedCol) { - ereport(ERROR, (errmodule(MOD_GEN_COL), errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cannot use generated column in partition key"), - errdetail("Column \"%s\" is a generated column.", partitonkey_name))); - } - - /* duplicate partitionkey name */ - if (is_exist[column_count]) { - pfree_ext(is_exist); - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("duplicate partition key: %s", partitonkey_name))); - } - - /* recoed attribute info when the partitionkey is unique */ - is_exist[column_count] = true; - break; - } - - column_count++; - } - - /* fail to find the partitionkey in column definition */ - if (column_count >= len) { - pfree_ext(is_exist); - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("undefined column\"%s\" is used as a partitioning column", partitonkey_name))); - } - - pos = lappend_int(pos, column_count); - column_count = 0; - partitonkey_name = NULL; - } - - pfree_ext(is_exist); - - return pos; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : check the partitionkey's datatype - * Description : - * Notes : - */ -static void 
CheckRangePartitionKeyType(FormData_pg_attribute* attrs, List* pos) -{ - int location = 0; - ListCell* cell = NULL; - Oid typoid = InvalidOid; - foreach (cell, pos) { - bool result = false; - - location = lfirst_int(cell); - typoid = attrs[location].atttypid; - /* check datatype for range partitionkey */ - result = CheckRangePartitionKeyType(typoid); - - if (!result) { - list_free_ext(pos); - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column %s cannot serve as a range partitioning column because of its datatype", - NameStr(attrs[location].attname)))); - } - } -} - -static void CheckListPartitionKeyType(FormData_pg_attribute* attrs, List* pos) -{ - if (pos == NULL || attrs == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partiiton table definition"))); - } - int location; - ListCell* cell = NULL; - - foreach (cell, pos) { - location = lfirst_int(cell); - if (!CheckListPartitionKeyType(attrs[location].atttypid)) { - list_free_ext(pos); - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column %s cannot serve as a list partitioning column because of its datatype", - NameStr(attrs[location].attname)))); - } - } -} - -static void CheckHashPartitionKeyType(FormData_pg_attribute* attrs, List* pos) -{ - int location = 0; - ListCell* cell = NULL; - Oid typeOid = InvalidOid; - - foreach (cell, pos) { - location = lfirst_int(cell); - typeOid = attrs[location].atttypid; - /* Check datatype for hash partitionkey */ - if (!CheckHashPartitionKeyType(typeOid)) { - list_free_ext(pos); - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column %s cannot serve as a hash partitioning column because of its datatype", - NameStr(attrs[location].attname)))); - } - } -} - -static void CheckIntervalPartitionKeyType(FormData_pg_attribute* attrs, List* pos) -{ - /* must be one partitionkey for interval partition, have checked before */ - Assert(pos->length == 1); - - ListCell* cell = list_head(pos); - int 
location = lfirst_int(cell); - Oid typoid = attrs[location].atttypid; - if (typoid != TIMESTAMPOID && typoid != TIMESTAMPTZOID && typoid != DATEOID) { - list_free_ext(pos); - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column %s cannot serve as a interval partitioning column because of its datatype", - NameStr(attrs[location].attname)))); - } -} - -static void CheckIntervalValue( - const FormData_pg_attribute* attrs, const List* pos, const IntervalPartitionDefState* intervalPartDef) -{ - /* must be one partitionkey for interval partition, have checked before */ - Assert(pos->length == 1); - - ListCell* cell = list_head(pos); - int location = lfirst_int(cell); - Oid typoid = attrs[location].atttypid; - if (typoid != DATEOID) { - return; - } - - int32 typmod = -1; - Interval* interval = NULL; - A_Const* node = reinterpret_cast(intervalPartDef->partInterval); - interval = char_to_interval(node->val.val.str, typmod); - if (interval->time != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("the interval of DATE type must be an integer multiple of days"))); - } - pfree(interval); -} - -/* - * @@GaussDB@@ - * Target : value-partition type check - * Brief : check the value partitionkey's datatype - * Description : - * Notes : - */ -void CheckValuePartitionKeyType(FormData_pg_attribute* attrs, List* pos) -{ - int location = 0; - ListCell* cell = NULL; - Oid typoid = InvalidOid; - - foreach (cell, pos) { - location = lfirst_int(cell); - typoid = attrs[location].atttypid; - /* - * Check datatype for partitionkey NOTE: currently we reuse distribution - * key's restriction as value-based parition is equal-evaluated we can't - * the same criteria with Range-Partition Key - */ - /* We just error-out first partition-column with invalid datatype */ - if (!(typoid == INT8OID || typoid == INT1OID || typoid == INT2OID || typoid == INT4OID || - typoid == NUMERICOID || typoid == CHAROID || typoid == BPCHAROID || typoid == VARCHAROID || - 
typoid == NVARCHAR2OID || typoid == DATEOID || typoid == TIMEOID || typoid == TIMESTAMPOID || - typoid == TIMESTAMPTZOID || typoid == INTERVALOID || typoid == TIMETZOID || - typoid == SMALLDATETIMEOID || typoid == TEXTOID)) { - list_free_ext(pos); - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" cannot be served as a value-partitioning column because of its datatype [%s]", - NameStr(attrs[location].attname), - format_type_with_typemod(attrs[location].atttypid, attrs[location].atttypmod)))); - } - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : check datatype for range partitionkey - * Description : - * Notes : - */ -static bool CheckRangePartitionKeyType(Oid typoid) -{ - bool result = true; - - switch (typoid) { - case INT2OID: - result = true; - break; - - case INT4OID: - result = true; - break; - - case INT8OID: - result = true; - break; - - case DATEOID: - result = true; - break; - - case TIMESTAMPOID: - result = true; - break; - - case TIMESTAMPTZOID: - result = true; - break; - - case NUMERICOID: - result = true; - break; - - case CLOBOID: - result = true; - break; - - case TEXTOID: - result = true; - break; - - case CHAROID: - result = true; - break; - - case BPCHAROID: - result = true; - break; - - case VARCHAROID: - result = true; - break; - - case NVARCHAR2OID: - result = true; - break; - - case FLOAT4OID: - result = true; - break; - - case FLOAT8OID: - result = true; - break; - - case NAMEOID: - result = true; - break; - - default: - result = false; - break; - } - - return result; -} - -static bool CheckListPartitionKeyType(Oid typoid) -{ - switch (typoid) { - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case NUMERICOID: - case CHAROID: - case VARCHAROID: - case BPCHAROID: - case NVARCHAR2OID: - case DATEOID: - case TIMESTAMPOID: - case TIMESTAMPTZOID: - return true; - default: - return false; - } -} - -static bool CheckHashPartitionKeyType(Oid typoid) -{ - switch (typoid) { - case 
INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case NUMERICOID: - case CHAROID: - case BPCHAROID: - case VARCHAROID: - case TEXTOID: - case DATEOID: - case TIMEOID: - case NVARCHAR2OID: - case TIMESTAMPOID: - case TIMESTAMPTZOID: - return true; - default: - return false; - } -} - - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : check tablespace permission for partition - * Description : - * Notes : maybe the owner of the partition is not the current user - */ -static void CheckPartitionTablespace(const char* spcname, Oid owner) -{ - Oid spcoid = InvalidOid; - - if (spcname == NULL || !OidIsValid(owner)) - return; - - spcoid = get_tablespace_oid(spcname, false); - - /* Check permissions except when using database's default */ - if (OidIsValid(spcoid) && u_sess->proc_cxt.MyDatabaseTableSpace != spcoid) { - AclResult aclresult; - - aclresult = pg_tablespace_aclcheck(spcoid, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, get_tablespace_name(spcoid)); - } - - /* In all cases disallow placing user relations in pg_global */ - if (spcoid == GLOBALTABLESPACE_OID) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("only shared relations can be placed in pg_global tablespace"))); - } -} - -Oid GetPartkeyExprType(Oid* target_oid, int* target_mod) -{ - *target_oid = INT8OID; - Relation typeRel = heap_open(TypeRelationId, RowExclusiveLock); - HeapTuple typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(*target_oid)); - Form_pg_type pgTypeForm = (Form_pg_type)GETSTRUCT(typeTuple); - *target_mod = pgTypeForm->typtypmod; - Oid typcollation = pgTypeForm->typcollation; - ReleaseSysCache(typeTuple); - heap_close(typeRel, RowExclusiveLock); - return typcollation; -} - -static void FillListPartitionValueList(List** result, RowExpr* row, const List* keyPos, FormData_pg_attribute* attrs, - int boundId) -{ - Const* targetExpr = NULL; - ListCell* keyCell = NULL; - ListCell* posCell 
= NULL; - - forboth (keyCell, row->args, posCell, keyPos) { - /* transform the const to target datatype */ - targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(posCell)], (Const*)lfirst(keyCell), false, false); - if (targetExpr == NULL) { - for (int i = 0; i < boundId; i++) { - list_free_ext(result[i]); - } - pfree_ext(result); - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("partition key value must be const or const-evaluable expression"))); - } - targetExpr->constcollid = attrs[lfirst_int(posCell)].attcollation; - result[boundId] = lappend(result[boundId], targetExpr); - } -} - -static List **GetListPartitionValueLists(const List *keyPos, FormData_pg_attribute *attrs, List *value, - bool partkeyIsFunc) -{ - Node* cell = NULL; - ListCell* valueCell = NULL; - int count = 0; - Const* targetExpr = NULL; - List** result = (List**)palloc0(value->length * sizeof(List*)); - - foreach(valueCell, value) { - cell = (Node*)lfirst(valueCell); - if (IsA(cell, RowExpr)) { /* Multi-keys partition boundary values */ - FillListPartitionValueList(result, (RowExpr*)cell, keyPos, attrs, count++); - continue; - } - /* del with maxvalue */ - if (((Const*)cell)->ismaxvalue) { - Const* max = makeNode(Const); - max->xpr.type = T_Const; - max->ismaxvalue = true; - result[count] = lappend(result[count], max); - count++; - continue; - } - - /* transform the const to target datatype */ - targetExpr = (Const*)GetTargetValue(&attrs[lfirst_int(keyPos->head)], (Const*)cell, false, partkeyIsFunc); - if (targetExpr == NULL) { - for (int i = 0; i < count; i++) { - list_free_ext(result[i]); - } - pfree_ext(result); - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("partition key value must be const or const-evaluable expression"))); - } - if (partkeyIsFunc) { - Oid target_oid = InvalidOid; - int target_mod = -1; - targetExpr->constcollid = GetPartkeyExprType(&target_oid, &target_mod); - } else { - targetExpr->constcollid = attrs[lfirst_int(keyPos->head)].attcollation; 
- } - result[count] = lappend(result[count], targetExpr); - count++; - } - - return result; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : transform the partition value as an arry - * Description : - * Notes : the invoker should free the arry - */ -Const* GetPartitionValue(List* pos, FormData_pg_attribute* attrs, List* value, bool isinterval, bool isPartition, bool partkeyIsFunc) -{ - Const* result = NULL; - Const* cell = NULL; - ListCell* pos_cell = NULL; - ListCell* value_cell = NULL; - int valuepos = 0; - int count = 0; - Const* target_expr = NULL; - - /* lack of partitionkey value */ - if (pos->length > value->length) { - list_free_ext(pos); - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("%s bound list contains too few elements", (isPartition ? "partition" : "distribution")))); - } - - if (pos->length < value->length) { - list_free_ext(pos); - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("%s bound list contains too many elements", (isPartition ? "partition" : "distribution")))); - } - - result = (Const*)palloc(pos->length * sizeof(Const)); - errno_t rc = memset_s(result, pos->length * sizeof(Const), 0, pos->length * sizeof(Const)); - securec_check(rc, "", ""); - - forboth(pos_cell, pos, value_cell, value) - { - valuepos = lfirst_int(pos_cell); - cell = (Const*)lfirst(value_cell); - - /* del with maxvalue */ - if (cell->ismaxvalue) { - result[count].xpr.type = T_Const; - result[count].ismaxvalue = cell->ismaxvalue; - - count++; - continue; - } - - /* transform the const to target datatype */ - target_expr = (Const*)GetTargetValue(&attrs[valuepos], cell, isinterval, partkeyIsFunc); - if (target_expr == NULL) { - pfree_ext(result); - list_free_ext(pos); - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("%s key value must be const or const-evaluable expression", - (isPartition ? 
"partition" : "distribution")))); - } - - result[count] = *target_expr; - if (partkeyIsFunc) { - Oid target_oid = InvalidOid; - int target_mod = -1; - result[count].constcollid = GetPartkeyExprType(&target_oid, &target_mod); - } else { - result[count].constcollid = attrs[valuepos].attcollation; - } - - count++; - } - - Assert(count == pos->length); - - return result; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -Node* GetTargetValue(Form_pg_attribute attrs, Const* src, bool isinterval, bool partkeyIsFunc) -{ - Oid target_oid = InvalidOid; - int target_mod = -1; - Node* expr = NULL; - Node* target_expr = NULL; - - Assert(src); - - /* transform the const to target datatype */ - if (partkeyIsFunc) { - GetPartkeyExprType(&target_oid, &target_mod); - } else if (!ConfirmTypeInfo(&target_oid, &target_mod, src, attrs, isinterval)) { - return NULL; - } - - expr = (Node*)coerce_to_target_type( - NULL, (Node*)src, exprType((Node*)src), target_oid, target_mod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); - if (expr == NULL) { - return NULL; - } - - /* convert source const's charset to target partkey's charset */ - if (!partkeyIsFunc && DB_IS_CMPT(B_FORMAT) && OidIsValid(attrs->attcollation)) { - assign_expr_collations(NULL, expr); - if (attrs->attcollation != exprCollation(expr)) { - int attcharset = get_valid_charset_by_collation(attrs->attcollation); - expr = coerce_to_target_charset(expr, attcharset, target_oid, target_mod, attrs->attcollation); - - Assert(expr != NULL); - if (!IsA(expr, Const)) { - expr = (Node*)evaluate_expr((Expr*)expr, target_oid, target_mod, attrs->attcollation); - } else if (attrs->attcollation != exprCollation(expr)) { - if (expr == (Node*)src) { - /* We are not sure where src comes from, avoid set src->constcollid directly. */ - expr = (Node*)copyObject((void*)src); - } - /* - * The expr is used to compute hash or compare it with the partition boundary. 
- * Set the correct collation to ensure the correctness of the partition pruning and routing. - */ - exprSetCollation(expr, attrs->attcollation); - } - } - } - - switch (nodeTag(expr)) { - /* do nothing for Const */ - case T_Const: - target_expr = expr; - break; - - /* get return value for function expression */ - case T_FuncExpr: { - FuncExpr* funcexpr = (FuncExpr*)expr; - expr = (Node*)evaluate_expr( - (Expr*)funcexpr, exprType((Node*)funcexpr), exprTypmod((Node*)funcexpr), funcexpr->funccollid); - if (T_Const == nodeTag((Node*)expr)) { - target_expr = expr; - } - } break; - - case T_RelabelType: { - expr = (Node*)(((RelabelType*)expr)->arg); - if (T_Const == nodeTag((Node*)expr)) { - target_expr = expr; - } - } break; - - default: - target_expr = NULL; - break; - } - - return target_expr; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static bool ConfirmTypeInfo(Oid* target_oid, int* target_mod, Const* src, Form_pg_attribute attrs, bool isinterval) -{ - Assert(src && target_oid && target_mod); - - *target_oid = attrs->atttypid; - *target_mod = attrs->atttypmod; - - if (isinterval) { - Oid srcid = src->consttype; - - if ((*target_oid == DATEOID) || (*target_oid == TIMESTAMPOID) || (*target_oid == TIMESTAMPTZOID)) { - if (srcid == INTERVALOID) { - *target_oid = INTERVALOID; - *target_mod = src->consttypmod; - } else if (srcid == UNKNOWNOID) { - *target_oid = INTERVALOID; - *target_mod = -1; - } else { - return false; - } - } - } - - return true; -} - -static void ReportListPartitionIntersect(const List* partitionList, List** values[], int partValueLen[], - int idxFirst, int idxSecond) -{ - for (int e = 0; e < partitionList->length; ++e) { - for (int f = 0; f < partValueLen[e]; ++f) { - list_free_ext(values[e][f]); - } - pfree_ext(values[e]); - } - int i = 0; - ListCell* cell; - if (idxFirst == idxSecond) { - foreach (cell, partitionList) { - if (i == idxFirst) { - ereport(ERROR, 
(errcode(ERRCODE_INVALID_OPERATION), - errmsg("list partition %s has overlapped value", - ((ListPartitionDefState*)lfirst(cell))->partitionName))); - } - ++i; - } - } else { - int bigIdx = (idxFirst >= idxSecond) ? idxFirst : idxSecond; - int smallIdx = (idxFirst >= idxSecond) ? idxSecond : idxFirst; - char* partNameFirst = NULL; - char* partNameSecond = NULL; - foreach (cell, partitionList) { - if (i == smallIdx) { - partNameFirst = ((ListPartitionDefState*)lfirst(cell))->partitionName; - } else if (i == bigIdx) { - partNameSecond = ((ListPartitionDefState*)lfirst(cell))->partitionName; - break; - } - ++i; - } - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("list partition %s and %s has overlapped value", partNameFirst, partNameSecond))); - } -} - -/* Each Const in RowExpr corresponds to a partition key column. Check length and datatype. */ -static void sqlcmd_check_list_partition_rowexpr_bound(RowExpr *bound, const List *key_pos, FormData_pg_attribute *attrs, - List *part_def_list, char* part_name) -{ - if (list_length(bound->args) != list_length(key_pos)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid partition values"), - errdetail("list partition values in \"%s\" does not match the number of partition keys", part_name))); - } - - ListCell* keyvalue_cell = NULL; - ListCell* keypos_cell = NULL; - /* check multi-keys partition boundary values datatype */ - forboth (keyvalue_cell, bound->args, keypos_cell, key_pos) { - Const* key_value = (Const*)lfirst(keyvalue_cell); - FormData_pg_attribute key_pg_attr = attrs[lfirst_int(keypos_cell)]; - if (!can_coerce_type(1, &key_value->consttype, &key_pg_attr.atttypid, COERCION_IMPLICIT)) { - list_free_ext(part_def_list); - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("list partition value in \"%s\" does not match datatype of partition key \"%s\"", - part_name, NameStr(key_pg_attr.attname)))); - } - } -} - -/* Check the boundary length and datatype of each list 
partition. */ -static void sqlcmd_check_list_partition_boundary(ListPartitionDefState *part_def, const List *key_pos, - FormData_pg_attribute *attrs, List *part_def_list, bool* has_default) -{ - List *boundaries = part_def->boundary; - char* part_name = part_def->partitionName; - Const* key_value_const = NULL; - Oid first_key_type = attrs[linitial_int(key_pos)].atttypid; - - foreach_cell(bound_cell, boundaries) { - Node* bound = (Node*)lfirst(bound_cell); - /* check multi-keys partition boundary values */ - if (IsA(bound, RowExpr)) { - sqlcmd_check_list_partition_rowexpr_bound((RowExpr*)bound, key_pos, attrs, part_def_list, part_name); - continue; - } - - key_value_const = (Const *)bound; - if (key_value_const->ismaxvalue) { - /* default partition boundary can only have one max Const */ - if (list_length(boundaries) != 1) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("The default partition can have only one boundary value."))); - } - /* Cannot have two default partition */ - if (*has_default) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Partition table has multiple default partitions"))); - } - *has_default = true; - continue; - } - /* check single-key partition */ - if (list_length(key_pos) != 1) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid partition values"), - errdetail("list partition values in \"%s\" does not match the number of partition keys", part_name))); - } - /* check single-key partition boundary value datatype */ - if (!can_coerce_type(1, &key_value_const->consttype, &first_key_type, COERCION_IMPLICIT)) { - list_free_ext(part_def_list); - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("list partition value in \"%s\" does not match datatype of partition key \"%s\"", - part_name, NameStr(attrs[linitial_int(key_pos)].attname)))); - } - } -} - -static void sqlcmd_check_list_partition_have_duplicate_values(List** key_values_array[], int part_idx, int bound_idx, - List 
*partition_list, int part_value_len[]) -{ - ListCell* c1 = NULL; - ListCell* c2 = NULL; - for (int k = 0; k < bound_idx; ++k) { - forboth (c1, key_values_array[part_idx][bound_idx], c2, key_values_array[part_idx][k]) { - if (ConstCompareWithNull((Const*)lfirst(c1), (Const*)lfirst(c2), ((Const*)lfirst(c2))->constcollid) != 0) { - break; - } - } - /* All key values are equal, the key value set is duplicate. */ - if (c1 == NULL) { - ReportListPartitionIntersect( - partition_list, key_values_array, part_value_len, part_idx, part_idx); - } - } -} - -static void sqlcmd_check_two_list_partition_values_overlapped(List** key_values_array[], int p1_idx, int b1_idx, - int p2_idx, int b2_idx, List *partition_list, int part_value_len[]) -{ - ListCell* c1 = NULL; - ListCell* c2 = NULL; - Const* con1 = NULL; - Const* con2 = NULL; - forboth (c1, key_values_array[p1_idx][b1_idx], c2, key_values_array[p2_idx][b2_idx]) { - con1 = (Const*)lfirst(c1); - con2 = (Const*)lfirst(c2); - if (con1->ismaxvalue || con2->ismaxvalue) { - Assert(!(con1->ismaxvalue && con2->ismaxvalue)); - break; - } - if (ConstCompareWithNull(con1, con2, con2->constcollid) != 0) { - break; - } - } - /* All key values are equal, the key value set is duplicate. 
*/ - if (c1 == NULL) { - ReportListPartitionIntersect(partition_list, key_values_array, part_value_len, p2_idx, p1_idx); - } -} - -static void sqlcmd_check_list_value_overlapped_with_others(List** key_values_array[], int part_idx, int bound_idx, - List *partition_list, int part_value_len[]) -{ - for (int m = 0; m < part_idx; ++m) { - for (int n = 0; n < part_value_len[m]; ++n) { - sqlcmd_check_two_list_partition_values_overlapped( - key_values_array, part_idx, bound_idx, m, n, partition_list, part_value_len); - } - } -} - -void CompareListValue(const List* pos, FormData_pg_attribute* attrs, List *partitionList, bool partkeyIsFunc) -{ - if (pos == NULL || attrs == NULL) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partiiton table definition"))); - } - - List** values_array[partitionList->length]; // save all the list partion values. - int part_value_len[partitionList->length]; // save list length of each partitionList - int part_list_idx = 0; - bool has_default = false; - /* Check if datatype of values ars consistent with partition keys' */ - foreach_cell(cell, partitionList) { - ListPartitionDefState *part_def = (ListPartitionDefState*)lfirst(cell); - sqlcmd_check_list_partition_boundary(part_def, pos, attrs, partitionList, &has_default); - part_value_len[part_list_idx] = list_length(part_def->boundary); - values_array[part_list_idx] = GetListPartitionValueLists(pos, attrs, part_def->boundary, partkeyIsFunc); - ++part_list_idx; - } - - /* Check if list partition has intersect values */ - /* XXX need a better way to reduce time complexity */ - for (int i = 0; i < partitionList->length; ++i) { - for (int j = 0; j < part_value_len[i]; ++j) { - /* Check if value overlapped in same list */ - sqlcmd_check_list_partition_have_duplicate_values(values_array, i, j, partitionList, part_value_len); - /* Check if value overlapped in different list */ - sqlcmd_check_list_value_overlapped_with_others(values_array, i, j, partitionList, 
part_value_len); - } - } - for (int e = 0; e < partitionList->length; ++e) { - for (int f = 0; f < part_value_len[e]; ++f) { - list_free_ext(values_array[e][f]); - } - pfree_ext(values_array[e]); - } -} - - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -void ComparePartitionValue(List *pos, FormData_pg_attribute *attrs, List *partitionList, bool isPartition, - bool partkeyIsFunc) -{ - Const* pre_value = NULL; - Const* cur_value = NULL; - Const** pre = NULL; - Const** cur = NULL; - ListCell* cell = NULL; - List* value = NIL; - bool is_intreval = false; - int result = 0; - int counter = 0; - errno_t rc = EOK; - - if (pos == NULL || attrs == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg(isPartition ? "invalid range partition table definition" : - "invalid range distribution table definition"))); - } - /* no partitionvalues */ - if (!pos->length) { - list_free_ext(pos); - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg(isPartition ? 
"there is no partition key" : "there is no distribution key"))); - } - pre = (Const**)palloc0(pos->length * sizeof(Const*)); - cur = (Const**)palloc0(pos->length * sizeof(Const*)); - - foreach (cell, partitionList) { - value = ((RangePartitionDefState*)lfirst(cell))->boundary; - - if (pre_value == NULL) { - pre_value = GetPartitionValue(pos, attrs, value, is_intreval, isPartition, partkeyIsFunc); - for (counter = 0; counter < pos->length; counter++) { - pre[counter] = pre_value + counter; - } - } else { - cur_value = GetPartitionValue(pos, attrs, value, is_intreval, isPartition, partkeyIsFunc); - for (counter = 0; counter < pos->length; counter++) { - cur[counter] = cur_value + counter; - } - result = partitonKeyCompare(cur, pre, pos->length); - - /* compare partition value */ - if (result <= 0) { - pfree_ext(pre); - pfree_ext(cur); - pfree_ext(pre_value); - pfree_ext(cur_value); - list_free_ext(pos); - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg((isPartition ? "partition bound of partition \"%s\" is too low" : - "distribution bound of slice \"%s\" is too low"), - ((RangePartitionDefState*)lfirst(cell))->partitionName))); - } - - rc = memcpy_s(pre, pos->length * sizeof(Const*), cur, pos->length * sizeof(Const*)); - securec_check(rc, "\0", "\0"); - pfree_ext(pre_value); - pre_value = cur_value; - cur_value = NULL; - } - } - pfree_ext(pre); - pfree_ext(cur); - pfree_ext(pre_value); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepAddPartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport( - ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add partition against NON-PARTITIONED table"))); - } - - if (rel->partMap->type == PART_TYPE_INTERVAL) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), - errmsg("can not add partition against interval partitioned table"))); - } -} - -static void ATPrepAddSubPartition(Relation rel) -{ - if 
(!RelationIsSubPartitioned(rel)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Wrong object type"), - errdetail("Can not add subpartition against NON-SUBPARTITIONED table"), - errcause("ADD SUBPARTITION works on a NON-SUBPARTITIONED table"), - erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); - } - - if (rel->partMap->type == PART_TYPE_INTERVAL) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Un-support feature"), - errdetail("Can not add subpartition against interval partitioned table"), - errcause("ADD SUBPARTITION works on an interval partitioned table"), - erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepDropPartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Un-support feature"), - errdetail("Can not drop partition against NON-PARTITIONED table"), - errcause("DROP PARTITION works on a NON-PARTITIONED table"), - erraction("Please check DDL syntax for \"DROP PARTITION\""))); - } - - if (rel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Droping hash partition is unsupported."))); - } -} - -static void ATPrepDropSubPartition(Relation rel) -{ - if (!RelationIsSubPartitioned(rel)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Un-support feature"), - errdetail("Can not drop subpartition against NON-SUBPARTITIONED table"), - errcause("DROP SUBPARTITION works on a NON-SUBPARTITIONED table"), - erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); - } - - char subparttype = PART_STRATEGY_INVALID; - RelationGetSubpartitionInfo(rel, &subparttype, NULL, NULL); - if (subparttype == PART_STRATEGY_HASH) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Un-support feature"), - errdetail("The syntax is unsupported 
for hash subpartition"), - errcause("Try DROP SUBPARTITION on a hash-subpartitioned table"), - erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); - } -} - -static void ATPrepUnusableIndexPartition(Relation rel) -{ - if (!RelationIsPartitioned(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not set unusable index partition against NON-PARTITIONED index"))); - } -} - -static void ATPrepUnusableAllIndexOnPartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not set all index unusable on one partition against NON-PARTITIONED table"))); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepEnableRowMovement(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not enable row movement against NON-PARTITIONED table"))); - } -} -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepDisableRowMovement(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not disable row movement against NON-PARTITIONED table"))); - } - if (RelationIsColStore(rel)) { - ereport(NOTICE, - (errmsg("disable row movement is invalid for column stored tables. 
They always enable row movement between " - "partitions."))); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepTruncatePartition(Relation rel) -{ - AclResult aclresult; - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not truncate partition against NON-PARTITIONED table"))); - } - - aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_TRUNCATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, RelationGetRelationName(rel)); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATPrepTruncateSubPartition(Relation rel) -{ - AclResult aclresult; - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not truncate partition against NON-PARTITIONED table"))); - } - - aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_TRUNCATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, RelationGetRelationName(rel)); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : ALTER TABLE EXCHANGE PARTITION WITH TABLE - * Description : check change with table which is or not ordinary table - * Notes : - */ -static void ATPrepExchangePartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not exchange partition against NON-PARTITIONED table"))); - } - - if (RelationIsSubPartitioned(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, exchange partition is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } -} - -static void ATPrepMergePartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - 
(errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not merge partitions against NON-PARTITIONED table"))); - } - - if (RelationIsSubPartitioned(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, merge partitions is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - - if (rel->partMap->type == PART_TYPE_LIST || rel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not merge LIST/HASH partition table"))); - } -} - -static void ATPrepSplitPartition(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not split partition against NON-PARTITIONED table"))); - } - - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, split partition is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - - if (rel->partMap->type == PART_TYPE_LIST || rel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not split LIST/HASH partition table"))); - } -} - -static void ATPrepSplitSubPartition(Relation rel) -{ - if (!RelationIsSubPartitioned(rel)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not split subpartition against NON-SUBPARTITIONED table"))); - } -} - -static void ATPrepResetPartitionno(Relation rel) -{ - if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not reset partitionno against NON-PARTITIONED table"))); - } -} - -static void ATExecAddPartition(Relation rel, AddPartitionState *partState) -{ - Assert(RELATION_IS_PARTITIONED(rel)); - - int partitionno = 
-GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id)); - Assert(PARTITIONNO_IS_VALID(partitionno)); - - ListCell* cell = NULL; - ListCell* subcell = NULL; - foreach (cell, partState->partitionList) { - partitionno++; - PartitionDefState* partitionDefState = (PartitionDefState*)lfirst(cell); - partitionDefState->partitionno = partitionno; - int subpartitionno = 0; - foreach(subcell, partitionDefState->subPartitionDefState) { - subpartitionno++; - PartitionDefState* subpartitionDefState = (PartitionDefState*)lfirst(subcell); - subpartitionDefState->partitionno = subpartitionno; - } - } - - if (rel->partMap->type == PART_TYPE_LIST) { - if (!IsA(linitial(partState->partitionList), ListPartitionDefState)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not add none-list partition to list partition table"))); - } - } else if (rel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add hash partition"))); - } else { - if (!IsA(linitial(partState->partitionList), RangePartitionDefState)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not add none-range partition to range partition table"))); - } - } - - ATExecAddPartitionInternal(rel, partState); - /* inplace update on partitioned table, because we can't cover the wait_clean_gpi info, which is inplace updated */ - UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id), -partitionno, true); -} - -/* check tablespace permission for add partition/subpartition */ -static void CheckTablespaceForAddPartition(Relation rel, List *partDefStateList) -{ - ListCell *cell = NULL; - foreach (cell, partDefStateList) { - PartitionDefState *partDef = (PartitionDefState*)lfirst(cell); - if (PointerIsValid(partDef->tablespacename)) { - CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner); - } - CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState); - } -} - -static void 
CheckPartitionNameConflictForAddPartition(List *newPartitionNameList, List *existingPartitionNameList) -{ - Assert(PointerIsValid(newPartitionNameList)); - Assert(PointerIsValid(existingPartitionNameList)); - - - ListCell *cell1 = NULL; - ListCell *cell2 = NULL; - ListCell *lc = NULL; - foreach (cell1, newPartitionNameList) { - char *newPartitionName1 = (char *)lfirst(cell1); - - /* 1. the newPartitionNameList should not be different itself */ - lc = cell1; - while ((lc = lnext(lc)) != NULL) { - char *newPartitionName2 = (char *)lfirst(lc); - if (strcmp(newPartitionName1, newPartitionName2) == 0) { - ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("The partition name \"%s\" is duplicated with another new partition name", - newPartitionName1), - errdetail("N/A"), - errcause("When ADD PARTITION/SUBPARTITION, one partition name is duplicated with another one"), - erraction("Check the syntax, and change the duplicated partition name"))); - } - } - - /* 2. the newPartitionNameList should not be different from existingPartitionNameList */ - foreach (cell2, existingPartitionNameList) { - char *existingPartitionName = (char *)lfirst(cell2); - if (strcmp(newPartitionName1, existingPartitionName) == 0) { - ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("The partition name \"%s\" is duplicated with existing partition name", newPartitionName1), - errdetail("N/A"), - errcause( - "When ADD PARTITION/SUBPARTITION, one partition name is duplicated with the existing name"), - erraction("Check the syntax, and change the duplicated partition name"))); - } - } - } -} - -/* - * This function is used to find an existing list partition by the new boundary. 
- * Used for adding a list partition syntax, for example: - * 'ADD PARTITION VALUES (listValueList)' or 'SPLIT PARTITION VALUES (expr_list)' - */ -static Oid FindPartOidByListBoundary(Relation rel, ListPartitionMap *partMap, Node* boundKey, bool partkeyIsFunc) -{ - List *partKeyValueList = NIL; - Oid res; - if (IsA(boundKey, RowExpr)) { /* Multi-keys partition boundary values */ - partKeyValueList = transformConstIntoTargetType( - rel->rd_att->attrs, partMap->partitionKey, ((RowExpr*)boundKey)->args); - res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); - list_free_ext(partKeyValueList); - return res; - } - - Const* con = (Const*)boundKey; - FormData_pg_attribute attr = rel->rd_att->attrs[partMap->partitionKey->values[0] - 1]; - - if (con->ismaxvalue) { - /* - * DEFAULT boundary of a list partition has only one Const. - * So it cannot be used to PartitionValuesGetPartitionOid for multi-keys partition. - * Just return default partition Oid. - */ - for (int i = 0; i < partMap->listElementsNum; i++) { - ListPartElement *list = &partMap->listElements[i]; - if (list->boundary[0].values[0]->ismaxvalue) { - return list->partitionOid; - } - } - return InvalidOid; - } - con = (Const*)GetTargetValue(&attr, con, false, partkeyIsFunc); - if (!PointerIsValid(con)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("partition key value must be const or const-evaluable expression"))); - } - if (!OidIsValid(con->constcollid) && OidIsValid(attr.attcollation)) { - con->constcollid = attr.attcollation; - } - partKeyValueList = list_make1(con); - res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); - list_free_ext(partKeyValueList); - return res; -} - -static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partDefState, bool partkeyIsFunc) -{ - Assert(IsA(partDefState, RangePartitionDefState) || IsA(partDefState, ListPartitionDefState)); - - ListCell *cell = 
NULL; - List *partKeyValueList = NIL; - Oid existingPartOid = InvalidOid; - - incre_partmap_refcount(rel->partMap); - - int partNum = getNumberOfPartitions(rel); - if (nodeTag(partDefState) == T_RangePartitionDefState) { - RangePartitionDefState *partDef = (RangePartitionDefState *)partDefState; - RangePartitionMap *partMap = (RangePartitionMap *)rel->partMap; - Const *curBound = (Const *)copyObject(partMap->rangeElements[partNum - 1].boundary[0]); - Const *val = partDef->curStartVal; - if (!curBound->ismaxvalue && val != NULL && partitonKeyCompare(&curBound, &val, 1) != 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", - partDef->partitionInitName ? partDef->partitionInitName : partDef->partitionName))); - } - partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, - partMap->partitionKey, partDef->boundary, partkeyIsFunc); - pfree_ext(curBound); - existingPartOid = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); - list_free_ext(partKeyValueList); - if (OidIsValid(existingPartOid)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("upper boundary of adding partition MUST overtop last existing partition"))); - } - } else { - ListPartitionDefState *partDef = (ListPartitionDefState *)partDefState; - - foreach (cell, partDef->boundary) { - existingPartOid = FindPartOidByListBoundary(rel, (ListPartitionMap *)rel->partMap, (Node*)lfirst(cell), partkeyIsFunc); - if (OidIsValid(existingPartOid)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("list boundary of adding partition MUST NOT overlap with existing partition"))); - } - } - } - - decre_partmap_refcount(rel->partMap); -} - -bool IsPartKeyFunc(Relation rel, bool isPartRel, bool forSubPartition, PartitionExprKeyInfo* partExprKeyInfo) -{ - HeapTuple partTuple = NULL; - if (forSubPartition) { - if (isPartRel) { - 
partTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(rel->rd_id)); - } else { - PartitionMap* map = rel->partMap; - Oid partitionId = InvalidOid; - if (map->type == PART_TYPE_LIST) { - partitionId = ((ListPartitionMap*)map)->listElements[0].partitionOid; - } else if (map->type == PART_TYPE_HASH) { - partitionId = ((HashPartitionMap*)map)->hashElements[0].partitionOid; - } else { - partitionId = ((RangePartitionMap*)map)->rangeElements[0].partitionOid; - } - partTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partitionId)); - } - } else { - partTuple = searchPgPartitionByParentIdCopy(PART_OBJ_TYPE_PARTED_TABLE, rel->rd_id); - } - if (!partTuple) - ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("The partTuple for oid %d can't be found", rel->rd_id))); - - bool isNull = false; - Datum datum = 0; - datum = SysCacheGetAttr(PARTRELID, partTuple, Anum_pg_partition_partkeyexpr, &isNull); - if (partExprKeyInfo) - partExprKeyInfo->partkeyexprIsNull = isNull; - if (isNull) { - if (forSubPartition) - ReleaseSysCache(partTuple); - else - heap_freetuple(partTuple); - return false; - } - - char* partKeyStr = TextDatumGetCString(datum); - Node* partkeyexpr = NULL; - if (forSubPartition) - ReleaseSysCache(partTuple); - else - heap_freetuple(partTuple); - - partkeyexpr = (Node*)stringToNode_skip_extern_fields(partKeyStr); - if (!partExprKeyInfo) - pfree_ext(partKeyStr); - else - partExprKeyInfo->partExprKeyStr = partKeyStr; - - if (!partkeyexpr) - ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("The partkeyexpr can't be NULL"))); - - if (partkeyexpr->type == T_OpExpr) - return false; - else if (partkeyexpr->type == T_FuncExpr) - return true; - else - ereport(ERROR, - (errcode(ERRCODE_NODE_ID_MISSMATCH), - errmsg("The node type %d is wrong, it must be T_OpExpr or T_FuncExpr", partkeyexpr->type))); - /* suppress warning: -Wreturn-type */ - return false; -} - -static void CheckSubpartitionForAddPartition(Relation rel, Node *partDefState) -{ - 
Assert(IsA(partDefState, RangePartitionDefState) || IsA(partDefState, ListPartitionDefState)); - - List *subPartitionDefStateList = NIL; - if (nodeTag(partDefState) == T_RangePartitionDefState) { - RangePartitionDefState *partDef= (RangePartitionDefState*)partDefState; - subPartitionDefStateList = partDef->subPartitionDefState; - } else { - ListPartitionDefState *partDef= (ListPartitionDefState*)partDefState; - subPartitionDefStateList = partDef->subPartitionDefState; - } - - if (subPartitionDefStateList == NIL) { - return; - } - - /* a. only can be subpartition table */ - if (!RelationIsSubPartitioned(rel)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("Un-support feature"), - errdetail("Can not add subpartition against NON-SUBPARTITIONED table"), - errcause("ADD SUBPARTITION works on a NON-SUBPARTITIONED table"), - erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); - } - - /* get the nessary subpartition info */ - char subparttype = PART_STRATEGY_INVALID; - List *subpartKeyPosList = NIL; - RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, NULL); - - /* b. check the subpartition type is same as the relation itself */ - NodeTag subparttypeTag = GetPartitionStateType(subparttype); - ListCell* subcell = NULL; - foreach (subcell, subPartitionDefStateList) { - Node *subPartitionDefState = (Node *)lfirst(subcell); - if (nodeTag(subPartitionDefState) != subparttypeTag) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("The syntax format of subpartition is incorrect, the declaration and " - "definition of the subpartition do not match."), - errdetail("The syntax format of subpartition %s is incorrect.", - GetPartitionDefStateName(subPartitionDefState)), - errcause("The declaration and definition of the subpartition do not match."), - erraction("Consistent declaration and definition of subpartition."))); - } - } - - bool partkeyIsFunc = IsPartKeyFunc(rel, false, true); - /* c. 
subpartition values constraint */ - switch (subparttype) { - case PART_STRATEGY_RANGE: - ComparePartitionValue(subpartKeyPosList, (RelationGetDescr(rel))->attrs, subPartitionDefStateList, true, partkeyIsFunc); - break; - case PART_STRATEGY_LIST: - CompareListValue(subpartKeyPosList, (RelationGetDescr(rel))->attrs, subPartitionDefStateList, partkeyIsFunc); - break; - case PART_STRATEGY_HASH: - break; - default: - ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Only support RANGE/LIST/HASH for subpartition table"), - errdetail("N/A"), errcause("The partition type is incorrect."), - erraction("Use the correct partition type."))); - } - - list_free_ext(subpartKeyPosList); -} - -static void CheckForAddPartition(Relation rel, List *partDefStateList) -{ - Assert(RELATION_IS_PARTITIONED(rel)); - - /* check 1: tablespace privileges */ - CheckTablespaceForAddPartition(rel, partDefStateList); - - /* check 2: can not add more partition, because more enough */ - List* newPartitionNameList = GetPartitionNameList(partDefStateList); - List* existingPartitionNameList = RelationGetPartitionNameList(rel); - int npart = list_length(newPartitionNameList); - int nnewpart = list_length(existingPartitionNameList); - if ((npart + nnewpart) > MAX_PARTITION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("too many partitions for partitioned table"), - errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM))); - } - - /* check 3: name conflict check */ - CheckPartitionNameConflictForAddPartition(newPartitionNameList, existingPartitionNameList); - list_free_deep(existingPartitionNameList); - /* don't free the cell of newPartitionNameList */ - list_free_ext(newPartitionNameList); - - /* check 4: partition values constraint */ - bool partkeyIsFunc = IsPartKeyFunc(rel, false, false); - int2vector *partitionKey = GetPartitionKey(rel->partMap); - List *partKeyPosList = NIL; - for (int i = 0; i < 
partitionKey->dim1; i++) { - partKeyPosList = lappend_int(partKeyPosList, partitionKey->values[i] - 1); - } - if (rel->partMap->type == PART_TYPE_RANGE) { - ComparePartitionValue(partKeyPosList, (RelationGetDescr(rel))->attrs, partDefStateList, true, partkeyIsFunc); - } else if (rel->partMap->type == PART_TYPE_LIST) { - CompareListValue(partKeyPosList, (RelationGetDescr(rel))->attrs, partDefStateList, partkeyIsFunc); - } - list_free_ext(partKeyPosList); - - ListCell *cell = NULL; - foreach (cell, partDefStateList) { - /* check 5: new adding partitions behind the last partition */ - CheckPartitionValueConflictForAddPartition(rel, (Node*)lfirst(cell), partkeyIsFunc); - - /* check 6: constraint for subpartition */ - CheckSubpartitionForAddPartition(rel, (Node*)lfirst(cell)); - } - - /* check 7: whether has the unusable local index */ - if (!checkRelationLocalIndexesUsable(rel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't add partition bacause the relation %s has unusable local index", - NameStr(rel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } -} - -static void CheckForAddSubPartition(Relation rel, Relation partrel, List *subpartDefStateList) -{ - Assert(RELATION_IS_PARTITIONED(rel)); - Assert(RELATION_IS_PARTITIONED(partrel)); - - /* check 1: tablespace privileges */ - CheckTablespaceForAddPartition(rel, subpartDefStateList); - - /* check 2: can not add more partition, because more enough */ - List* newPartitionNameList = GetPartitionNameList(subpartDefStateList); - List* existingPartitionNameList = RelationGetPartitionNameList(rel); - int npart = list_length(newPartitionNameList); - int nnewpart = list_length(existingPartitionNameList); - if ((npart + nnewpart) > MAX_PARTITION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("too many partitions for partitioned table"), - errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM))); - } - - /* check 
3: name conflict check */ - CheckPartitionNameConflictForAddPartition(newPartitionNameList, existingPartitionNameList); - list_free_deep(existingPartitionNameList); - /* don't free the cell of newPartitionNameList */ - list_free_ext(newPartitionNameList); - - /* check 4: partition values constraint */ - bool partkeyIsFunc = IsPartKeyFunc(partrel, true, true); - int2vector *partitionKey = GetPartitionKey(partrel->partMap); - List *partKeyPosList = NIL; - for (int i = 0; i < partitionKey->dim1; i++) { - partKeyPosList = lappend_int(partKeyPosList, partitionKey->values[i] - 1); - } - if (partrel->partMap->type == PART_TYPE_RANGE) { - ComparePartitionValue(partKeyPosList, (RelationGetDescr(partrel))->attrs, subpartDefStateList, true, partkeyIsFunc); - } else if (partrel->partMap->type == PART_TYPE_LIST) { - CompareListValue(partKeyPosList, (RelationGetDescr(partrel))->attrs, subpartDefStateList, partkeyIsFunc); - } - list_free_ext(partKeyPosList); - - ListCell *cell = NULL; - foreach (cell, subpartDefStateList) { - /* check 5: new adding partitions behind the last partition */ - CheckPartitionValueConflictForAddPartition(partrel, (Node*)lfirst(cell), partkeyIsFunc); - } - - /* check 6: whether has the unusable local index */ - if (!checkRelationLocalIndexesUsable(rel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't add partition bacause the relation %s has unusable local index", - NameStr(rel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecAddPartitionInternal(Relation rel, AddPartitionState *partState) -{ - Relation pgPartRel = NULL; - Oid newPartOid = InvalidOid; - List *newSubpartOidList = NIL; - Datum new_reloptions; - Datum rel_reloptions; - HeapTuple tuple; - bool isnull = false; - List* old_reloptions = NIL; - ListCell* cell = NULL; - Oid bucketOid; - Relation parentrel = NULL; 
- char subparttype = PART_STRATEGY_INVALID; - int2vector *subpartitionKey = NULL; - PartitionDefState* partDef = NULL; - - /* if the relation is a partrel of a subpartition, here we get the relation first */ - if (RelationIsPartitionOfSubPartitionTable(rel)) { - /* the lock of parentrel has been obtained already, seen in ATExecAddSubPartition */ - parentrel = heap_open(rel->parentId, NoLock); - } - - /* step 1: Check before the actual work */ - if (RelationIsPartitionOfSubPartitionTable(rel)) { - CheckForAddSubPartition(parentrel, rel, partState->partitionList); - } else { - CheckForAddPartition(rel, partState->partitionList); - } - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(rel); - bool *isTimestamptzForSubPartKey = NULL; - if (RelationIsSubPartitioned(rel)) { - List *subpartKeyPosList = NIL; - RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, &subpartitionKey); - isTimestamptzForSubPartKey = CheckSubPartkeyHasTimestampwithzone(rel, subpartKeyPosList); - list_free_ext(subpartKeyPosList); - } - - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - - /* step 2: add new partition entry in pg_partition */ - /* TRANSFORM into target first */ - Oid relOid = - RelationIsPartitionOfSubPartitionTable(rel) ? 
ObjectIdGetDatum(rel->parentId) : ObjectIdGetDatum(rel->rd_id); - tuple = SearchSysCache1(RELOID, relOid); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for relation %u", relOid), errdetail("N/A"), - errcause("System error."), erraction("Contact engineer to support."))); - rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - - old_reloptions = untransformRelOptions(rel_reloptions); - RemoveRedisRelOptionsFromList(&old_reloptions); - new_reloptions = transformRelOptions((Datum)0, old_reloptions, NULL, NULL, false, false); - ReleaseSysCache(tuple); - - if (old_reloptions != NIL) - list_free_ext(old_reloptions); - - bucketOid = RelationGetBucketOid(rel); - - PartitionExprKeyInfo partExprKeyInfo = PartitionExprKeyInfo(); - partExprKeyInfo.partkeyIsFunc = IsPartKeyFunc(rel, false, true, &partExprKeyInfo); - - List *partitionNameList = - list_concat(GetPartitionNameList(partState->partitionList), RelationGetPartitionNameList(rel)); - foreach (cell, partState->partitionList) { - partDef = (PartitionDefState*)lfirst(cell); - - PartitionState *partitionState = makeNode(PartitionState); - partitionState->partitionStrategy = PartitionMapIsRange(rel) ? 
PART_STRATEGY_RANGE : PART_STRATEGY_LIST; - partitionState->partitionNameList = partitionNameList; - if (RelationIsSubPartitioned(rel)) { - partitionState->subPartitionState = makeNode(PartitionState); - partitionState->subPartitionState->partitionStrategy = subparttype; - if (partDef->subPartitionDefState == NIL) { - Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (PartitionDefState *)partDef); - partDef->subPartitionDefState = lappend(partDef->subPartitionDefState, subPartitionDefState); - } - } - - if (rel->partMap->type == PART_TYPE_LIST) { - newPartOid = HeapAddListPartition(pgPartRel, - rel->rd_id, - rel->rd_rel->reltablespace, - bucketOid, - (ListPartitionDefState *)partDef, - rel->rd_rel->relowner, - (Datum)new_reloptions, - isTimestamptz, - RelationGetStorageType(rel), - subpartitionKey, - RelationIsPartitionOfSubPartitionTable(rel), - &partExprKeyInfo); - } else { - newPartOid = heapAddRangePartition(pgPartRel, - rel->rd_id, - rel->rd_rel->reltablespace, - bucketOid, - (RangePartitionDefState *)partDef, - rel->rd_rel->relowner, - (Datum)new_reloptions, - isTimestamptz, - RelationGetStorageType(rel), - AccessExclusiveLock, - subpartitionKey, - RelationIsPartitionOfSubPartitionTable(rel), - &partExprKeyInfo); - } - - Oid partTablespaceOid = - GetPartTablespaceOidForSubpartition(rel->rd_rel->reltablespace, partDef->tablespacename); - newSubpartOidList = addNewSubPartitionTuplesForPartition(pgPartRel, - newPartOid, - partTablespaceOid, - bucketOid, - rel->rd_rel->relowner, - (Datum)new_reloptions, - isTimestamptzForSubPartKey, - RelationGetStorageType(rel), - partitionState, - (Node *)partDef, - AccessExclusiveLock); - - /* step 3: no need to update number of partitions in pg_partition */ - /* - * We must bump the command counter to make the newly-created partition - * tuple visible for opening. 
- */ - CommandCounterIncrement(); - - if (RelationIsColStore(rel)) { - addCudescTableForNewPartition(rel, newPartOid); - addDeltaTableForNewPartition(rel, newPartOid); - } - - if (RelationIsPartitionOfSubPartitionTable(rel)) { - addIndexForPartition(parentrel, newPartOid); - addToastTableForNewPartition(rel, newPartOid, true); - } else if (RelationIsSubPartitioned(rel)) { - Assert(newSubpartOidList != NIL); - Partition part = partitionOpen(rel, newPartOid, AccessExclusiveLock); - Relation partrel = partitionGetRelation(rel, part); - ListCell* lc = NULL; - foreach (lc, newSubpartOidList) { - Oid subpartOid = lfirst_oid(lc); - addIndexForPartition(rel, subpartOid); - addToastTableForNewPartition(partrel, subpartOid, true); - } - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); - } else { - addIndexForPartition(rel, newPartOid); - addToastTableForNewPartition(rel, newPartOid); - } - - /* step 4: invalidate relation */ - if (RelationIsPartitionOfSubPartitionTable(rel)) { - CacheInvalidateRelcache(parentrel); - CacheInvalidatePartcacheByPartid(rel->rd_id); - } else { - CacheInvalidateRelcache(rel); - } - pfree_ext(partitionState->subPartitionState); - pfree_ext(partitionState); - } - - /* close relation, done */ - relation_close(pgPartRel, NoLock); - pfree_ext(isTimestamptz); - pfree_ext(isTimestamptzForSubPartKey); - list_free_ext(partitionNameList); - - if (RelationIsPartitionOfSubPartitionTable(rel)) { - heap_close(parentrel, NoLock); - } -} - -static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartState) -{ - Assert(PointerIsValid(subpartState->partitionName)); - Assert(RelationIsSubPartitioned(rel)); - - /* get partoid and lock partition */ - Oid partOid = PartitionNameGetPartitionOid(rel->rd_id, - subpartState->partitionName, - PART_OBJ_TYPE_TABLE_PARTITION, - ShareUpdateExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partrel = 
partitionGetRelation(rel, part); - - AddPartitionState* partState = makeNode(AddPartitionState); - partState->partitionList = subpartState->subPartitionList; - - int subpartitionno = -GetCurrentSubPartitionNo(partOid); - Assert(PARTITIONNO_IS_VALID(subpartitionno)); - ListCell* cell = NULL; - foreach (cell, partState->partitionList) { - subpartitionno++; - PartitionDefState* partitionDefState = (PartitionDefState*)lfirst(cell); - partitionDefState->partitionno = subpartitionno; - } - - if (partrel->partMap->type == PART_TYPE_LIST) { - if (!IsA(linitial(partState->partitionList), ListPartitionDefState)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not add none-list subpartition to list subpartition table"))); - } - } else if (partrel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add hash subpartition"))); - } else { - if (!IsA(linitial(partState->partitionList), RangePartitionDefState)) { - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not add none-range subpartition to range subpartition table"))); - } - } - - ATExecAddPartitionInternal(partrel, partState); - UpdateCurrentSubPartitionNo(partOid, -subpartitionno); - - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); - pfree_ext(partState); -} - -/* Assume the caller has already hold RowExclusiveLock on the pg_partition. 
*/ -static void UpdateIntervalPartToRange(Relation relPartition, Oid partOid, const char* stmt) -{ - bool dirty = false; - /* Fetch a copy of the tuple to scribble on */ - HeapTuple parttup = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(partOid)); - if (!HeapTupleIsValid(parttup)) { - ereport(ERROR, - (errcode(ERRCODE_SQL_ROUTINE_EXCEPTION), - errmsg("pg_partition entry for partid %u vanished during %s.", partOid, stmt))); - } - Form_pg_partition partform = (Form_pg_partition)GETSTRUCT(parttup); - - /* Apply required updates, if any, to copied tuple */ - if (partform->partstrategy == PART_STRATEGY_INTERVAL) { - partform->partstrategy = PART_STRATEGY_RANGE; - dirty = true; - } else { - ereport(LOG, - (errcode(ERRCODE_SQL_ROUTINE_EXCEPTION), - errmsg("pg_partition entry for partid %u is not a interval " - "partition when execute %s .", - partOid, - stmt))); - } - - /* If anything changed, write out the tuple. */ - if (dirty) { - simple_heap_update(relPartition, &parttup->t_self, parttup); - CatalogUpdateIndexes(relPartition, parttup); - } - tableam_tops_free_tuple(parttup); -} - -/* assume caller already hold AccessExclusiveLock on the partition being dropped - * if the intervalPartOid is not InvalidOid, the interval partition which is specificed by it - * need to be changed to normal range partition. 
- */ -void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid intervalPartOid, bool sendInvalid) -{ - Partition part = NULL; - Relation pg_partition = NULL; - char* partName = NULL; - pg_partition = relation_open(PartitionRelationId, RowExclusiveLock); - - /* step 2: drop the targeting partition entry in pg_partition */ - part = partitionOpenWithRetry(rel, partOid, AccessExclusiveLock, stmt); - if (!part) { - ereport(ERROR, - (errcode(ERRCODE_LOCK_WAIT_TIMEOUT), - errmsg("could not acquire AccessExclusiveLock on dest table partition \"%s\", %s failed", - getPartitionName(partOid, false), stmt))); - } - partName = pstrdup(PartitionGetPartitionName(part)); - /* drop toast table, index, and finally the partition iteselt */ - dropIndexForPartition(partOid); - dropToastTableOnPartition(partOid); - if (RelationIsColStore(rel)) { - dropCuDescTableOnPartition(partOid); - dropDeltaTableOnPartition(partOid); - } - heapDropPartition(rel, part); - if (intervalPartOid) { - UpdateIntervalPartToRange(pg_partition, intervalPartOid, stmt); - } - ereport(LOG, - (errmsg("Partition %s(%d) has been fast dropped.", partName, partOid))); - pfree_ext(partName); - - /* step 3: no need to update number of partitions in pg_partition */ - /* step 4: invalidate relation */ - /* if the relation is a partrel, we send invalid message to the partition itself, - * else we send invalid message to the relation */ - if (sendInvalid) { - if (RelationIsPartitionOfSubPartitionTable(rel)) { - CacheInvalidatePartcacheByPartid(rel->rd_id); - } else { - CacheInvalidateRelcache(rel); - } - } - - - /* make the dropping partition invisible, fresh partition map for the new partition */ - relation_close(pg_partition, RowExclusiveLock); - - /* - * We must bump the command counter to make the newly-droped partition - * tuple visible. 
- */ - CommandCounterIncrement(); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecDropPartition(Relation rel, AlterTableCmd *cmd) -{ - Assert(RELATION_IS_PARTITIONED(rel)); - - Oid partOid = InvalidOid; - List *subpartOidList = NIL; - Oid subpartOid = InvalidOid; - ListCell* cell = NULL; - Partition part = NULL; - Relation partrel = NULL; - - /* getting the dropping partition's oid, and lock partition */ - partOid = GetPartOidByATcmd(rel, cmd, "DROP PARTITION"); - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(rel)) { - LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - /* check 1: check validity of partition oid */ - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); - } - - /* get subpartOidList if is subpartition, and lock subpartition */ - if (RelationIsSubPartitioned(rel)) { - part = partitionOpen(rel, partOid, NoLock); - partrel = partitionGetRelation(rel, part); - subpartOidList = relationGetPartitionOidList(partrel); - foreach (cell, subpartOidList) { - subpartOid = lfirst_oid(cell); - LockPartitionOid(partOid, subpartOid, AccessExclusiveLock); - } - } - - /* check 2: can not drop the last existing partition */ - if (getNumberOfPartitions(rel) == 1) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("Cannot drop the only partition of a partitioned table"))); - } - if (RelationIsSubPartitioned(rel)) { - foreach (cell, subpartOidList) { - subpartOid = DatumGetObjectId(lfirst(cell)); - AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subpartOid); - } - } else { - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); - } - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } - - Oid changeToRangePartOid = 
InvalidOid; - if (rel->partMap->type != PART_TYPE_LIST && rel->partMap->type != PART_TYPE_HASH) { -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && IS_PGXC_DATANODE) { - Tsdb::DeleteDeltaByPartition(GetActiveSnapshot(), rel, partOid); - } -#endif /* ENABLE_MULTIPLE_NODES */ - changeToRangePartOid = GetNeedDegradToRangePartOid(rel, partOid); - } - - if (RelationIsSubPartitioned(rel)) { - foreach (cell, subpartOidList) { - subpartOid = DatumGetObjectId(lfirst(cell)); - /* we don't send invalid message to the partition here, as the partition will be dropped soon */ - fastDropPartition(partrel, subpartOid, "DROP SUBPARTITION", InvalidOid, false); - } - releasePartitionOidList(&subpartOidList); - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); - fastDropPartition(rel, partOid, "DROP PARTITION"); - } else { - fastDropPartition(rel, partOid, "DROP PARTITION", changeToRangePartOid); - } -} - -static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd) -{ - Assert(RelationIsSubPartitioned(rel)); - - Oid partOid = InvalidOid; - Oid subpartOid = InvalidOid; - - /* getting the dropping subpartition's oid, and lock subpartition */ - subpartOid = GetSubpartOidByATcmd(rel, cmd, &partOid, "DROP SUBPARTITION"); - - /* check 1: check validity of partition oid */ - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("The partition which owns the subpartition is missing"), - errdetail("N/A"), - errcause("Maybe the subpartition table is dropped"), - erraction("Check system table 'pg_partition' for more information"))); - } - if (!OidIsValid(subpartOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("The subpartition number is invalid or out-of-range"), - errdetail("N/A"), - errcause("Wrong or invalid value for DROP SUBPARTITION"), - erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); - } - - Partition part = 
partitionOpen(rel, partOid, NoLock); - Relation partrel = partitionGetRelation(rel, part); - - /* check 2: can not drop the last existing subpartition */ - if (getNumberOfPartitions(partrel) == 1) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Cannot drop the only subpartition of a partitioned table"), - errdetail("N/A"), - errcause("DROP SUBPARTITION works on the partition which has only one subpartition"), - erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); - } - AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subpartOid); - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } - - fastDropPartition(partrel, subpartOid, "DROP SUBPARTITION"); - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); -} - -static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *command) -{ - Oid partOid = InvalidOid; - - /* FIRST IS the PARTITION (partname) branch */ - if (PointerIsValid(cmd->name)) { - partOid = PartitionNameGetPartitionOid(rel->rd_id, - cmd->name, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - return partOid; - } - - /* next IS the PARTITION FOR (partvalue) branch */ - RangePartitionDefState *rangePartDef = (RangePartitionDefState*)cmd->def; - switch (rel->partMap->type) { - case PART_TYPE_RANGE: - case PART_TYPE_INTERVAL: - rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, - rangePartDef->boundary); - break; - case PART_TYPE_LIST: - rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((ListPartitionMap*)rel->partMap)->partitionKey, - rangePartDef->boundary); - break; - case PART_TYPE_HASH: - rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((HashPartitionMap*)rel->partMap)->partitionKey, - rangePartDef->boundary); - break; - default: - ereport(ERROR, 
(errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), - errdetail("This parttype is not supported for %s", command), - errcause("Only range/list/hash/interval partitioned table is supported for %s", command), - erraction("Please check DDL syntax for \"%s\"", command))); - } - partOid = PartitionValuesGetPartitionOid(rel, - rangePartDef->boundary, - AccessExclusiveLock, - true, - true, /* will check validity of partition oid next step */ - false); - return partOid; -} - -static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, const char *command) -{ - Oid subpartOid = InvalidOid; - - /* FIRST IS the SUBPARTITION (subpartname) branch */ - if (PointerIsValid(cmd->name)) { - subpartOid = SubPartitionNameGetSubPartitionOid(rel->rd_id, - cmd->name, - ShareUpdateExclusiveLock, /* partition lock */ - AccessExclusiveLock, /* subpartition lock */ - false, - false, - NULL, - NULL, - NoLock, - partOid); - return subpartOid; - } - - /* next IS the SUBPARTITION FOR (subpartvalue) branch */ - int2vector *subpartitionKey = NULL; - RelationGetSubpartitionInfo(rel, NULL, NULL, &subpartitionKey); - - RangePartitionDefState *rangePartDef = (RangePartitionDefState*)cmd->def; - int2vector *partitionKey = NULL; - switch (rel->partMap->type) { - case PART_TYPE_RANGE: - partitionKey = ((RangePartitionMap*)rel->partMap)->partitionKey; - break; - case PART_TYPE_LIST: - partitionKey = ((ListPartitionMap*)rel->partMap)->partitionKey; - break; - case PART_TYPE_HASH: - partitionKey = ((HashPartitionMap*)rel->partMap)->partitionKey; - break; - default: - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), - errdetail("This parttype is not supported for %s", command), - errcause("Only range/list/hash partitioned table is supported for %s", command), - erraction("Please check DDL syntax for \"%s\"", command))); - } - if (list_length(rangePartDef->boundary) != (partitionKey->dim1 + subpartitionKey->dim1)) { - 
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - (errmsg("Number of boundary items NOT EQUAL to number of partition keys"), - errdetail("There must be %d boundary items for %s in a subpartitioned table", - partitionKey->dim1 + subpartitionKey->dim1, command), - errcause("N/A"), erraction("Check whether the SQL statements is correct.")))); - } - List *partBoundary = NIL; - List *subpartBoundary = NIL; - ListCell *cell = list_head(rangePartDef->boundary); - for (int i = 0; i < list_length(rangePartDef->boundary); i++) { - if (i < partitionKey->dim1) { - partBoundary = lappend(partBoundary, lfirst(cell)); - } else { - subpartBoundary = lappend(subpartBoundary, lfirst(cell)); - } - cell = lnext(cell); - } - partBoundary = transformConstIntoTargetType(rel->rd_att->attrs, - partitionKey, - partBoundary); - subpartBoundary = transformConstIntoTargetType(rel->rd_att->attrs, - subpartitionKey, - subpartBoundary); - - subpartOid = SubPartitionValuesGetSubPartitionOid(rel, - partBoundary, - subpartBoundary, - ShareUpdateExclusiveLock, /* partition lock */ - AccessExclusiveLock, /* subpartition lock */ - true, - true, /* will check validity of partition oid next step */ - false, - partOid); - list_free_ext(partBoundary); - list_free_ext(subpartBoundary); - - return subpartOid; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -void ATExecSetIndexUsableState(Oid objclassOid, Oid objOid, bool newState) -{ - bool dirty = false; - Relation sys_table = NULL; - int sysCacheId = 0; - HeapTuple sys_tuple = NULL; - - if (objclassOid != IndexRelationId && objclassOid != PartitionRelationId) - return; - - sys_table = relation_open(objclassOid, RowExclusiveLock); - - /* drop toast table, index, and finally the partition iteselt */ - /* get the corresponding tuple for partOid */ - if (objclassOid == PartitionRelationId) - sysCacheId = PARTRELID; - else if (objclassOid == IndexRelationId) - sysCacheId = INDEXRELID; - - // update 
the indisusable field - sys_tuple = SearchSysCacheCopy1(sysCacheId, ObjectIdGetDatum(objOid)); - if (sys_tuple) { - if (objclassOid == PartitionRelationId) { - if (((Form_pg_partition)GETSTRUCT(sys_tuple))->indisusable != newState) { - ((Form_pg_partition)GETSTRUCT(sys_tuple))->indisusable = newState; - dirty = true; - } - } else if (objclassOid == IndexRelationId) { - if (((Form_pg_index)GETSTRUCT(sys_tuple))->indisusable != newState) { - ((Form_pg_index)GETSTRUCT(sys_tuple))->indisusable = newState; - dirty = true; - } - } - - /* Keep the system catalog indexes current. */ - if (dirty) { - simple_heap_update(sys_table, &(sys_tuple->t_self), sys_tuple); - CatalogUpdateIndexes(sys_table, sys_tuple); - } - tableam_tops_free_tuple(sys_tuple); - } - relation_close(sys_table, RowExclusiveLock); - - if (dirty) { - CommandCounterIncrement(); - } -} - -void ATExecSetIndexVisibleState(Oid objOid, bool newState) -{ - bool dirty = false; - Relation sys_table = NULL; - HeapTuple sys_tuple = NULL; - bool isNull = false; - - sys_table = relation_open(IndexRelationId, RowExclusiveLock); - - // update the indisvisible field - sys_tuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(objOid)); - if (sys_tuple) { - Datum oldState = heap_getattr(sys_tuple, Anum_pg_index_indisvisible, RelationGetDescr(sys_table), &isNull); - dirty = (isNull || BoolGetDatum(oldState) != newState); - - /* Keep the system catalog indexes current. 
*/ - if (dirty) { - HeapTuple newitup = NULL; - Datum values[Natts_pg_index]; - bool nulls[Natts_pg_class]; - bool replaces[Natts_pg_class]; - errno_t rc; - rc = memset_s(values, sizeof(values), 0, sizeof(values)); - securec_check(rc, "\0", "\0"); - rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replaces, sizeof(replaces), false, sizeof(replaces)); - securec_check(rc, "\0", "\0"); - - replaces[Anum_pg_index_indisvisible - 1] = true; - values[Anum_pg_index_indisvisible - 1] = DatumGetBool(newState); - - newitup = - (HeapTuple)tableam_tops_modify_tuple(sys_tuple, RelationGetDescr(sys_table), values, nulls, replaces); - simple_heap_update(sys_table, &(sys_tuple->t_self), newitup); - CatalogUpdateIndexes(sys_table, newitup); - tableam_tops_free_tuple(newitup); - } - tableam_tops_free_tuple(sys_tuple); - } - relation_close(sys_table, RowExclusiveLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecUnusableIndexPartition(Relation rel, const char* partition_name) -{ - Oid indexPartOid = InvalidOid; - Oid heapPartOid = InvalidOid; - - if (partition_name == NULL) - return; - - if (OidIsValid(rel->rd_rel->relcudescrelid) || rel->rd_rel->relam == CBTREE_AM_OID || - rel->rd_rel->relam == CGIN_AM_OID) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column-store index doesn't support this ALTER yet"))); - } - - /* the AccessShareLock lock on heap relation is held by AlterTableLookupRelation(). 
*/ - /* getting the partition's oid, lock it the same time */ - indexPartOid = PartitionNameGetPartitionOid(rel->rd_id, - partition_name, - PART_OBJ_TYPE_INDEX_PARTITION, - AccessExclusiveLock, // lock on index partition - false, - false, - PartitionNameCallbackForIndexPartition, - (void*)&heapPartOid, - AccessExclusiveLock); // lock on heap partition - // call the internal function - ATExecSetIndexUsableState(PartitionRelationId, indexPartOid, false); -} - -static void ATUnusableGlobalIndex(Relation rel) -{ - ListCell* index = NULL; - bool dirty = false; - HeapTuple sysTuple = NULL; - Relation sysTable = NULL; - - sysTable = relation_open(IndexRelationId, RowExclusiveLock); - // update the indisusable field - foreach (index, RelationGetSpecificKindIndexList(rel, true)) { - Oid currIndexOid = lfirst_oid(index); - sysTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(currIndexOid)); - if (sysTuple) { - if (((Form_pg_index)GETSTRUCT(sysTuple))->indisusable != false) { - ((Form_pg_index)GETSTRUCT(sysTuple))->indisusable = false; - dirty = true; - } - } else { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("could not find tuple for relation %u", currIndexOid))); - } - - /* Keep the system catalog indexes current. 
*/ - if (dirty) { - simple_heap_update(sysTable, &(sysTuple->t_self), sysTuple); - CatalogUpdateIndexes(sysTable, sysTuple); - } - tableam_tops_free_tuple(sysTuple); - } - relation_close(sysTable, RowExclusiveLock); - - if (dirty) { - /* invalidate relation */ - CacheInvalidateRelcache(rel); - CommandCounterIncrement(); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partition_name) -{ - Oid partOid = InvalidOid; - Relation pg_partition = NULL; - List* partIndexlist = NIL; - ListCell* lc = NULL; - - if (partition_name == NULL) - return; - - if (RelationIsColStore(rel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column-store relation doesn't support this ALTER yet"))); - } - - if (RelationIsSubPartitioned(rel)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, UNUSABLE LOCAL INDEXES is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - - /* getting the partition's oid, lock it the same time */ - partOid = PartitionNameGetPartitionOid(rel->rd_id, - partition_name, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - - /* first get the list of index partition on targeting table partition */ - partIndexlist = searchPartitionIndexesByblid(partOid); - if (!PointerIsValid(partIndexlist)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("no local index defined on partition %u", partOid))); - } - - // open pg_partition - pg_partition = relation_open(PartitionRelationId, RowExclusiveLock); - // for each - foreach (lc, partIndexlist) { - HeapTuple partIndexTuple = NULL; - Oid partIndId = InvalidOid; - Oid parentIndId = InvalidOid; - Partition indexPart = NULL; - Relation 
parentIndex = NULL; - - partIndexTuple = (HeapTuple)lfirst(lc); - parentIndId = (((Form_pg_partition)GETSTRUCT(partIndexTuple)))->parentid; - partIndId = HeapTupleGetOid(partIndexTuple); - - // open index and it's partition - parentIndex = index_open(parentIndId, AccessShareLock); - indexPart = partitionOpen(parentIndex, partIndId, AccessExclusiveLock); - - // update the indisusable field , by calling the internal function - ATExecSetIndexUsableState(PartitionRelationId, partIndId, false); - - // close index and it's partition - partitionClose(parentIndex, indexPart, NoLock); - index_close(parentIndex, NoLock); - } - - freePartList(partIndexlist); - - // update the indisusable field , by calling the internal function - ATExecSetIndexUsableState(PartitionRelationId, partOid, false); - - // close pg_partition - relation_close(pg_partition, RowExclusiveLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecUnusableIndex(Relation rel) -{ - List* indexPartitionTupleList = NULL; - ListCell* cell = NULL; - Oid heapOid = InvalidOid; - Relation heapRelation = NULL; - - if (!RelationIsIndex(rel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not set unusable index for relation %s , as it is not a index", - RelationGetRelationName(rel)))); - - // cstore relation doesn't support this feature now. - if (OidIsValid(rel->rd_rel->relcudescrelid) || rel->rd_rel->relam == CBTREE_AM_OID || - rel->rd_rel->relam == CGIN_AM_OID) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column-store index doesn't support this ALTER yet"))); - } - - heapOid = IndexGetRelation(rel->rd_id, false); - // the index is already lock by AccessExclusive lock, do not lock again. - // AccessExclusiveLock on heap already held by call AlterTableLookupRelation(). 
- heapRelation = relation_open(heapOid, NoLock); - -#ifdef ENABLE_MOT - if (heapRelation->rd_rel->relkind == RELKIND_FOREIGN_TABLE && isMOTFromTblOid(heapOid)) { - AlterForeingTableCmd fcmd = { - T_AlterForeingTableCmd, - AT_UnusableIndex, - heapRelation, - nullptr, - nullptr, - InvalidOid, - nullptr - }; - ATExecMOTAlterTable(&fcmd); - } -#endif - - // call the internal function, update pg_index system table - ATExecSetIndexUsableState(IndexRelationId, rel->rd_id, false); - - // if partitioned index, do extra work: set local index unusable - if (RelationIsPartitioned(rel)) { - // update pg_partition system table - indexPartitionTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_INDEX_PARTITION, rel->rd_id); - foreach (cell, indexPartitionTupleList) { - Partition indexPartition = NULL; - Partition heapPartition = NULL; - Oid indexPartOid = HeapTupleGetOid((HeapTuple)lfirst(cell)); - Oid heapPartOid = indexPartGetHeapPart(indexPartOid, false); - - // lock heap partition - heapPartition = partitionOpen(heapRelation, heapPartOid, AccessExclusiveLock); - // lock index partition - indexPartition = partitionOpen(rel, indexPartOid, AccessExclusiveLock); - // update the indisusable field , by calling the internal function - ATExecSetIndexUsableState(PartitionRelationId, indexPartOid, false); - // close heap part and index part - partitionClose(heapRelation, heapPartition, NoLock); - partitionClose(rel, indexPartition, NoLock); - } - freePartList(indexPartitionTupleList); - } - // recode changecsn of table owing the index - UpdatePgObjectChangecsn(heapOid, heapRelation->rd_rel->relkind); - // close heap relation but maintain the lock. 
- relation_close(heapRelation, NoLock); -} - -static void ATExecVisibleIndex(Relation rel, char* index_name, bool visible) -{ - ListCell* index = NULL; - bool found = false; - - if (!RelationIsRelation(rel)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("can not set visible for relation %s, as it is not a ordinary table", - RelationGetRelationName(rel)))); - - foreach (index, RelationGetIndexList(rel, true)) { - Oid indexId = lfirst_oid(index); - Relation indexRel = index_open(indexId, AccessShareLock); - if (strcmp(index_name, RelationGetRelationName(indexRel)) != 0) { - index_close(indexRel, AccessShareLock); - continue; - } - index_close(indexRel, AccessShareLock); - - ATExecSetIndexVisibleState(indexId, visible); - - found = true; - break; - } - - if (!found) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("index \"%s\" of relation \"%s\" does not exist", - index_name, RelationGetRelationName(rel)))); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecModifyRowMovement(Relation rel, bool rowMovement) -{ - HeapTuple tuple = NULL; - Oid relid = rel->rd_id; - Relation pg_class; - Form_pg_class rd_rel; - bool dirty = false; - - pg_class = heap_open(RelationRelationId, RowExclusiveLock); - - /* get the tuple of partitioned table */ - tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("could not find tuple for relation %u", relid))); - } - - rd_rel = (Form_pg_class)GETSTRUCT(tuple); - - /* modify the tuple */ - if (rd_rel->relrowmovement != rowMovement) { - rd_rel->relrowmovement = rowMovement; - dirty = true; - } - - /* update pg_class */ - if (dirty) { - simple_heap_update(pg_class, &tuple->t_self, tuple); - CatalogUpdateIndexes(pg_class, tuple); - /* the above sends a cache inval message */ - } else { - /* no need to change tuple, but force relcache 
inval anyway */ - CacheInvalidateRelcacheByTuple(tuple); - } - - tableam_tops_free_tuple(tuple); - heap_close(pg_class, RowExclusiveLock); - - CommandCounterIncrement(); -} - -static void ATExecResetPartitionno(Relation rel) -{ - Assert(RELATION_IS_PARTITIONED(rel)); - - bool isupgrade = (t_thrd.proc->workingVersionNum < PARTITION_ENHANCE_VERSION_NUM); - LOCKMODE relationlock = isupgrade ? ShareUpdateExclusiveLock : AccessExclusiveLock; - - RelationResetPartitionno(rel->rd_id, relationlock); -} - -List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) -{ - List *boundary = NIL; - int2vector *partitionKey = NULL; - switch (nodeTag(PartDef)) { - case T_RangePartitionDefState: - boundary = ((RangePartitionDefState *)PartDef)->boundary; - partitionKey = ((RangePartitionMap *)partTableRel->partMap)->partitionKey; - break; - case T_ListPartitionDefState: - boundary = ((ListPartitionDefState *)PartDef)->boundary; - partitionKey = ((ListPartitionMap *)partTableRel->partMap)->partitionKey; - break; - case T_HashPartitionDefState: - boundary = ((HashPartitionDefState *)PartDef)->boundary; - partitionKey = ((HashPartitionMap *)partTableRel->partMap)->partitionKey; - break; - default: - ereport(ERROR, - (errmodule(MOD_COMMAND), - errcode(ERRCODE_INVALID_OPERATION), - errmsg("Can't get the partitioned table boundary."), - errdetail("Table %s is unsupported for getting the boundary.", - RelationGetRelationName(partTableRel)), - errcause("Target table is unsupported for getting the boundary."), - erraction("Check the table type."))); - break; - } - boundary = transformConstIntoTargetType(partTableRel->rd_att->attrs, partitionKey, boundary); - return boundary; -} - -static char** getPartitionIndexesName(Oid partitionOid, List* indexList) { - if (indexList == NIL) { - return NULL; - } - int loc = 0; - ListCell* cell = NULL; - char** partitionIndexNames = (char**)palloc(sizeof(char*)*indexList->length); - foreach (cell, indexList) { - Oid indexOid = lfirst_oid(cell); - 
char* name = getPartitionIndexName(indexOid, partitionOid); - if (name == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("Invalid name of local index %u on the partition %u", indexOid, partitionOid))); - } - partitionIndexNames[loc++] = name; - } - return partitionIndexNames; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Change the names of the local indexes on the target partition table in pg_partition. - * Description : - * Notes : - */ -static void renamePartitionIndexes(Oid partitionedTableOid, Oid partitionOid, char** partitionIndexNames, - List* indexList) -{ - if (partitionIndexNames == NULL) { - return; - } - int loc = 0; - ListCell* cell = NULL; - foreach (cell, indexList) { - Oid indexOid = lfirst_oid(cell); - - Relation indexRel = relation_open(indexOid, AccessShareLock); - - Oid indexDestPartOid = getPartitionIndexOid(indexRel->rd_id, partitionOid); - Partition indexPart = partitionOpen(indexRel, indexDestPartOid, AccessExclusiveLock); - - renamePartitionInternal(partitionedTableOid, indexDestPartOid, partitionIndexNames[loc++]); - - partitionClose(indexRel, indexPart, NoLock); - relation_close(indexRel, AccessShareLock); - } -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : Truncate operation, create a new partition to replace the original partition. 
- * Description : - * Notes : - */ -static Oid heap_truncate_one_part_new(const AlterTableCmd* cmd, Relation partRel, Oid srcPartOid, - Relation rel = InvalidRelation) -{ - Partition srcPart = NULL; - bool renameTargetPart = false; - char* destPartitionName = NULL; - - Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, partRel, srcPartOid, &renameTargetPart); - if (RelationIsPartitionOfSubPartitionTable(partRel)) { - int subpartitionno = GetCurrentSubPartitionNo(srcPartOid); - PARTITIONNO_VALID_ASSERT(subpartitionno); - UpdateCurrentSubPartitionNo(destPartOid, subpartitionno); - } else { - int partitionno = GetCurrentPartitionNo(srcPartOid); - PARTITIONNO_VALID_ASSERT(partitionno); - UpdateCurrentPartitionNo(destPartOid, partitionno, false); - } - - List* indexList = NULL; - if (RelationIsPartitionOfSubPartitionTable(partRel) && RelationIsValid(rel)) { - indexList = RelationGetSpecificKindIndexList(rel, false); - } else { - indexList = RelationGetSpecificKindIndexList(partRel, false); - } - char** partitionIndexNames = getPartitionIndexesName(srcPartOid, indexList); - - srcPart = partitionOpen(partRel, srcPartOid, AccessExclusiveLock); - destPartitionName = pstrdup(PartitionGetPartitionName(srcPart)); - partitionClose(partRel, srcPart, NoLock); - - CommandCounterIncrement(); - fastDropPartition(partRel, srcPartOid, "TRUNCATE PARTITION"); - - CommandCounterIncrement(); - renamePartitionIndexes(partRel->rd_id, destPartOid, partitionIndexNames, indexList); - - if (renameTargetPart) { - CommandCounterIncrement(); - renamePartitionInternal(partRel->rd_id, destPartOid, destPartitionName); - } - - list_free_ext(indexList); - pfree_ext(partitionIndexNames); - pfree_ext(destPartitionName); - - return destPartOid; -} - -static void ATExecTruncatePartitionForSubpartitionTable(Relation rel, Oid partOid, AlterTableCmd* cmd, bool hasGPI) -{ - /* truncate subpartitioned table */ - Partition part = partitionOpen(rel, partOid, AccessExclusiveLock); - Relation partRel = 
partitionGetRelation(rel, part); - - List *subPartOidList = relationGetPartitionOidList(partRel); - ListCell *subPartOidCell = NULL; - Oid subPartOid = InvalidOid; - foreach (subPartOidCell, subPartOidList) { - subPartOid = lfirst_oid(subPartOidCell); - LockPartitionOid(partOid, subPartOid, AccessExclusiveLock); - } - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } - foreach (subPartOidCell, subPartOidList) { - Oid subPartOid = lfirst_oid(subPartOidCell); - - if (cmd->alterGPI) { - AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subPartOid); - } - - if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) { - heap_truncate_one_part(partRel, subPartOid); - } else { - heap_truncate_one_part_new(cmd, partRel, subPartOid, rel); - } - pgstat_report_truncate(subPartOid, rel->rd_id, rel->rd_rel->relisshared); - } - - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd) -{ - List* oidList = NULL; - List* relid = lappend_oid(NULL, rel->rd_id); - Oid partOid = InvalidOid; - Oid destPartOid = InvalidOid; - Oid newPartOid = InvalidOid; - Relation newTableRel = NULL; - bool hasGPI = (list_length(RelationGetSpecificKindIndexList(rel, true)) > 0); - - oidList = heap_truncate_find_FKs(relid); - if (PointerIsValid(oidList)) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("cannot truncate a partition owned by partitioned table which is referenced in a foreign key " - "constraint"))); - } - -#ifdef PGXC - if (IS_PGXC_COORDINATOR) { - u_sess->catalog_cxt.redistribution_cancelable = true; - - /* this maybe useless, consider to remove */ - u_sess->exec_cxt.could_cancel_redistribution = true; - } - - /* - * If parent rel is in redistribution, we need to truncate the same - * partition in its new table rel. 
- */ - if (RelationInClusterResizing(rel) && !RelationInClusterResizingReadOnly(rel)) { - /* - * If the target table is under online extension, it should always trigger - * redis cancel, even though currently no lock confict yet, later it will still - * meet, because redistribute the target table need to lock all the partition. - * So we can trigger it now. - */ - newTableRel = GetAndOpenNewTableRel(rel, AccessExclusiveLock); - } - - if (IS_PGXC_COORDINATOR) { - u_sess->catalog_cxt.redistribution_cancelable = false; - } -#endif - - /* - * Get the partition oid - * 1. Get partition oid from part_name cluase - * 2. Get partition oid values clause - */ - if (PointerIsValid(cmd->name)) { - partOid = PartitionNameGetPartitionOid(rel->rd_id, - cmd->name, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - if (newTableRel) { - newPartOid = PartitionNameGetPartitionOid(newTableRel->rd_id, - cmd->name, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock); - } - } else { - List *boundary = GetPartitionBoundary(rel, cmd->def); - partOid = PartitionValuesGetPartitionOid(rel, - boundary, - AccessExclusiveLock, - true, - true, /* will check validity of partition oid next step */ - false); - if (newTableRel) { - newPartOid = PartitionValuesGetPartitionOid(newTableRel, - boundary, - AccessExclusiveLock, - true, - true, /* will check validity of partition oid next step */ - false); - } - } - - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(rel)) { - LockPartitionObject(rel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - if (RelationIsSubPartitioned(rel)) { - ATExecTruncatePartitionForSubpartitionTable(rel, partOid, cmd, hasGPI); - 
return; - } - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } else { - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); - } - - if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) { - heap_truncate_one_part(rel, partOid); - } else { - destPartOid = heap_truncate_one_part_new(cmd, rel, partOid); - } - - pgstat_report_truncate(partOid, rel->rd_id, rel->rd_rel->relisshared); - - /* If newTableRel is not NULL, the parent rel must be in redistribution */ - if (newTableRel) { - heap_truncate_one_part(newTableRel, newPartOid); - - /* delete partOid related rows in delete delta table */ - simple_delete_redis_tuples(rel, partOid); - if (OidIsValid(destPartOid)) { - ResetOnePartRedisCtidRelOptions(rel, destPartOid); - } else { - ResetOnePartRedisCtidRelOptions(rel, partOid); - } - - /* clean up */ - heap_close(newTableRel, AccessExclusiveLock); - pgstat_report_truncate(newPartOid, newTableRel->rd_id, newTableRel->rd_rel->relisshared); - } - -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && IS_PGXC_DATANODE) { - Tsdb::DeleteDeltaByPartition(GetActiveSnapshot(), rel, partOid); - } -#endif /* ENABLE_MULTIPLE_NODES */ -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd) -{ - List* oidList = NULL; - List* relid = lappend_oid(NULL, rel->rd_id); - Oid subPartOid = InvalidOid; - bool hasGPI = (list_length(RelationGetSpecificKindIndexList(rel, true)) > 0); - - oidList = heap_truncate_find_FKs(relid); - if (PointerIsValid(oidList)) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("cannot truncate a partition owned by partitioned table which is referenced in a foreign key " - "constraint"))); - } - - /* - * Get the partition oid - * 1. Get partition oid from part_name cluase - * 2. 
Get partition oid values clause - */ - Oid partOid = InvalidOid; - if (PointerIsValid(cmd->name)) { - subPartOid = SubPartitionNameGetSubPartitionOid(rel->rd_id, - cmd->name, - ShareUpdateExclusiveLock, - AccessExclusiveLock, - false, - false, - NULL, - NULL, - NoLock, - &partOid); - } else { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The subpartition name is invalid"))); - } - - if (!OidIsValid(subPartOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The subpartition name is invalid"))); - } - - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } else { - AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subPartOid); - } - - if (!cmd->alterGPI || !hasGPI || RelationIsColStore(rel)) { - heap_truncate_one_part(partRel, subPartOid); - } else { - heap_truncate_one_part_new(cmd, partRel, subPartOid, rel); - } - pgstat_report_truncate(subPartOid, rel->rd_id, rel->rd_rel->relisshared); - - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); - -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && IS_PGXC_DATANODE) { - Tsdb::DeleteDeltaByPartition(GetActiveSnapshot(), rel, subPartOid); - } -#endif /* ENABLE_MULTIPLE_NODES */ -} - -/* - * - Brief: delete tuples in a redis delete delta table for partition table - * being redistributed - * - Parameter: - * @deltaRel: redis delete delta relation - * @partOid: partition oid - * - Return: - * @void: - */ -static void delete_delta_table_tuples(Relation deltaRel, Oid partOid) -{ - ScanKeyData key; - TableScanDesc scan; - HeapTuple tup; - - Assert(deltaRel != NULL); - /* delete_delta_table's first column including partitionoid */ - uint64 keyValue = (((uint64)u_sess->pgxc_cxt.PGXCNodeIdentifier << 32) | partOid); - /* For Redistribution, delete_delta_table's first 
column, including xc_node_id and relid(partitionoid) */ - ScanKeyInit(&key, 1, BTEqualStrategyNumber, F_INT8EQ, UInt64GetDatum(keyValue)); - - scan = tableam_scan_begin(deltaRel, SnapshotNow, 1, &key); - - while (HeapTupleIsValid(tup = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection))) { - simple_heap_delete(deltaRel, &tup->t_self); - } - - tableam_scan_end(scan); -} - -/* - * - Brief: delete tuples in redis related delete delta tables for partition table - * being redistributed - * - Parameter: - * @rel: relation being redistributed - * @partOid: partition oid - * - Return: - * @void: - */ -static void simple_delete_redis_tuples(Relation rel, Oid partOid) -{ - Relation deltaRel; - - /* - * First delete the multi catchup delta table tuples. - * Always keep the order consistent by operating on multi catchup delete delta first and then the delete delta. - */ - deltaRel = GetAndOpenDeleteDeltaRel(rel, RowExclusiveLock, true); - if (deltaRel) { - delete_delta_table_tuples(deltaRel, partOid); - heap_close(deltaRel, RowExclusiveLock); - } - - deltaRel = GetAndOpenDeleteDeltaRel(rel, RowExclusiveLock, false); - if (deltaRel) { - delete_delta_table_tuples(deltaRel, partOid); - heap_close(deltaRel, RowExclusiveLock); - } -} - -// find each index partition corresponding to srcPartOids, -// under clonedIndexRelation. -// then, add the fake relation for local index to end of merging_btrees_list. 
-static List* generateMergeingIndexes( - Relation destIndexRelation, Relation clonedIndexRelation, int2 bucketId, List* heapPartOids, List** partList) -{ - ListCell* cell = NULL; - List* merging_btrees_list = NIL; - List* merging_part_list = NIL; - - if (!OID_IS_BTREE(clonedIndexRelation->rd_rel->relam)) - return merging_btrees_list; - - merging_btrees_list = lappend(merging_btrees_list, destIndexRelation); - - foreach (cell, heapPartOids) { - Oid heapPartOid = InvalidOid; - Oid indexPartOid = InvalidOid; - Partition indexPart = NULL; - Relation indexPartRel = NULL; - - heapPartOid = lfirst_oid(cell); - indexPartOid = getPartitionIndexOid(clonedIndexRelation->rd_id, heapPartOid); - // the index partition already locked by checkPartitionLocalIndexesUsable() - indexPart = partitionOpen(clonedIndexRelation, indexPartOid, NoLock, bucketId); - if (!indexPart->pd_part->indisusable) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NODE_STATE), - errmsg("can not merge index partition %s bacause it is unusable local index", - PartitionGetPartitionName(indexPart)))); - } - indexPartRel = partitionGetRelation(clonedIndexRelation, indexPart); - - // append indexPartRel - merging_btrees_list = lappend(merging_btrees_list, indexPartRel); - merging_part_list = lappend(merging_part_list, indexPart); - } - - if (partList != NULL) - *partList = merging_part_list; - - return merging_btrees_list; -} - -static void destroyMergeingIndexes(Relation srcIndexRelation, List* merging_btrees_list, List* merging_part_list) -{ - ListCell* cell1 = NULL; - ListCell* cell2 = NULL; - int i = 0; - - cell2 = list_head(merging_part_list); - - foreach (cell1, merging_btrees_list) { - Relation indexRelation = (Relation)lfirst(cell1); - Partition indexPartition = NULL; - - if (i == 0) { - relation_close(indexRelation, NoLock); - } else if (!OidIsValid(indexRelation->parentId)) { - relation_close(indexRelation, NoLock); - } else { - - indexPartition = (Partition)lfirst(cell2); - 
partitionClose(srcIndexRelation, indexPartition, NoLock); - releaseDummyRelation(&indexRelation); - cell2 = lnext(cell2); - } - i++; - } - list_free_ext(merging_btrees_list); - list_free_ext(merging_part_list); -} - -static void mergePartitionIndexSwap(List* indexRel, List* indexDestPartOid, List* indexDestOid, TransactionId FreezeXid, - MultiXactId FreezeMultiXid) -{ - ListCell* cell1 = NULL; - ListCell* cell2 = NULL; - ListCell* cell3 = NULL; - - forthree(cell1, indexRel, cell2, indexDestPartOid, cell3, indexDestOid) - { - Relation currentIndex = (Relation)lfirst(cell1); - Oid dstPartOid = lfirst_oid(cell2); - Oid clonedIndexRelationId = lfirst_oid(cell3); - Partition dstPart; - - /* before swap refilenode, promote lock on index partition from ExclusiveLock to AccessExclusiveLock */ - dstPart = partitionOpenWithRetry(currentIndex, dstPartOid, AccessExclusiveLock, "MERGE PARTITIONS"); - if (!dstPart) { - ereport(ERROR, - (errcode(ERRCODE_LOCK_WAIT_TIMEOUT), - errmsg( - "could not acquire AccessExclusiveLock on dest index partition \"%s\", MERGE PARTITIONS failed", - getPartitionName(dstPartOid, false)))); - } - /* swap relfilenode between temp index relation and dest index partition */ - finishPartitionHeapSwap(dstPartOid, clonedIndexRelationId, false, FreezeXid, FreezeMultiXid); - partitionClose(currentIndex, dstPart, NoLock); - } -} - -static void mergePartitionHeapSwap(Relation partTableRel, Oid destPartOid, Oid tempTableOid, TransactionId FreezeXid, - MultiXactId FreezeMultiXid) -{ - Partition destPart; - - /* before swap refilenode, promote lock on heap partition from ExclusiveLock to AccessExclusiveLock */ - destPart = partitionOpenWithRetry(partTableRel, destPartOid, AccessExclusiveLock, "MERGE PARTITIONS"); - - /* step 4: swap relfilenode and delete temp table */ - if (!destPart) { - ereport(ERROR, - (errcode(ERRCODE_LOCK_WAIT_TIMEOUT), - errmsg("could not acquire AccessExclusiveLock on dest table partition \"%s\", MERGE PARTITIONS failed", - 
getPartitionName(destPartOid, false)))); - } - - finishPartitionHeapSwap(destPartOid, tempTableOid, false, FreezeXid, FreezeMultiXid); - partitionClose(partTableRel, destPart, NoLock); -} - -static inline void FlushBufferIfNotBucket(Relation rel, int2 bucketId) -{ - if (bucketId == InvalidBktId) { - FlushRelationBuffers(rel); - smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM); - } -} - -static void mergePartitionBTreeIndexes(List* srcPartOids, List* srcPartMergeOffset, List* indexRel_list, - List* indexDestOid_list, int2 bucketId) -{ - ListCell* cell1 = NULL; - ListCell* cell2 = NULL; - - forboth(cell1, indexRel_list, cell2, indexDestOid_list) - { - Relation currentIndex = (Relation)lfirst(cell1); - Oid clonedIndexRelationId = lfirst_oid(cell2); - bool skip_build = false; - Relation clonedIndexRelation; - List* merging_btrees_list = NIL; - List* merging_part_list = NIL; - int2 bktid = RelationIsCrossBucketIndex(currentIndex) ? InvalidBktId : bucketId; - - if (OID_IS_BTREE(currentIndex->rd_rel->relam)) - skip_build = true; - else - skip_build = false; - - /* merge several indexes together */ - if (skip_build) { - /* - * add the newly created index into merging_btrees_list - * we now begin creating a list of index relation for merging - */ - clonedIndexRelation = index_open(clonedIndexRelationId, AccessExclusiveLock, bktid); - if (RelationIsCrossBucketIndex(clonedIndexRelation)) { - if (!clonedIndexRelation->newcbi) { - /* here the crossbucket index was already merged */ - index_close(clonedIndexRelation, AccessExclusiveLock); - continue; - } - /* the first time to attempt to merge this crossbucket index when newcbi is true */ - clonedIndexRelation->newcbi = false; - } - merging_btrees_list = - generateMergeingIndexes(clonedIndexRelation, currentIndex, bktid, srcPartOids, &merging_part_list); - - /* merging indexes: using the merging_btrees_list as the source */ - mergeBTreeIndexes(merging_btrees_list, srcPartMergeOffset, bucketId); - - /* destroy the merging indexes list 
*/ - destroyMergeingIndexes(currentIndex, merging_btrees_list, merging_part_list); - } - } -} - -static void mergePartitionHeapData(Relation partTableRel, Relation tempTableRel, List* srcPartOids, List* indexRel_list, - List* indexDestOid_list, int2 bucketId, TransactionId* freezexid, MultiXactId* freezeMultixid) -{ - TransactionId FreezeXid = InvalidTransactionId; - MultiXactId FreezeMultiXid = InvalidMultiXactId; - HTAB* chunkIdHashTable = NULL; - ListCell* cell1 = NULL; - List* mergeToastIndexes = NIL; - List* srcPartToastMergeOffset = NIL; - List* srcPartMergeOffset = NIL; - bool hasToast = false; - Relation tempTableToastRel = NULL; - Relation tempTableToastIndexRel = NULL; - BlockNumber mergeHeapBlocks = 0; - BlockNumber mergeToastBlocks = 0; - int partNum = 0; - int iterator = 0; - bool* srcPartsHasVM = NULL; - bool hasVM = false; - bool hasFSM = false; - - partNum = srcPartOids->length; - - if (OidIsValid(tempTableRel->rd_rel->reltoastrelid)) { - hasToast = true; - tempTableToastRel = relation_open(tempTableRel->rd_rel->reltoastrelid, AccessExclusiveLock, bucketId); - tempTableToastIndexRel = index_open(tempTableToastRel->rd_rel->reltoastidxid, AccessExclusiveLock, bucketId); - mergeToastIndexes = lappend(mergeToastIndexes, tempTableToastIndexRel); - } - - /* step 3: merge each src partition's tuple into the temp table */ - mergeHeapBlocks = 0; - mergeToastBlocks = 0; - - /* - * 3.1 check chunk_id of toast table not repeat - */ - if (hasToast) { - HASHCTL hashCtl; - List* srcPartToastRels = NIL; - errno_t rc = EOK; - - /* Initialize hash tables */ - rc = memset_s(&hashCtl, sizeof(hashCtl), 0, sizeof(hashCtl)); - securec_check(rc, "\0", "\0"); - hashCtl.keysize = sizeof(ChunkIdHashKey); - hashCtl.entrysize = sizeof(OldToNewChunkIdMapping); - hashCtl.hash = tag_hash; - - chunkIdHashTable = - hash_create("Merge partition / Old to new chunkId map", 128, &hashCtl, HASH_ELEM | HASH_FUNCTION); - - foreach (cell1, srcPartOids) { - Oid srcPartOid = 
lfirst_oid(cell1); - Partition srcPartition = NULL; - Relation srcPartToastRel = NULL; - Relation srcPartToastIndexRel = NULL; - - srcPartition = partitionOpen(partTableRel, srcPartOid, NoLock); - - /* open toast table and it's index */ - srcPartToastRel = relation_open(srcPartition->pd_part->reltoastrelid, ExclusiveLock, bucketId); - srcPartToastIndexRel = index_open(srcPartToastRel->rd_rel->reltoastidxid, ExclusiveLock, bucketId); - - srcPartToastRels = lappend(srcPartToastRels, srcPartToastRel); - mergeToastIndexes = lappend(mergeToastIndexes, srcPartToastIndexRel); - - partitionClose(partTableRel, srcPartition, NoLock); - } - - /* Find repeat chunkId in toast tables, and replace. */ - replaceRepeatChunkId(chunkIdHashTable, srcPartToastRels); - - foreach (cell1, srcPartToastRels) { - Relation srcPartToastRel = (Relation)lfirst(cell1); - relation_close(srcPartToastRel, NoLock); - } - - list_free_ext(srcPartToastRels); - } - /* - * 3.2 check VM and FSM of src partitions - */ - srcPartsHasVM = (bool*)palloc0(partNum * sizeof(bool)); - - iterator = 0; - foreach (cell1, srcPartOids) { - Oid srcPartOid = lfirst_oid(cell1); - Partition srcPartition = NULL; - - srcPartition = partitionOpen(partTableRel, srcPartOid, NoLock, bucketId); - - if (smgrexists(srcPartition->pd_smgr, VISIBILITYMAP_FORKNUM)) { - srcPartsHasVM[iterator] = true; - hasVM = true; - } - - if (smgrexists(srcPartition->pd_smgr, FSM_FORKNUM)) { - hasFSM = true; - } - - iterator++; - - PartitionCloseSmgr(srcPartition); - partitionClose(partTableRel, srcPartition, NoLock); - } - - /* create VM on temp table if need */ - if (hasVM) { - /* Retry to open smgr in case it is cloesd when we process SI messages */ - RelationOpenSmgr(tempTableRel); - smgrcreate(tempTableRel->rd_smgr, VISIBILITYMAP_FORKNUM, false); - } - - /* create FSM on temp table if need */ - if (hasFSM) { - /* Retry to open smgr in case it is cloesd when we process SI messages */ - RelationOpenSmgr(tempTableRel); - 
smgrcreate(tempTableRel->rd_smgr, FSM_FORKNUM, false); - } - - /* - * 3.3 merge heap and toast, if any - */ - iterator = 0; - foreach (cell1, srcPartOids) { - Oid srcPartOid = lfirst_oid(cell1); - Partition srcPartition = NULL; - Relation srcPartRel = NULL; - char persistency; - BlockNumber srcPartHeapBlocks = 0; - TransactionId relfrozenxid = InvalidTransactionId; - MultiXactId relminmxid = InvalidMultiXactId; - - srcPartition = partitionOpen(partTableRel, srcPartOid, ExclusiveLock, bucketId); // already ExclusiveLock - // locked - srcPartRel = partitionGetRelation(partTableRel, srcPartition); - PartitionOpenSmgr(srcPartition); - - getPartitionRelxids(srcPartRel, &relfrozenxid, &relminmxid); - /* update final fronzenxid, we choose the least one */ - if (!TransactionIdIsValid(FreezeXid) || TransactionIdPrecedes(relfrozenxid, FreezeXid)) - FreezeXid = relfrozenxid; - - if (!MultiXactIdIsValid(FreezeMultiXid) || MultiXactIdPrecedes(relminmxid, FreezeMultiXid)) - FreezeMultiXid = relminmxid; - - /* Retry to open smgr in case it is cloesd when we process SI messages */ - RelationOpenSmgr(tempTableRel); - - /* Ensure smgr is opened */ - RelationOpenSmgr(srcPartRel); - - /* flush dirty pages to disk. Bucket tables has already flushed buffers before for performance. */ - FlushBufferIfNotBucket(srcPartRel, bucketId); - - persistency = srcPartRel->rd_rel->relpersistence; - - srcPartHeapBlocks = smgrnblocks(srcPartRel->rd_smgr, MAIN_FORKNUM); - - /* merge heap */ - mergeHeapBlock(srcPartRel, - tempTableRel, - MAIN_FORKNUM, - persistency, - srcPartHeapBlocks, - mergeHeapBlocks, - srcPartRel->rd_att, - srcPartRel->rd_rel->reltoastrelid, - tempTableRel->rd_rel->reltoastrelid, - hasToast ? 
chunkIdHashTable : NULL, - hasFSM); - - /* merge toast table */ - if (hasToast) { - Relation srcPartToastRel = NULL; - char toastPersistency; - BlockNumber srcPartToastBlocks = 0; - - srcPartToastRel = relation_open(srcPartition->pd_part->reltoastrelid, NoLock, bucketId); - RelationOpenSmgr(srcPartToastRel); - srcPartToastMergeOffset = lappend_int(srcPartToastMergeOffset, mergeToastBlocks); - - FlushBufferIfNotBucket(srcPartToastRel, bucketId); - - toastPersistency = srcPartToastRel->rd_rel->relpersistence; - - srcPartToastBlocks = smgrnblocks(srcPartToastRel->rd_smgr, MAIN_FORKNUM); - - mergeHeapBlock(srcPartToastRel, - tempTableToastRel, - MAIN_FORKNUM, - toastPersistency, - srcPartToastBlocks, - mergeToastBlocks, - NULL, - InvalidOid, - InvalidOid, - chunkIdHashTable, - false); - mergeToastBlocks += srcPartToastBlocks; - - RelationCloseSmgr(srcPartToastRel); - relation_close(srcPartToastRel, NoLock); - } - - /* merge VM */ - if (hasVM && srcPartsHasVM[iterator]) { - mergeVMBlock(srcPartRel, tempTableRel, srcPartHeapBlocks, mergeHeapBlocks); - } - - PartitionCloseSmgr(srcPartition); - partitionClose(partTableRel, srcPartition, NoLock); - releaseDummyRelation(&srcPartRel); - - iterator++; - srcPartMergeOffset = lappend_int(srcPartMergeOffset, mergeHeapBlocks); - mergeHeapBlocks += srcPartHeapBlocks; - } - - pfree_ext(srcPartsHasVM); - - if (freezexid != NULL) - *freezexid = FreezeXid; - - if (freezeMultixid != NULL) - *freezeMultixid = FreezeMultiXid; - /* - * 3.4 merge toast indexes and destroy chunkId hash table - */ - if (hasToast) { - mergeBTreeIndexes(mergeToastIndexes, srcPartToastMergeOffset, bucketId); - destroyMergeingIndexes(NULL, mergeToastIndexes, NULL); - RelationCloseSmgr(tempTableToastRel); - heap_close(tempTableToastRel, NoLock); - hash_destroy(chunkIdHashTable); - } - - /* - * 3.5 merge btree-indexes - * - */ - mergePartitionBTreeIndexes(srcPartOids, srcPartMergeOffset, indexRel_list, indexDestOid_list, bucketId); -} - -static void 
UpdatePrevIntervalPartToRange(Relation srcRel, Relation pgPartition, int srcPartIndex, const char* briefCmd) -{ - RangePartitionMap *parts = reinterpret_cast(srcRel->partMap); - for (int i = srcPartIndex - 1; i > 0; --i) { - if (parts->rangeElements[i].isInterval) { - UpdateIntervalPartToRange(pgPartition, parts->rangeElements[i].partitionOid, briefCmd); - } - } -} - -static void FlushMergedPartitionBuffers(Relation partTableRel, const List* srcPartOids, bool hasToast) -{ - ListCell* cell = NULL; - foreach (cell, srcPartOids) { - Oid srcPartOid = lfirst_oid(cell); - - /* - * 1. Alreay exclusive locked at begin. No need to acquire any lock here - * 2. Use InvalidBktId as FlushRelationBuffers do not compare bucketNode with buffer tag. - */ - Partition srcPartition = partitionOpen(partTableRel, srcPartOid, NoLock, InvalidBktId); - Relation srcPartRel = partitionGetRelation(partTableRel, srcPartition); - RelationOpenSmgr(srcPartRel); - - FlushRelationBuffers(srcPartRel); - smgrimmedsync(srcPartRel->rd_smgr, MAIN_FORKNUM); - - if (hasToast) { - Relation srcPartToastRel = relation_open(srcPartition->pd_part->reltoastrelid, NoLock, InvalidBktId); - RelationOpenSmgr(srcPartToastRel); - - FlushRelationBuffers(srcPartToastRel); - smgrimmedsync(srcPartToastRel->rd_smgr, MAIN_FORKNUM); - - RelationCloseSmgr(srcPartToastRel); - relation_close(srcPartToastRel, NoLock); - } - - RelationCloseSmgr(srcPartRel); - partitionClose(partTableRel, srcPartition, NoLock); - releaseDummyRelation(&srcPartRel); - } -} - -/* - * MERGE partitions p1, p2...pn into partition px - * infact, pn is the same partition with px - * if px is deferent with pn, change pn's name to px - */ -static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd) -{ - List* srcPartitions = NIL; - List* srcPartOids = NIL; - List* index_list = NIL; - List* indexRel_list = NIL; - List* clonedIndexRelId_list = NIL; - List* indexDestPartOid_list = NIL; - ListCell* cell = NULL; - char* destPartName = NULL; - 
char* oldPartName = NULL; - Oid destPartOid = InvalidOid; - Partition destPart = NULL; - Relation destPartRel = NULL; - bool renameTargetPart = false; - bool needUpdateIntervalToRange = false; - int firstPartIndex = -1; - int curPartIndex = -1; - int prevPartIndex = -1; - int iterator = 0; - int partNum; - Oid targetPartTablespaceOid = InvalidOid; - TupleDesc partedTableHeapDesc; - Datum partedTableRelOptions = 0; - HeapTuple tuple = NULL; - bool isNull = false; - Oid tempTableOid = InvalidOid; - Relation tempTableRel = NULL; - ObjectAddress object; - TransactionId FreezeXid; - MultiXactId FreezeMultiXid; - LOCKMODE lockMode = NoLock; - - srcPartitions = (List*)cmd->def; - destPartName = cmd->name; - partNum = srcPartitions->length; - - /* Branch if we are dealing with column-store */ - if (RelationIsColStore(partTableRel)) { - ATExecCStoreMergePartition(partTableRel, cmd); - return; - } - - /* the source partitions, must be at least 2, to merge into 1 partition */ - if (partNum < 2) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("source partitions must be at least two partitions"))); - } - if (partNum > MAX_MERGE_PARTITIONS) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("merge partitions of relation \"%s\", source partitions must be no more than %d partitions", - RelationGetRelationName(partTableRel), - MAX_MERGE_PARTITIONS))); - } - - /* - * step 1: lock src partitions, and check the continuity of srcPartitions. - * for 1...nth partition, we use AccessExclusiveLock lockmode - * althought merge is something like delete and insert. 
- */ - foreach (cell, srcPartitions) { - char* partName = NULL; - Oid srcPartOid = InvalidOid; - - iterator++; - partName = strVal(lfirst(cell)); - - /* from name to partition oid */ - srcPartOid = PartitionNameGetPartitionOid(partTableRel->rd_id, - partName, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, // get AccessExclusiveLock lock on src partitions - false, // no missing - false, // wait - NULL, - NULL, - NoLock); - /* check local index 'usable' state */ - if (!checkPartitionLocalIndexesUsable(srcPartOid)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't merge partition bacause partition %s has unusable local index", partName), - errhint("please reindex the unusable index first."))); - } - - /* from partitionoid to partition sequence */ - curPartIndex = partOidGetPartSequence(partTableRel, srcPartOid) - 1; - - /* check the continuity of sequence, not the first round loop */ - if (iterator != 1) { - if (curPartIndex - prevPartIndex != 1) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("source partitions must be continuous and in ascending order of boundary"))); - RangePartitionMap* partMap = reinterpret_cast(partTableRel->partMap); - RangeElement* ranges = partMap->rangeElements; - if (ranges[curPartIndex].isInterval) { - // previous partition's upperBound should be equal with current partition's lowerBound - Const* prevUpper = ranges[prevPartIndex].boundary[0]; - if (ValueCmpLowBoudary(&prevUpper, ranges + curPartIndex, partMap->intervalValue) != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("source partitions must be continuous and in ascending order of boundary"))); - } - needUpdateIntervalToRange = true; - } - } else { - firstPartIndex = curPartIndex; - } - prevPartIndex = curPartIndex; - - /* save the last source partition name */ - if (iterator == partNum) { - oldPartName = partName; - destPartOid = srcPartOid; - } - - /* save oid of src partition */ - 
srcPartOids = lappend_oid(srcPartOids, srcPartOid); - } - if (oldPartName == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("the last source partition name oldPartName is NULL"))); - } - - if (strcmp(oldPartName, destPartName) != 0) { - /* check partition new name does not exist. */ - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, - NameGetDatum(destPartName), - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION), - ObjectIdGetDatum(partTableRel->rd_id))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("target partition's name \"%s\" already exists", destPartName))); - } - - renameTargetPart = true; - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) { - LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - if (cmd->alterGPI) { - destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, curPartIndex, &renameTargetPart); - int partitionno = GetPartitionnoFromSequence(partTableRel->partMap, curPartIndex); - UpdateCurrentPartitionNo(destPartOid, partitionno, false); - lockMode = AccessExclusiveLock; - } - - /* - * step 2: create a temp table for merge - * get desc of partitioned table - */ - partedTableHeapDesc = RelationGetDescr(partTableRel); - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(partTableRel))); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for relation %u", RelationGetRelid(partTableRel)))); - } - partedTableRelOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isNull); - if (isNull) { - partedTableRelOptions = (Datum)0; - } - - /* - * open the dest partition. - * If it's not alterGPI, destPart was already locked by PartitionNameGetPartitionOid() call. 
- */ - destPart = partitionOpen(partTableRel, destPartOid, lockMode); - destPartRel = partitionGetRelation(partTableRel, destPart); - - /* check target partition tablespace */ - if (PointerIsValid(cmd->target_partition_tablespace)) { - targetPartTablespaceOid = get_tablespace_oid(cmd->target_partition_tablespace, false); - } else { - targetPartTablespaceOid = destPartRel->rd_rel->reltablespace; - } - - /* create temp table and open it */ - tempTableOid = makePartitionNewHeap(partTableRel, - partedTableHeapDesc, - partedTableRelOptions, - destPartRel->rd_id, - destPartRel->rd_rel->reltoastrelid, - targetPartTablespaceOid); - object.classId = RelationRelationId; - object.objectId = tempTableOid; - object.objectSubId = 0; - - ReleaseSysCache(tuple); - partitionClose(partTableRel, destPart, NoLock); - releaseDummyRelation(&destPartRel); - - /* open temp relation */ - tempTableRel = relation_open(tempTableOid, AccessExclusiveLock); - RelationOpenSmgr(tempTableRel); - - /* lock the index relation on partitioned table and check the usability */ - index_list = RelationGetSpecificKindIndexList(partTableRel, false); - foreach (cell, index_list) { - Oid dstIndexPartTblspcOid; - Oid clonedIndexRelationId; - Oid indexDestPartOid; - Oid indexId = lfirst_oid(cell); - char tmp_idxname[NAMEDATALEN]; - Relation currentIndex; - bool skip_build = false; - errno_t rc = EOK; - - /* Open the index relation, use AccessShareLock */ - currentIndex = index_open(indexId, AccessShareLock); - - if (!IndexIsUsable(currentIndex->rd_index)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("merge partitions cannot process inusable index relation \''%s\''", - RelationGetRelationName(currentIndex)))); - } else { - indexRel_list = lappend(indexRel_list, currentIndex); - } - - if (OID_IS_BTREE(currentIndex->rd_rel->relam)) - skip_build = true; - else - skip_build = false; - - /* build name for tmp index */ - rc = snprintf_s(tmp_idxname, - sizeof(tmp_idxname), - 
sizeof(tmp_idxname) - 1, - "pg_tmp_%u_%u_index", - destPartOid, - currentIndex->rd_id); - securec_check_ss_c(rc, "\0", "\0"); - - /* get tablespace oid of this index partition */ - dstIndexPartTblspcOid = getPartitionIndexTblspcOid(currentIndex->rd_id, destPartOid); - - /* build the same index for tmp table */ - clonedIndexRelationId = - generateClonedIndex(currentIndex, tempTableRel, tmp_idxname, dstIndexPartTblspcOid, skip_build, false); - indexDestPartOid = getPartitionIndexOid(currentIndex->rd_id, destPartOid); - - clonedIndexRelId_list = lappend_oid(clonedIndexRelId_list, clonedIndexRelationId); - indexDestPartOid_list = lappend_oid(indexDestPartOid_list, indexDestPartOid); - } - - if (OidIsValid(tempTableRel->rd_rel->reltoastrelid)) { - /* set new empty filenode for toast index */ - Relation toastRel = relation_open(tempTableRel->rd_rel->reltoastrelid, AccessExclusiveLock); - Relation toastIndexRel = index_open(toastRel->rd_rel->reltoastidxid, AccessExclusiveLock); - RelationSetNewRelfilenode(toastIndexRel, InvalidTransactionId, InvalidMultiXactId); - relation_close(toastRel, NoLock); - index_close(toastIndexRel, NoLock); - } - - /* step3: merge internal */ - if (RELATION_OWN_BUCKETKEY(partTableRel)) { - /* - * Flushing relation buffer needs to scan all buffers; It's too slow to scan buckets one by one. - * Thus we do the flushing outside the inner loop. 
- */ - bool hasToast = OidIsValid(tempTableRel->rd_rel->reltoastrelid); - FlushMergedPartitionBuffers(partTableRel, srcPartOids, hasToast); - - Relation tempbucketRel = NULL; - oidvector* bucketlist = searchHashBucketByOid(partTableRel->rd_bucketoid); - - for (int i = 0; i < bucketlist->dim1; i++) { - tempbucketRel = bucketGetRelation(tempTableRel, NULL, bucketlist->values[i]); - mergePartitionHeapData(partTableRel, - tempbucketRel, - srcPartOids, - indexRel_list, - clonedIndexRelId_list, - bucketlist->values[i], - &FreezeXid, - &FreezeMultiXid); - - /* first bucket already merged into target cross bucket index. */ - if (i != 0) { - AddCBIForPartition(partTableRel, tempbucketRel, indexRel_list, clonedIndexRelId_list); - } - bucketCloseRelation(tempbucketRel); - } - } else { - mergePartitionHeapData( - partTableRel, tempTableRel, srcPartOids, indexRel_list, clonedIndexRelId_list, InvalidBktId, &FreezeXid, - &FreezeMultiXid); - } - - /* close temp relation */ - RelationCloseSmgr(tempTableRel); - heap_close(tempTableRel, NoLock); - - /* swap the index relfilenode*/ - mergePartitionIndexSwap(indexRel_list, indexDestPartOid_list, clonedIndexRelId_list, FreezeXid, FreezeMultiXid); - - /* swap the heap relfilenode */ - mergePartitionHeapSwap(partTableRel, destPartOid, tempTableOid, FreezeXid, FreezeMultiXid); - CommandCounterIncrement(); - - /*free index list*/ - list_free_ext(index_list); - list_free_ext(clonedIndexRelId_list); - list_free_ext(indexDestPartOid_list); - const char* BRIEF_CMD_MERGE = "MERGE PARTITIONS"; - if (needUpdateIntervalToRange) { - /* update previous partition and the new partition to range partition */ - Relation pgPartition = relation_open(PartitionRelationId, RowExclusiveLock); - UpdatePrevIntervalPartToRange(partTableRel, pgPartition, firstPartIndex, BRIEF_CMD_MERGE); - - /* update merge result partition to range partition */ - UpdateIntervalPartToRange(pgPartition, destPartOid, BRIEF_CMD_MERGE); - relation_close(pgPartition, NoLock); - } - /* 
ensure that preceding changes are all visible to the next deletion step. */ - CommandCounterIncrement(); - - /* delete temp table */ - performDeletion(&object, DROP_CASCADE, PERFORM_DELETION_INTERNAL); - - /* close every index relation */ - foreach (cell, indexRel_list) { - Relation currentIndex = (Relation)lfirst(cell); - index_close(currentIndex, NoLock); - } - list_free_ext(indexRel_list); - - /* step 5: drop src partitions */ - foreach (cell, srcPartOids) { - Oid srcPartOid = InvalidOid; - srcPartOid = lfirst_oid(cell); - if (destPartOid != srcPartOid) { - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid); - fastDropPartition(partTableRel, srcPartOid, "MERGE PARTITIONS"); - } - } - - /* - * step 6: rename p(n) to p(target) if needed, the dest partition is now locked by swap refilenode processing step - */ - if (renameTargetPart) { - renamePartitionInternal(partTableRel->rd_id, destPartOid, destPartName); - } - - /* step 7: Unusable Global Index */ - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(partTableRel); - } else { - AddGPIForPartition(RelationGetRelid(partTableRel), destPartOid); - } -} - - -static void ATExecAddTblIntoCBI(Relation idxRel, const AddTableIntoCBIState* state) -{ - Oid relid; - Oid tmpPartOid; - Relation tmpHeapRel; - Relation heapRel; - Relation targetHeap; - Relation targetIndex; - Partition heapPart; - Partition idxPart; - List *heapparts = NULL; - List *indexparts = NULL; - List *tmpPartOids = NULL; - - if (!u_sess->attr.attr_sql.enable_cluster_resize) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ALTER INDEX ADD TABLE only support in pg_redis."))); - } - if (!RelationIsCrossBucketIndex(idxRel)) { - ereport(LOG, (errmsg("alter index add table only support crossbucket index"))); - return; - } - ereport(LOG, - (errmsg("ALTER INDEX %s ADD TABLE %s.", RelationGetRelationName(idxRel), state->relation->relname))); - - relid = RangeVarGetRelid(state->relation, NoLock, 
false); - heapRel = heap_open(relid, AccessShareLock); - - if (RelationIsPartitioned(heapRel)) { - tmpHeapRel = heap_open(IndexGetRelation(RelationGetRelid(idxRel), false), AccessShareLock); - heapparts = relationGetPartitionList(heapRel, AccessShareLock); - indexparts = RelationIsGlobalIndex(idxRel) ? NULL : GetIndexPartitionListByOrder(idxRel, AccessExclusiveLock); - tmpPartOids = relationGetPartitionOidList(tmpHeapRel); - for (int i = 0; i < list_length(heapparts); i++) { - heapPart = (Partition)list_nth(heapparts, i); - idxPart = (indexparts != NULL) ? (Partition)list_nth(indexparts, i) : NULL; - targetHeap = partitionGetRelation(heapRel, heapPart); - targetIndex = (idxPart != NULL) ? partitionGetRelation(idxRel, idxPart) : idxRel; - tmpPartOid = list_nth_oid(tmpPartOids, i); - ScanHeapInsertCBI(heapRel, targetHeap, targetIndex, tmpPartOid); - releaseDummyRelation(&targetHeap); - partitionClose(heapRel, heapPart, AccessShareLock); - if (!RelationIsGlobalIndex(idxRel)) { - releaseDummyRelation(&targetIndex); - partitionClose(idxRel, idxPart, AccessExclusiveLock); - } - } - list_free_ext(heapparts); - list_free_ext(indexparts); - list_free_ext(tmpPartOids); - heap_close(tmpHeapRel, AccessShareLock); - } else { - ScanHeapInsertCBI(heapRel, heapRel, idxRel, InvalidOid); - } - heap_close(heapRel, AccessShareLock); -} - -// When merge toast table, values of the first column may be repeat. -// So, we must replace these values, and record them in repeatChunkIdList. -// When merge heap, modify toast_pointer->va_valueid by repeatChunkIdList. 
-static void replaceRepeatChunkId(HTAB* chunkIdHashTable, List* srcPartToastRels) -{ - ListCell* cell = NULL; - int i = 0; - errno_t rc = EOK; - - foreach (cell, srcPartToastRels) { - Relation srcPartToastRel = NULL; - Relation toastIndexRel = NULL; - TableScanDesc scan = NULL; - void *tuple = NULL; - TupleDesc tupleDesc = NULL; - int numAttrs = 0; - - srcPartToastRel = (Relation)lfirst(cell); - toastIndexRel = index_open(srcPartToastRel->rd_rel->reltoastidxid, RowExclusiveLock); - - tupleDesc = srcPartToastRel->rd_att; - numAttrs = tupleDesc->natts; - - scan = tableam_scan_begin(srcPartToastRel, SnapshotNow, 0, NULL); - - while ((tuple = tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - Datum values[numAttrs]; - bool isNull[numAttrs]; - Oid oldChunkId = InvalidOid; - Oid newChunkId = InvalidOid; - - bool found = false; - ChunkIdHashKey hashkey; - OldToNewChunkIdMapping mapping = NULL; - - rc = memset_s(values, numAttrs, 0, numAttrs); - securec_check(rc, "\0", "\0"); - - rc = memset_s(isNull, numAttrs, 0, numAttrs); - securec_check(rc, "\0", "\0"); - - tableam_tops_deform_tuple(tuple, tupleDesc, values, isNull); - - oldChunkId = (Oid)values[0]; - - rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey)); - securec_check(rc, "\0", "\0"); - hashkey.toastTableOid = RelationGetRelid(srcPartToastRel); - hashkey.oldChunkId = oldChunkId; - - mapping = (OldToNewChunkIdMapping)hash_search(chunkIdHashTable, &hashkey, HASH_FIND, NULL); - - // One data may be cut into several tuples. - // These tuples have the same chunkId. - // So we replace the same new value if need. 
- if (PointerIsValid(mapping)) { - { - HeapTuple copyTuple = NULL; - - values[0] = mapping->newChunkId; - copyTuple = (HeapTuple)tableam_tops_form_tuple(tupleDesc, values, isNull, tupleDesc->td_tam_ops); - - simple_heap_delete(srcPartToastRel, &((HeapTuple)tuple)->t_self); - (void)simple_heap_insert(srcPartToastRel, copyTuple); - - (void)index_insert(toastIndexRel, - values, - isNull, - &(copyTuple->t_self), - srcPartToastRel, - toastIndexRel->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); - - tableam_tops_free_tuple(copyTuple); - } - - continue; - } - - if (checkChunkIdRepeat(srcPartToastRels, i, oldChunkId)) { - HeapTuple copyTuple = NULL; - - // Get one new oid, and it is not repeat in other toast tables. - do { - newChunkId = GetNewObjectId(); - } while (checkChunkIdRepeat(srcPartToastRels, -1, newChunkId)); - - values[0] = newChunkId; - - copyTuple = (HeapTuple)tableam_tops_form_tuple(tupleDesc, values, isNull, tupleDesc->td_tam_ops); - - simple_heap_delete(srcPartToastRel, &((HeapTuple)tuple)->t_self); - (void)simple_heap_insert(srcPartToastRel, copyTuple); - { - copyTuple = heap_form_tuple(tupleDesc, values, isNull); - - simple_heap_delete(srcPartToastRel, &((HeapTuple)tuple)->t_self); - (void)simple_heap_insert(srcPartToastRel, copyTuple); - - (void)index_insert(toastIndexRel, - values, - isNull, - &(copyTuple->t_self), - srcPartToastRel, - toastIndexRel->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); - - tableam_tops_free_tuple(copyTuple); - } - - // Enter hash table - mapping = (OldToNewChunkIdMapping)hash_search(chunkIdHashTable, &hashkey, HASH_ENTER, &found); - - Assert(!found); - - mapping->newChunkId = newChunkId; - } - } - - tableam_scan_end(scan); - index_close(toastIndexRel, RowExclusiveLock); - - i++; - } -} - -// Check whether or not chunkId is repeat in the other toast tables. 
-static bool checkChunkIdRepeat(List* srcPartToastRels, int selfIndex, Oid chunkId) -{ - ListCell* cell = NULL; - int i = 0; - - foreach (cell, srcPartToastRels) { - Relation srcPartToastRel = (Relation)lfirst(cell); - - // skip self. - if ((i++ == selfIndex)) { - continue; - } - - if (toastrel_valueid_exists(srcPartToastRel, chunkId)) { - return true; - } - } - - return false; -} - -// Description : Execute exchange -static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd) -{ - Oid ordTableOid = InvalidOid; - Oid partOid = InvalidOid; - Relation ordTableRel = NULL; - List* partIndexList = NIL; - List* ordIndexList = NIL; - TransactionId relfrozenxid = InvalidTransactionId; - MultiXactId relminmxid = InvalidMultiXactId; - - ordTableOid = RangeVarGetRelid(cmd->exchange_with_rel, AccessExclusiveLock, false); - - partOid = getPartitionOid(partTableRel, cmd->name, cmd->def); - - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("Specified partition does not exist"))); - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) { - LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - Assert(OidIsValid(ordTableOid)); - - ordTableRel = heap_open(ordTableOid, NoLock); - - if (ordTableRel->rd_rel->relkind != RELKIND_RELATION || - ordTableRel->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION || - ordTableRel->rd_rel->relpersistence == RELPERSISTENCE_TEMP || - ordTableRel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED || - ordTableRel->rd_rel->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) { - ereport( - ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ALTER TABLE EXCHANGE requires an ordinary table"))); - } - - // Check storage parameters for two tables - checkStorageTypeForExchange(partTableRel, ordTableRel); - - // Check row level security policy - if 
(RelationHasRlspolicy(partTableRel->rd_id) || RelationHasRlspolicy(ordTableRel->rd_id)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE EXCHANGE not support with row level security policy table"))); - } - - // Check ordinary competence - CheckTableNotInUse(ordTableRel, "ALTER TABLE"); - - /* Check permission for independent user's tables */ - if (is_role_independent(partTableRel->rd_rel->relowner) && - !has_privs_of_role(GetUserId(), partTableRel->rd_rel->relowner)) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(partTableRel)); - } - if (is_role_independent(ordTableRel->rd_rel->relowner) && - !has_privs_of_role(GetUserId(), ordTableRel->rd_rel->relowner)) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(ordTableRel)); - } - - ATSimplePermissions(ordTableRel, ATT_TABLE); - - // Check compress - checkCompressForExchange(partTableRel, ordTableRel); - - // Check number, type of column - checkColumnForExchange(partTableRel, ordTableRel); - - // Check constraint of two tables - checkConstraintForExchange(partTableRel, ordTableRel); - -#ifdef PGXC - // Check distribute of two tables only on coordinator - if (IS_PGXC_COORDINATOR) { - checkDistributeForExchange(partTableRel, ordTableRel); - } -#endif - - // Check number, type of index - checkIndexForExchange(partTableRel, partOid, ordTableRel, &partIndexList, &ordIndexList); - - // Check if the tables are colstore - checkColStoreForExchange(partTableRel, ordTableRel); - // Swap object of partition and ordinary table - LockPartition(partTableRel->rd_id, partOid, AccessExclusiveLock, PARTITION_LOCK); - - // Check the value is valided for partition boundary - if (cmd->check_validation) { - checkValidationForExchange(partTableRel, ordTableRel, partOid, cmd->exchange_verbose); - } - if (RelationIsPartition(ordTableRel)) - getPartitionRelxids(ordTableRel, &relfrozenxid, &relminmxid); - else - getRelationRelxids(ordTableRel, 
&relfrozenxid, &relminmxid); - - // Swap relfilenode of table and toast table - finishPartitionHeapSwap(partOid, ordTableRel->rd_id, false, relfrozenxid, relminmxid); - - // Swap relfilenode of index - Assert(list_length(partIndexList) == list_length(ordIndexList)); - if (0 != list_length(partIndexList)) { - finishIndexSwap(partIndexList, ordIndexList); - list_free_ext(partIndexList); - list_free_ext(ordIndexList); - } - - heap_close(ordTableRel, NoLock); - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(partTableRel); - } else { - ExchangePartitionWithGPI(cmd, partTableRel, partOid, relfrozenxid, relminmxid); - } -} - -static void checkStorageTypeForExchange(Relation partTableRel, Relation ordTableRel) -{ - if (RELATION_HAS_BUCKET(ordTableRel) != RELATION_HAS_BUCKET(partTableRel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table " - "to have the same hashbucket option(on or off)"))); - } - - if (RELATION_CREATE_BUCKET(ordTableRel)) { - oidvector *bucketList1 = searchHashBucketByOid(ordTableRel->rd_bucketoid); - oidvector *bucketList2 = searchHashBucketByOid(partTableRel->rd_bucketoid); - if (!hashbucket_eq(bucketList1, bucketList2)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table " - "to have the same buckets list"))); - } - } - if (ordTableRel->storage_type != partTableRel->storage_type) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table " - "to have the same storage type"))); - } -} - -static void checkColStoreForExchange(Relation partTableRel, Relation ordTableRel) -{ - if ((RelationIsColStore(partTableRel) && !RelationIsColStore(ordTableRel)) || - (!RelationIsColStore(partTableRel) && RelationIsColStore(ordTableRel))) - ereport(ERROR, - 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same column/row storage"))); -} -// Description : Check compress -static void checkCompressForExchange(Relation partTableRel, Relation ordTableRel) -{ - if (partTableRel->rd_rel->relcmprs != ordTableRel->rd_rel->relcmprs) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress"))); - } - if (partTableRel->rd_node.opt != ordTableRel->rd_node.opt) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress"))); - } -} - -// Description : Check number, type of column -static void checkColumnForExchange(Relation partTableRel, Relation ordTableRel) -{ - CatCList* partAttList = NULL; - CatCList* ordAttList = NULL; - HeapTuple partHeapTuple = NULL; - HeapTuple ordHeapTuple = NULL; - int i = 0; - Relation attrdefRel = NULL; - - // Get column list - partAttList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(partTableRel->rd_id)); - ordAttList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(ordTableRel->rd_id)); - - Relation attRelation = heap_open(AttributeRelationId, AccessShareLock); - TupleDesc attDesc = RelationGetDescr(attRelation); - heap_close(attRelation, AccessShareLock); - - Datum* partVals = (Datum*)palloc(sizeof(Datum) * Natts_pg_attribute); - Datum* ordVals = (Datum*)palloc(sizeof(Datum) * Natts_pg_attribute); - bool* partNulls = (bool*)palloc(sizeof(bool) * Natts_pg_attribute); - bool* ordNulls = (bool*)palloc(sizeof(bool) * Natts_pg_attribute); - - // Check column number - if (partAttList->n_members != ordAttList->n_members) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same number of columns"))); - } - - attrdefRel = heap_open(AttrDefaultRelationId, RowExclusiveLock); - - for (i = 0; i < 
partAttList->n_members; i++) { - int j = 0; - ScanKeyData scankeys[2]; - SysScanDesc partAttrdefScan = NULL; - HeapTuple partAttrdefTuple = NULL; - SysScanDesc ordAttrdefScan = NULL; - HeapTuple ordAttrdefTuple = NULL; - - partHeapTuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(partAttList, i); - heap_deform_tuple(partHeapTuple, attDesc, partVals, partNulls); - - for (j = 0; j < ordAttList->n_members; j++) { - ordHeapTuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(ordAttList, i); - heap_deform_tuple(ordHeapTuple, attDesc, ordVals, ordNulls); - - if (DatumGetInt8(ordVals[Anum_pg_attribute_attnum - 1]) == - DatumGetInt8(partVals[Anum_pg_attribute_attnum - 1])) { - break; - } - } - - // Check column name - if (strcmp(DatumGetName(ordVals[Anum_pg_attribute_attname - 1])->data, - DatumGetName(partVals[Anum_pg_attribute_attname - 1])->data) != 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column name mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - // Check column type and length - if (DatumGetObjectId(ordVals[Anum_pg_attribute_atttypid - 1]) != - DatumGetObjectId(partVals[Anum_pg_attribute_atttypid - 1]) || - DatumGetInt8(ordVals[Anum_pg_attribute_attlen - 1]) != - DatumGetInt8(partVals[Anum_pg_attribute_attlen - 1]) || - DatumGetInt8(ordVals[Anum_pg_attribute_atttypmod - 1]) != - DatumGetInt8(partVals[Anum_pg_attribute_atttypmod - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column type or size mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - // Check column not null constraint - if (DatumGetBool(ordVals[Anum_pg_attribute_attnotnull - 1]) != - DatumGetBool(partVals[Anum_pg_attribute_attnotnull - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column not null constraint mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - // Check column default constraint - if (DatumGetBool(ordVals[Anum_pg_attribute_atthasdef - 1]) != - 
DatumGetBool(partVals[Anum_pg_attribute_atthasdef - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column default constraint mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - ScanKeyInit(&scankeys[0], - Anum_pg_attrdef_adrelid, - BTEqualStrategyNumber, - F_OIDEQ, - ObjectIdGetDatum(partTableRel->rd_id)); - ScanKeyInit(&scankeys[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, - partVals[Anum_pg_attribute_attnum - 1]); - - partAttrdefScan = systable_beginscan(attrdefRel, AttrDefaultIndexId, true, NULL, 2, scankeys); - partAttrdefTuple = systable_getnext(partAttrdefScan); - - ScanKeyInit(&scankeys[0], - Anum_pg_attrdef_adrelid, - BTEqualStrategyNumber, - F_OIDEQ, - ObjectIdGetDatum(ordTableRel->rd_id)); - ScanKeyInit(&scankeys[1], Anum_pg_attrdef_adnum, BTEqualStrategyNumber, F_INT2EQ, - ordVals[Anum_pg_attribute_attnum - 1]); - - ordAttrdefScan = systable_beginscan(attrdefRel, AttrDefaultIndexId, true, NULL, 2, scankeys); - ordAttrdefTuple = systable_getnext(ordAttrdefScan); - - if ((partAttrdefTuple != NULL) && (ordAttrdefTuple != NULL)) { - bool isnull = false; - Datum partAdsrc = (Datum)0; - Datum ordAdsrc = (Datum)0; - - partAdsrc = heap_getattr(partAttrdefTuple, Anum_pg_attrdef_adsrc, attrdefRel->rd_att, &isnull); - ordAdsrc = heap_getattr(ordAttrdefTuple, Anum_pg_attrdef_adsrc, attrdefRel->rd_att, &isnull); - - if (strcmp(TextDatumGetCString(partAdsrc), TextDatumGetCString(ordAdsrc)) != 0) { - systable_endscan(partAttrdefScan); - systable_endscan(ordAttrdefScan); - - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column default constraint mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - } - - systable_endscan(partAttrdefScan); - systable_endscan(ordAttrdefScan); - - // Check column collation - if (DatumGetObjectId(ordVals[Anum_pg_attribute_attcollation - 1]) != - DatumGetObjectId(partVals[Anum_pg_attribute_attcollation - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - 
errmsg("column collation mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - // Check column storage - if (DatumGetChar(ordVals[Anum_pg_attribute_attstorage - 1]) != - DatumGetChar(partVals[Anum_pg_attribute_attstorage - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column storage mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - // Check the type of column compress - if (DatumGetInt8(ordVals[Anum_pg_attribute_attcmprmode - 1]) != - DatumGetInt8(partVals[Anum_pg_attribute_attcmprmode - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("the type of column compress mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - // Check kv storage type - if (DatumGetInt8(ordVals[Anum_pg_attribute_attkvtype - 1]) != - DatumGetInt8(partVals[Anum_pg_attribute_attkvtype - 1])) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("the kv storage type of column mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - } - - heap_close(attrdefRel, RowExclusiveLock); - - ReleaseSysCacheList(partAttList); - ReleaseSysCacheList(ordAttList); - pfree_ext(ordVals); - pfree_ext(partVals); - pfree_ext(ordNulls); - pfree_ext(partNulls); -} - -bool checkPartitionLocalIndexesUsable(Oid partitionOid) -{ - bool ret = true; - Relation pg_part_rel = NULL; - ScanKeyData scankeys[1]; - SysScanDesc partScan = NULL; - HeapTuple partTuple = NULL; - - pg_part_rel = relation_open(PartitionRelationId, AccessShareLock); - ScanKeyInit( - &scankeys[0], Anum_pg_partition_indextblid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partitionOid)); - partScan = systable_beginscan(pg_part_rel, PartitionIndexTableIdIndexId, true, NULL, 1, scankeys); - while (NULL != (partTuple = systable_getnext(partScan))) { - Relation indexRel = NULL; - Partition indexPart = NULL; - Oid indexRelOid = ((Form_pg_partition)GETSTRUCT(partTuple))->parentid; - Oid indexPartOid = HeapTupleGetOid(partTuple); - - // the index relation is already locked by 
upper caller function - indexRel = index_open(indexRelOid, NoLock); - // we will keep the lock on index partition. - indexPart = partitionOpen(indexRel, indexPartOid, ExclusiveLock); - if (!indexPart->pd_part->indisusable) { - partitionClose(indexRel, indexPart, NoLock); - index_close(indexRel, NoLock); - ret = false; - break; - } else { - partitionClose(indexRel, indexPart, NoLock); - index_close(indexRel, NoLock); - continue; - } - } - - systable_endscan(partScan); - relation_close(pg_part_rel, AccessShareLock); - return ret; -} - -/* - * @Description: check whether the partitioned relation has usable index. - * @in relation: the partitioned relation. - * @return: whether the partitioned relation has usable index. - * notes: the invoker must check relation is partitioned first. - */ -bool checkRelationLocalIndexesUsable(Relation relation) -{ - bool ret = true; - Relation indrel; - SysScanDesc indscan; - ScanKeyData skey; - HeapTuple htup; - - /* Prepare to scan pg_index for entries having indrelid = this rel. */ - Oid relid = RelationIsPartitionOfSubPartitionTable(relation) ? 
relation->parentId : RelationGetRelid(relation); - ScanKeyInit( - &skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - - indrel = heap_open(IndexRelationId, AccessShareLock); - indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, NULL, 1, &skey); - - while (HeapTupleIsValid(htup = systable_getnext(indscan))) { - Form_pg_index index = (Form_pg_index)GETSTRUCT(htup); - Relation index_relation = index_open(index->indexrelid, AccessShareLock); - - if (!IndexIsUsable(index) && !RelationIsGlobalIndex(index_relation)) { - index_close(index_relation, AccessShareLock); - ret = false; - break; - } - index_close(index_relation, AccessShareLock); - } - - systable_endscan(indscan); - heap_close(indrel, AccessShareLock); - - return ret; -} - -// Description : Check constraint of two tables -static void checkConstraintForExchange(Relation partTableRel, Relation ordTableRel) -{ - List* partConList = NIL; - List* ordConList = NIL; - Relation pgConstraint = NULL; - ListCell* partCell = NULL; - ListCell* ordCell = NULL; - - // Get constraint list - partConList = getConstraintList(partTableRel->rd_id); - ordConList = getConstraintList(ordTableRel->rd_id); - - // Check constraint number - if (list_length(partConList) != list_length(ordConList)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("constraint mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); - - foreach (partCell, partConList) { - Form_pg_constraint partConForm = NULL; - HeapTuple partConTuple = NULL; - bool isNull = false; - Datum partConDatum; - ArrayType* partConKeyArr = NULL; - int16* partConKeyAttNums = NULL; - int partConKeyNum = 0; - bool isMatch = false; - - partConTuple = (HeapTuple)lfirst(partCell); - partConForm = (Form_pg_constraint)GETSTRUCT(partConTuple); - - // Get column number and column index - // of the constriant on partitioned table - partConDatum = 
heap_getattr(partConTuple, Anum_pg_constraint_conkey, RelationGetDescr(pgConstraint), &isNull); - partConKeyArr = DatumGetArrayTypeP(partConDatum); - partConKeyNum = ARR_DIMS(partConKeyArr)[0]; - partConKeyAttNums = (int16*)ARR_DATA_PTR(partConKeyArr); - - foreach (ordCell, ordConList) { - Form_pg_constraint ordConForm = NULL; - HeapTuple ordConTuple = NULL; - Datum ordConDatum; - ArrayType* ordConKeyArr = NULL; - int16* ordConKeyAttNums = NULL; - int ordConKeyNum = 0; - int i = 0; - - isMatch = false; - - ordConTuple = (HeapTuple)lfirst(ordCell); - ordConForm = (Form_pg_constraint)GETSTRUCT(ordConTuple); - - // get column number and column index - // of the constriant on partitioned table - ordConDatum = heap_getattr(ordConTuple, Anum_pg_constraint_conkey, RelationGetDescr(pgConstraint), &isNull); - ordConKeyArr = DatumGetArrayTypeP(ordConDatum); - ordConKeyNum = ARR_DIMS(ordConKeyArr)[0]; - ordConKeyAttNums = (int16*)ARR_DATA_PTR(ordConKeyArr); - - if ((ordConForm->contype == partConForm->contype) && (ordConKeyNum == partConKeyNum)) { - // If the constriant is check constriant, - // check the check expression - if (ordConForm->contype == 'c') { - if (constraints_equivalent(ordConTuple, partConTuple, pgConstraint->rd_att)) { - isMatch = true; - } - } else { - isMatch = true; - - for (i = 0; i < ordConKeyNum; i++) { - if (ordConKeyAttNums[i] != partConKeyAttNums[i]) { - isMatch = false; - break; - } - } - } - - if (isMatch) { - break; - } - } - } - - if (!isMatch) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("constraint mismatch in ALTER TABLE EXCHANGE PARTITION"))); - } - } - - heap_close(pgConstraint, AccessShareLock); - - freeConstraintList(partConList); - freeConstraintList(ordConList); -} - -/* - * Alter Matview and their maps and mlogs to group - */ -void ATMatviewGroup(List* stmts, Oid mvid, LOCKMODE lockmode) -{ - ListCell *lc = NULL; - Query *query = NULL; - List *relids = NIL; - AlterTableStmt *stmtf = (AlterTableStmt 
*)linitial(stmts); - AlterTableCmd *cmd = (AlterTableCmd *)linitial(stmtf->cmds); - - if (list_length(stmtf->cmds) != 1 || cmd->subtype != AT_SubCluster) { - return; - } - - if ((get_rel_relkind(mvid) != RELKIND_MATVIEW) || (!is_incremental_matview(mvid))) { - return; - } - - /* Alter matmap node group */ - Oid mapid = get_matview_mapid(mvid); - if (mapid) { - stmtf->relation->relname = get_rel_name(mapid); - AlterTable(mapid, lockmode, stmtf); - } - - /* Alter matview reltable node group */ - Relation matview = heap_open(mvid, lockmode); - query = get_matview_query(matview); - relids = pull_up_rels_recursive((Node *)query); - - foreach (lc, relids) { - Oid relid = (Oid)lfirst_oid(lc); - Oid mlogid = find_matview_mlog_table(relid); - - stmtf->relation->relname = get_rel_name(mlogid); - AlterTable(mlogid, lockmode, stmtf); - } - - heap_close(matview, NoLock); - return; -} - -// Description : Get constraint tuple list of table -static List* getConstraintList(Oid relOid, char conType) -{ - List* result = NIL; - Relation pgConstraint = NULL; - HeapTuple tuple = NULL; - SysScanDesc scan = NULL; - ScanKeyData skey[2]; - int nkeys; - - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relOid)); - if (conType != CONSTRAINT_INVALID) { - ScanKeyInit(&skey[1], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(conType)); - nkeys = 2; - scan = systable_beginscan(pgConstraint, ConstraintRelidIndexId, false, NULL, nkeys, skey); - - } else { - nkeys = 1; - scan = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL, nkeys, skey); - } - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - HeapTuple resultTuple = (HeapTuple) tableam_tops_copy_tuple(tuple); - result = lappend(result, resultTuple); - } - - systable_endscan(scan); - heap_close(pgConstraint, AccessShareLock); - - return result; -} - -// Description : Free 
constraint tuple list of table -static void freeConstraintList(List* list) -{ - ListCell* cell = NULL; - HeapTuple tuple = NULL; - - foreach (cell, list) { - tuple = (HeapTuple)lfirst(cell); - if (HeapTupleIsValid(tuple)) { - tableam_tops_free_tuple(tuple); - } - } - - list_free_ext(list); -} - -static bool colHasPartialClusterKey(Relation rel, AttrNumber attNum) -{ - List* constraintList = getConstraintList(RelationGetRelid(rel), CONSTRAINT_CLUSTER); - bool colHasCluster = false; - ListCell* lc = NULL; - Relation pgConstraint = NULL; - bool isNull = false; - Datum conKeyDatum; - - ArrayType* conKeyArr = NULL; - int16* conKeyAttNums = NULL; - int conKeyNum = 0; - int i = 0; - - pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); - - foreach (lc, constraintList) { - HeapTuple constrTuple = (HeapTuple)lfirst(lc); - - conKeyDatum = heap_getattr(constrTuple, Anum_pg_constraint_conkey, RelationGetDescr(pgConstraint), &isNull); - conKeyArr = DatumGetArrayTypeP(conKeyDatum); - conKeyNum = ARR_DIMS(conKeyArr)[0]; - conKeyAttNums = (int16*)ARR_DATA_PTR(conKeyArr); - - for (i = 0; i < conKeyNum; i++) { - if (attNum == conKeyAttNums[i]) { - colHasCluster = true; - break; - } - } - } - - heap_close(pgConstraint, AccessShareLock); - - freeConstraintList(constraintList); - - return colHasCluster; -} - -// Description : Check distribute attribute of two tables -static void checkDistributeForExchange(Relation partTableRel, Relation ordTableRel) -{ - Relation pgxcClass = NULL; - HeapTuple partTuple = NULL; - HeapTuple ordTuple = NULL; - Form_pgxc_class partForm = NULL; - Form_pgxc_class ordForm = NULL; - bool isMatch = false; - oidvector* partNodes = NULL; - oidvector* ordNodes = NULL; - bool isNull = false; - - pgxcClass = heap_open(PgxcClassRelationId, RowExclusiveLock); - - partTuple = SearchSysCache1(PGXCCLASSRELID, ObjectIdGetDatum(partTableRel->rd_id)); - if (!HeapTupleIsValid(partTuple)) - ereport(ERROR, - ((errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache 
lookup failed for relaton %u", partTableRel->rd_id)))); - - Datum part_nodes_datum = heap_getattr(partTuple, Anum_pgxc_class_nodes, RelationGetDescr(pgxcClass), &isNull); - - if (isNull) - elog(PANIC, "Can't get nodeoid for relation %s", RelationGetRelationName(partTableRel)); - partNodes = (oidvector*)PG_DETOAST_DATUM(part_nodes_datum); - - partForm = (Form_pgxc_class)GETSTRUCT(partTuple); - - /* Get ord information */ - ordTuple = SearchSysCache1(PGXCCLASSRELID, ObjectIdGetDatum(ordTableRel->rd_id)); - if (!HeapTupleIsValid(ordTuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmodule(MOD_HDFS), - errmsg("cache lookup failed from %u", PGXCCLASSRELID))); - } - - Datum ordnodes_datum = heap_getattr(ordTuple, Anum_pgxc_class_nodes, RelationGetDescr(pgxcClass), &isNull); - if (isNull) - ereport(PANIC, - ((errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("Can't get nodeoid for relation %s", RelationGetRelationName(ordTableRel))))); - - ordNodes = (oidvector*)PG_DETOAST_DATUM(ordnodes_datum); - - ordForm = (Form_pgxc_class)GETSTRUCT(ordTuple); - - if (partForm->pclocatortype == ordForm->pclocatortype && partForm->pchashalgorithm == ordForm->pchashalgorithm && - partNodes->dim1 == ordNodes->dim1 && - DatumGetBool(DirectFunctionCall2( - int2vectoreq, PointerGetDatum(&partForm->pcattnum), PointerGetDatum(&ordForm->pcattnum)))) { - int i = 0; - - isMatch = true; - - for (i = 0; i < partNodes->dim1; i++) { - if (partNodes->values[i] != ordNodes->values[i]) { - isMatch = false; - break; - } - } - } - - if ((oidvector*)DatumGetPointer(part_nodes_datum) != partNodes) - pfree_ext(partNodes); - - if ((oidvector*)DatumGetPointer(ordnodes_datum) != ordNodes) - pfree_ext(ordNodes); - - if (!isMatch) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("distribute mismatch for tables in ALTER TABLE EXCHANGE PARTITION"))); - } - - ReleaseSysCache(partTuple); - ReleaseSysCache(ordTuple); - - heap_close(pgxcClass, RowExclusiveLock); -} - -// 
Description : Check index of two tables -static void checkIndexForExchange( - Relation partTableRel, Oid partOid, Relation ordTableRel, List** partIndexList, List** ordIndexList) -{ - ListCell* oidCell = NULL; - ListCell* tupleCell = NULL; - List* partTableIndexOidList = NIL; - List* ordTableIndexOidList = NIL; - HeapTuple ordTableIndexTuple = NULL; - List* ordTableIndexTupleList = NIL; - bool* matchFlag = NULL; - - partTableIndexOidList = RelationGetSpecificKindIndexList(partTableRel, false); - ordTableIndexOidList = RelationGetIndexList(ordTableRel); - - if (list_length(partTableIndexOidList) == 0 && list_length(ordTableIndexOidList) == 0) { - return; - } - - if (list_length(partTableIndexOidList) != list_length(ordTableIndexOidList)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same number of indexs"))); - } - - foreach (oidCell, ordTableIndexOidList) { - Oid ordTableIndexOid = lfirst_oid(oidCell); - ordTableIndexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(ordTableIndexOid)); - if (!HeapTupleIsValid(ordTableIndexTuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", ordTableIndexOid))); - } - - ordTableIndexTupleList = lappend(ordTableIndexTupleList, ordTableIndexTuple); - } - - matchFlag = (bool*)palloc0(list_length(partTableIndexOidList) * sizeof(bool)); - - foreach (oidCell, partTableIndexOidList) { - Oid partTableIndexOid = lfirst_oid(oidCell); - Oid partIndexOid = InvalidOid; - HeapTuple indexPartTuple = NULL; - Form_pg_partition indexPartForm = NULL; - HeapTuple partTalbeIndexTuple = NULL; - Form_pg_index partTalbeIndexForm = NULL; - Form_pg_index ordTalbeIndexForm = NULL; - bool isMatch = false; - bool partTableIndexPartUsable = true; - int matchFlagIndex = -1; - - // step 1: check index 'indisusable' state - partTalbeIndexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(partTableIndexOid)); - 
partTalbeIndexForm = (Form_pg_index)GETSTRUCT(partTalbeIndexTuple); - if (!IndexIsValid(partTalbeIndexForm)) { - partTableIndexPartUsable = false; - } - - partIndexOid = getPartitionIndexOid(partTableIndexOid, partOid); - // step 2: check index partition 'indisusable' state - if (partTableIndexPartUsable) { - indexPartTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partIndexOid)); - if (!HeapTupleIsValid(indexPartTuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmodule(MOD_HDFS), - errmsg("cache lookup failed from %u", PARTRELID))); - } - indexPartForm = (Form_pg_partition)GETSTRUCT(indexPartTuple); - - if (!indexPartForm->indisusable) { - partTableIndexPartUsable = false; - } - ReleaseSysCache(indexPartTuple); - } - - // step 3: check whether 2 index are matching - foreach (tupleCell, ordTableIndexTupleList) { - Oid ordTableIndexOid = InvalidOid; - HeapTuple partTableIndexClassTuple = NULL; - HeapTuple ordTableIndexClassTuple = NULL; - Form_pg_class partTableIndexClassForm = NULL; - Form_pg_class ordTableIndexClassForm = NULL; - bool ordTableIndexUsable = true; - - isMatch = false; - - matchFlagIndex++; - - if (matchFlag[matchFlagIndex]) { - continue; - } - - ordTableIndexTuple = (HeapTuple)lfirst(tupleCell); - ordTalbeIndexForm = (Form_pg_index)GETSTRUCT(ordTableIndexTuple); - - ordTableIndexOid = ordTalbeIndexForm->indexrelid; - if (!IndexIsValid(ordTalbeIndexForm)) { - ordTableIndexUsable = false; - } - if (partTableIndexPartUsable != ordTableIndexUsable) { - continue; - } - - partTableIndexClassTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(partTableIndexOid)); - ordTableIndexClassTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(ordTableIndexOid)); - partTableIndexClassForm = (Form_pg_class)GETSTRUCT(partTableIndexClassTuple); - ordTableIndexClassForm = (Form_pg_class)GETSTRUCT(ordTableIndexClassTuple); - - // index access method - if (partTableIndexClassForm->relam != ordTableIndexClassForm->relam) { - 
ReleaseSysCache(partTableIndexClassTuple); - ReleaseSysCache(ordTableIndexClassTuple); - continue; - } - - ReleaseSysCache(partTableIndexClassTuple); - ReleaseSysCache(ordTableIndexClassTuple); - - if ((ordTalbeIndexForm->indnatts == partTalbeIndexForm->indnatts) && - (ordTalbeIndexForm->indisunique == partTalbeIndexForm->indisunique) && - (ordTalbeIndexForm->indisprimary == partTalbeIndexForm->indisprimary)) { - int i = 0; - isMatch = true; - - for (i = 0; i < partTalbeIndexForm->indkey.dim1; i++) { - if (ordTalbeIndexForm->indkey.values[i] != partTalbeIndexForm->indkey.values[i]) { - isMatch = false; - break; - } - } - } - - if (isMatch) { - *partIndexList = lappend_oid(*partIndexList, partIndexOid); - *ordIndexList = lappend_oid(*ordIndexList, ordTableIndexOid); - - matchFlag[matchFlagIndex] = true; - break; - } - } - - ReleaseSysCache(partTalbeIndexTuple); - - if (!isMatch) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("index mismatch for tables in ALTER TABLE EXCHANGE PARTITION"))); - } - } - - foreach (tupleCell, ordTableIndexTupleList) { - ordTableIndexTuple = (HeapTuple)lfirst(tupleCell); - ReleaseSysCache(ordTableIndexTuple); - } - - list_free_ext(partTableIndexOidList); - list_free_ext(ordTableIndexOidList); - list_free_ext(ordTableIndexTupleList); - - pfree_ext(matchFlag); -} - -// Description : Check all tuples of ordinary whether locate the partition -template -static void checkValidationForExchangeTable(Relation partTableRel, Relation ordTableRel, int partSeq) -{ - TableScanDesc scan = NULL; - void* tuple = NULL; - TupleDesc tupleDesc = NULL; - HTAB* partRelHTAB = NULL; - List* indexList = NIL; - List* indexRelList = NIL; - List* indexInfoList = NIL; - ListCell* cell = NULL; - ListCell* cell1 = NULL; - EState* estate = NULL; - TupleTableSlot* indexslot = NULL; - bool relisustore = RelationIsUstoreFormat(ordTableRel); - - if (relisustore != RelationIsUstoreFormat(partTableRel)) - { - ereport(ERROR, - 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot exchange between different orientations"))); - } - - - tupleDesc = ordTableRel->rd_att; - - if (exchangeVerbose) { - indexList = RelationGetIndexList(partTableRel, false); - - foreach (cell, indexList) { - Oid indexOid = lfirst_oid(cell); - Relation indexRel = relation_open(indexOid, RowExclusiveLock); - IndexInfo* indexInfo = BuildIndexInfo(indexRel); - - indexRelList = lappend(indexRelList, indexRel); - indexInfoList = lappend(indexInfoList, indexInfo); - } - - if (PointerIsValid(indexRelList)) { - estate = CreateExecutorState(); - /* - * We need a ResultRelInfo so we can use the regular executor's - * index-entry-making machinery. (There used to be a huge amount of code - * here that basically duplicated execUtils.c ...) - */ - ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo); - resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ - resultRelInfo->ri_RelationDesc = ordTableRel; - ExecOpenIndices(resultRelInfo, false); - estate->es_result_relations = resultRelInfo; - estate->es_num_result_relations = 1; - estate->es_result_relation_info = resultRelInfo; - - indexslot = MakeSingleTupleTableSlot(RelationGetDescr(partTableRel), false, partTableRel->rd_tam_ops); - } - } - - scan = scan_handler_tbl_beginscan(ordTableRel, SnapshotNow, 0, NULL); - - while ((tuple = scan_handler_tbl_getnext(scan, ForwardScanDirection, ordTableRel)) != NULL) { - if (!isTupleLocatePartition(partTableRel, partSeq, tupleDesc, tuple)) { - if (!exchangeVerbose) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("some rows in table do not qualify for specified partition"))); - } else { - Oid targetPartOid = InvalidOid; - Relation partRel = NULL; - Partition part = NULL; - Tuple copyTuple = NULL; - int2 bucketId = InvalidBktId; - - // get right partition oid for the tuple - targetPartOid = heapTupleGetPartitionId(partTableRel, (HeapTuple)tuple, NULL, true); - - searchFakeReationForPartitionOid(partRelHTAB, 
CurrentMemoryContext, partTableRel, targetPartOid, - INVALID_PARTITION_NO, partRel, part, RowExclusiveLock); - - if (RELATION_HAS_BUCKET(partTableRel)) { - // Get the target bucket. - bucketId = computeTupleBucketId(partTableRel, (HeapTuple)tuple); - searchHBucketFakeRelation(partRelHTAB, CurrentMemoryContext, partRel, bucketId, partRel); - } - - /* insert the copied tuple in the right partition */ - copyTuple = tableam_tops_copy_tuple(tuple); - if (partRel == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("part not found for partition relation"))); - } - tableam_tuple_insert(partRel, copyTuple, GetCurrentCommandId(true), 0, NULL); - - /* insert the index tuple */ - if (PointerIsValid(indexRelList)) { - (void)ExecStoreTuple(copyTuple, indexslot, InvalidBuffer, false); - } - - forboth(cell, indexRelList, cell1, indexInfoList) - { - Relation indexRel = (Relation)lfirst(cell); - IndexInfo* indexInfo = (IndexInfo*)lfirst(cell1); - Oid indexOid = RelationGetRelid(indexRel); - Oid partIndexOid = InvalidOid; - Relation partIndexRel = NULL; - Partition partIndex = NULL; - - Datum values[tupleDesc->natts]; - bool isNull[tupleDesc->natts]; - bool estateIsNotNull = false; - - bool isglobal = RelationIsGlobalIndex(indexRel); - if (!isglobal) { - partIndexOid = getPartitionIndexOid(indexOid, targetPartOid); - searchFakeReationForPartitionOid(partRelHTAB, - CurrentMemoryContext, - indexRel, - partIndexOid, - INVALID_PARTITION_NO, - partIndexRel, - partIndex, - RowExclusiveLock); - indexRel = partIndexRel; - } - - if (RELATION_HAS_BUCKET(indexRel) && !(RelationAmIsBtree(indexRel) && - RELOPTIONS_CROSSBUCKET(indexRel->rd_options))) { - searchHBucketFakeRelation( - partRelHTAB, CurrentMemoryContext, indexRel, bucketId, indexRel); - } - - if (indexInfo->ii_Expressions != NIL || indexInfo->ii_ExclusionOps != NULL) { - ExprContext* econtext = GetPerTupleExprContext(estate); - econtext->ecxt_scantuple = indexslot; - estateIsNotNull = true; - } - - 
FormIndexDatum(indexInfo, indexslot, estateIsNotNull ? estate : NULL, values, isNull); - - (void)index_insert(indexRel, - values, - isNull, - &((HeapTuple)copyTuple)->t_self, - partRel, - indexRel->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); - } - - /* tableam_tops_free_tuple is not ready so we add UStore hack path */ - tableam_tops_free_tuple(copyTuple); - TM_Result result; - TM_FailureData tmfd; - Relation fake_relation = GetTableScanDesc(scan, ordTableRel)->rs_rd; - TupleTableSlot* oldslot = NULL; - result = tableam_tuple_delete(fake_relation, - &((HeapTuple)tuple)->t_self, - GetCurrentCommandId(true), - InvalidSnapshot, - GetActiveSnapshot(), - true, - &oldslot, - &tmfd, - true); - switch (result) { - case TM_SelfModified: - /* Tuple was already updated in current command? */ - ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("tuple already updated by self"))); - break; - - case TM_Ok: { /* done successfully */ - if (PointerIsValid(indexRelList)) { - Bitmapset *modifiedIdxAttrs = NULL; - ExecIndexTuplesState exec_index_tuples_state; - exec_index_tuples_state.estate = estate; - exec_index_tuples_state.targetPartRel = NULL; - exec_index_tuples_state.p = NULL; - exec_index_tuples_state.conflict = NULL; - exec_index_tuples_state.rollbackIndex = false; - tableam_tops_exec_delete_index_tuples(oldslot, fake_relation, NULL, - &((HeapTuple)tuple)->t_self, exec_index_tuples_state, modifiedIdxAttrs); - } - if (oldslot) { - ExecDropSingleTupleTableSlot(oldslot); - } - break; - } - case TM_Updated: - ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("tuple concurrently updated"))); - break; - - case TM_Deleted: - ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("tuple concurrently updated"))); - break; - - default: - ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("unrecognized delete status: %u", result))); - break; - } - - } - } - } - - scan_handler_tbl_endscan(scan); - - if 
(PointerIsValid(partRelHTAB)) { - FakeRelationCacheDestroy(partRelHTAB); - } - - if (exchangeVerbose) { - foreach (cell, indexRelList) { - Relation indexRel = (Relation)lfirst(cell); - - relation_close(indexRel, RowExclusiveLock); - } - - list_free_ext(indexRelList); - list_free_ext(indexInfoList); - - if (PointerIsValid(estate)) { - ExecCloseIndices(estate->es_result_relation_info); - pfree_ext(estate->es_result_relations); - FreeExecutorState(estate); - } - - if (PointerIsValid(indexslot)) { - ExecDropSingleTupleTableSlot(indexslot); - } - } -} - -template -static void checkValidationForExchangeCStore(Relation partTableRel, Relation ordTableRel, int partSeq) -{ - RangePartitionMap* partMap = (RangePartitionMap*)(partTableRel->partMap); - int2vector* partkeyColumns = partMap->partitionKey; - int partkeyColumnNum = partkeyColumns->dim1; - - AttrNumber* scanAttrNumbers = NULL; - int scanColNum = 0; - - CStoreScanDesc scanstate; - VectorBatch* vecScanBatch = NULL; - ScalarVector* pVec = NULL; - ScalarValue* pVals = NULL; - Datum* values = NULL; - bool* nulls = NULL; - FormData_pg_attribute* attrs = ordTableRel->rd_att->attrs; - - Const consts[RANGE_PARTKEYMAXNUM]; - Const* partKeyValues[RANGE_PARTKEYMAXNUM]; - bool isInPart = false; - - const int tididx = 1; // junk column for cstore delete - const int tableoidIdx = 2; // junk column(tableoid) for cstore delete - int countNotInPart = 0; - EState* estate = NULL; - VectorBatch* pBatchNotInPart = NULL; - VectorBatch* pBatchForDelete = NULL; - ScalarVector* pValNotInPart = NULL; - CStoreDelete* ordTabelDelete = NULL; - ResultRelInfo* resultRelInfo = NULL; - CStorePartitionInsert* partionInsert = NULL; - - // perpare for cstore scan - if (exchangeVerbose) { - // use all columns and ctid for scan key - scanColNum = ordTableRel->rd_att->natts + 2; - scanAttrNumbers = (AttrNumber*)palloc(sizeof(AttrNumber) * scanColNum); - - for (int i = 0; i < (scanColNum - 2); i++) { - scanAttrNumbers[i] = attrs[i].attnum; - } - - // ctid 
for delete - scanAttrNumbers[scanColNum - 2] = SelfItemPointerAttributeNumber; - scanAttrNumbers[scanColNum - 1] = TableOidAttributeNumber; - - // init cstore partition insert - resultRelInfo = makeNode(ResultRelInfo); - InitResultRelInfo(resultRelInfo, partTableRel, 1, 0); - ExecOpenIndices(resultRelInfo, false); - resultRelInfo->ri_junkFilter = makeNode(JunkFilter); - resultRelInfo->ri_junkFilter->jf_junkAttNo = tididx; - resultRelInfo->ri_junkFilter->jf_xc_part_id = tableoidIdx; - partionInsert = - New(CurrentMemoryContext) CStorePartitionInsert(partTableRel, resultRelInfo, TUPLE_SORT, false, NULL, NULL); - - // init cstore delete - estate = CreateExecutorState(); - ordTabelDelete = New(CurrentMemoryContext) CStoreDelete(ordTableRel, estate, false, NULL, NULL); - ordTabelDelete->InitSortState(); - } else { - // use parition key for scan key - scanColNum = partkeyColumnNum; - scanAttrNumbers = (AttrNumber*)palloc(sizeof(AttrNumber) * scanColNum); - - for (int i = 0; i < scanColNum; i++) { - scanAttrNumbers[i] = partkeyColumns->values[i]; - } - } - - // datum values for scan partition key values - values = (Datum*)palloc(sizeof(Datum) * partkeyColumnNum); - nulls = (bool*)palloc(sizeof(bool) * partkeyColumnNum); - - // scan columnar table - scanstate = CStoreBeginScan(ordTableRel, scanColNum, scanAttrNumbers, SnapshotNow, true); - - if (exchangeVerbose) { - // perpare move data which not in exchange partition - pBatchNotInPart = New(CurrentMemoryContext) VectorBatch(CurrentMemoryContext, scanstate->m_pScanBatch); - TupleDesc tupdesc = CreateTemplateTupleDesc(2, false); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "ctid", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)2, "tableoid", INT8OID, -1, 0); - pBatchForDelete = New(CurrentMemoryContext) VectorBatch(CurrentMemoryContext, tupdesc); - pBatchNotInPart->CreateSysColContainer(CurrentMemoryContext, scanstate->ps.ps_ProjInfo->pi_sysAttrList); - } - - do { - vecScanBatch = CStoreGetNextBatch(scanstate); 
- if (!BatchIsNull(vecScanBatch)) { - for (int row = 0; row < vecScanBatch->m_rows; row++) { - for (int partkeyIdx = 0; partkeyIdx < partkeyColumnNum; partkeyIdx++) { - // transform VectorBatch to partition key values - int col = partkeyColumns->values[partkeyIdx] - 1; - pVec = vecScanBatch->m_arr + col; - if (pVec->IsNull(row)) { - nulls[partkeyIdx] = true; - values[partkeyIdx] = (Datum)0; - } else { - nulls[partkeyIdx] = false; - pVals = pVec->m_vals; - if (pVec->m_desc.encoded == false) - values[partkeyIdx] = pVals[row]; - else { - Assert(attrs[col].attlen < 0 || attrs[col].attlen > 8); - Datum v = ScalarVector::Decode(pVals[row]); - values[partkeyIdx] = - (attrs[col].attlen < 0) ? v : PointerGetDatum((char*)v + VARHDRSZ_SHORT); - } - } - - partKeyValues[partkeyIdx] = transformDatum2Const(partTableRel->rd_att, - partkeyColumns->values[partkeyIdx], - values[partkeyIdx], - nulls[partkeyIdx], - &consts[partkeyIdx]); - } - - // is this row in the exchange partition - isInPart = isPartKeyValuesInPartition(partMap, partKeyValues, partkeyColumnNum, partSeq); - - if (!isInPart) { - if (!exchangeVerbose) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("some rows in table do not qualify for specified partition"))); - } else { - // all column - for (int col = 0; col < vecScanBatch->m_cols; col++) { - pVec = vecScanBatch->m_arr + col; - pValNotInPart = pBatchNotInPart->m_arr + col; - - // shallow copy - pValNotInPart->m_vals[countNotInPart] = pVec->m_vals[row]; - pValNotInPart->m_flag[countNotInPart] = pVec->m_flag[row]; - pValNotInPart->m_rows++; - } - - // ctid - pVec = vecScanBatch->GetSysVector(SelfItemPointerAttributeNumber); - pValNotInPart = pBatchNotInPart->GetSysVector(SelfItemPointerAttributeNumber); - pValNotInPart->m_vals[countNotInPart] = pVec->m_vals[row]; - pValNotInPart->m_flag[countNotInPart] = pVec->m_flag[row]; - pValNotInPart->m_rows++; - - // tableoid - pVec = vecScanBatch->GetSysVector(TableOidAttributeNumber); - pValNotInPart = 
pBatchNotInPart->GetSysVector(TableOidAttributeNumber); - pValNotInPart->m_vals[countNotInPart] = pVec->m_vals[row]; - pValNotInPart->m_flag[countNotInPart] = pVec->m_flag[row]; - pValNotInPart->m_rows++; - - countNotInPart++; - } - } - } - - if (exchangeVerbose && (countNotInPart != 0)) { - // routing to the right partition and insert - Assert(countNotInPart <= vecScanBatch->m_rows); - pBatchNotInPart->m_rows = countNotInPart; - - // insert to right partition - partionInsert->BatchInsert(pBatchNotInPart, TABLE_INSERT_FROZEN); - - // delete from old table - pBatchForDelete->m_rows = countNotInPart; - pBatchForDelete->m_arr[tididx - 1].copy(pBatchNotInPart->GetSysVector(SelfItemPointerAttributeNumber)); - pBatchForDelete->m_arr[tableoidIdx - 1].copy(pBatchNotInPart->GetSysVector(TableOidAttributeNumber)); - ordTabelDelete->PutDeleteBatch(pBatchForDelete, resultRelInfo->ri_junkFilter); - - // reset batch and count - pBatchNotInPart->Reset(); - pBatchForDelete->Reset(); - countNotInPart = 0; - } - } - } while (!CStoreIsEndScan(scanstate)); - - CStoreEndScan(scanstate); - - if (exchangeVerbose) { - partionInsert->EndBatchInsert(); - DELETE_EX(partionInsert); - - (void)ordTabelDelete->ExecDelete(); - DELETE_EX(ordTabelDelete); - - ExecCloseIndices(resultRelInfo); - } - - pfree_ext(scanAttrNumbers); - pfree_ext(values); - pfree_ext(nulls); -} - -static Oid getPartitionElementsOid(const char* partitionElement) -{ - return *(Oid*)partitionElement; -} - -int getPartitionElementsIndexByOid(Relation partTableRel, Oid partOid) -{ - int partSeq = -1; - int partitionElementsNum = 0; - char* partitionElements = NULL; - void* partMap = NULL; - int offset = 0; - - switch (partTableRel->partMap->type) { - case PART_TYPE_LIST: - partMap = (ListPartitionMap*)(partTableRel->partMap); - partitionElementsNum = ((ListPartitionMap*)partMap)->listElementsNum; - partitionElements = (char*)(((ListPartitionMap*)partMap)->listElements); - offset = sizeof(ListPartElement); - break; - case 
PART_TYPE_HASH: - partMap = (HashPartitionMap*)(partTableRel->partMap); - partitionElementsNum = ((HashPartitionMap*)partMap)->hashElementsNum; - partitionElements = (char*)(((HashPartitionMap*)partMap)->hashElements); - offset = sizeof(HashPartElement); - break; - case PART_TYPE_RANGE: - case PART_TYPE_INTERVAL: - partMap = (RangePartitionMap*)(partTableRel->partMap); - partitionElementsNum = ((RangePartitionMap*)partMap)->rangeElementsNum; - partitionElements = (char*)(((RangePartitionMap*)partMap)->rangeElements); - offset = sizeof(RangeElement); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("The partitioned table is unsupported for getting the sequence of partition elements."))); - break; - } - - // the premise is that partitioned table was locked by AccessExclusiveLock - for (int i = 0; i < partitionElementsNum; i++) { - if (partOid == getPartitionElementsOid(partitionElements)) { - partSeq = i; - break; - } - partitionElements += offset; - } - - return partSeq; -} - -static void checkValidationForExchange(Relation partTableRel, Relation ordTableRel, Oid partOid, bool exchangeVerbose) -{ - Assert(partTableRel && partTableRel->partMap); - - int partSeq = getPartitionElementsIndexByOid(partTableRel, partOid); - - if (RelationIsColStore(ordTableRel)) { - if (exchangeVerbose) - checkValidationForExchangeCStore(partTableRel, ordTableRel, partSeq); - else - checkValidationForExchangeCStore(partTableRel, ordTableRel, partSeq); - } else { - if (exchangeVerbose) - checkValidationForExchangeTable(partTableRel, ordTableRel, partSeq); - else - checkValidationForExchangeTable(partTableRel, ordTableRel, partSeq); - } -} - -// Description : Get partition oid by name or partition key values -static Oid getPartitionOid(Relation partTableRel, const char *partName, Node *PartDef) -{ - Oid partOid = InvalidOid; - - if (PointerIsValid(partName)) { - partOid = PartitionNameGetPartitionOid(RelationGetRelid(partTableRel), - partName, - 
PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - } else { - List* boundary = GetPartitionBoundary(partTableRel, PartDef); - partOid = PartitionValuesGetPartitionOid( - partTableRel, boundary, AccessExclusiveLock, true, true, false); - } - - return partOid; -} - -// Description : Swap relfilenode of index -static void finishIndexSwap(List* partIndexList, List* ordIndexList) -{ - ListCell* cell1 = NULL; - ListCell* cell2 = NULL; - - forboth(cell1, partIndexList, cell2, ordIndexList) - { - Oid partOid, ordOid; - partOid = (Oid)lfirst_oid(cell1); - ordOid = (Oid)lfirst_oid(cell2); - - finishPartitionHeapSwap(partOid, ordOid, true, u_sess->utils_cxt.RecentGlobalXmin, GetOldestMultiXactId()); - } -} - -static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) -{ - SplitPartitionState* splitPart = NULL; - List* destPartDefList = NIL; - RangePartitionMap* partMap = NULL; - Oid partTableOid = InvalidOid; - Oid srcPartOid = InvalidOid; - int srcPartIndex = -1; - ListCell* cell = NULL; - int currentPartNum; - int targetPartNum; - int partKeyNum = 0; - List* newPartOidList = NIL; - List* destPartBoundaryList = NIL; - List* listForFree = NIL; - - Partition part = NULL; - Oid tempTableOid = InvalidOid; - Relation tempTableRel = NULL; - ObjectAddress object; - - splitPart = (SplitPartitionState*)cmd->def; - destPartDefList = splitPart->dest_partition_define_list; - partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; - partTableOid = RelationGetRelid(partTableRel); - - // get src partition oid - if (PointerIsValid(splitPart->src_partition_name)) { - srcPartOid = PartitionNameGetPartitionOid(RelationGetRelid(partTableRel), - splitPart->src_partition_name, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - } else { - splitPart->partition_for_values = transformConstIntoTargetType( - partTableRel->rd_att->attrs, 
partMap->partitionKey, splitPart->partition_for_values); - srcPartOid = PartitionValuesGetPartitionOid( - partTableRel, splitPart->partition_for_values, AccessExclusiveLock, true, true, false); - } - - /* add INTERVAL_PARTITION_LOCK_SDEQUENCE here to avoid ADD INTERVAL PARTITION */ - if (RELATION_IS_INTERVAL_PARTITIONED(partTableRel)) { - LockPartitionObject(partTableRel->rd_id, INTERVAL_PARTITION_LOCK_SDEQUENCE, PARTITION_EXCLUSIVE_LOCK); - } - - // check final partition num - targetPartNum = getNumberOfPartitions(partTableRel) + list_length(destPartDefList) - 1; - if (targetPartNum > MAX_PARTITION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("too many partitions for partitioned table"), - errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM))); - } - - /* check src partition exists */ - if (!OidIsValid(srcPartOid)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - splitPart->src_partition_name != NULL - ? errmsg("split partition \"%s\" does not exist.", splitPart->src_partition_name) - : errmsg("split partition does not exist."))); - } - - /* check local index 'usable' state */ - if (!checkRelationLocalIndexesUsable(partTableRel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't split partition bacause relation %s has unusable local index", - NameStr(partTableRel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } - - // check dest partitions name not existing - checkDestPartitionNameForSplit(partTableOid, destPartDefList); - - // check dest partition tablespace - foreach (cell, destPartDefList) { - RangePartitionDefState* rangePartDef = (RangePartitionDefState*)lfirst(cell); - - CheckPartitionTablespace(rangePartDef->tablespacename, partTableRel->rd_rel->relowner); - } - - // get src partition sequence - srcPartIndex = partOidGetPartSequence(partTableRel, srcPartOid) - 1; - if (srcPartIndex < 0) { - Assert(false); - ereport(ERROR, - 
(errcode(ERRCODE_NO_DATA_FOUND), - errmsg("the partition oid(%u) of partition name (%s) is not found in partitioned table(%u).", - srcPartOid, - splitPart->src_partition_name ? splitPart->src_partition_name : "NULL", - partTableRel->rd_id))); - } - bool isPrevInterval = srcPartIndex > 0 && partMap->rangeElements[srcPartIndex - 1].isInterval; - // if split point - if (PointerIsValid(splitPart->split_point)) { - RangePartitionDefState* rangePartDef = NULL; - - // check split point value - checkSplitPointForSplit(splitPart, partTableRel, srcPartIndex); - - Assert(list_length(destPartDefList) == 2); - - // set the two dest partitions boundary - rangePartDef = (RangePartitionDefState*)list_nth(destPartDefList, 0); - rangePartDef->boundary = splitPart->split_point; - - /* - * generate boundary for the second partititon - */ - rangePartDef = (RangePartitionDefState*)list_nth(destPartDefList, 1); - rangePartDef->boundary = getRangePartitionBoundaryList(partTableRel, srcPartIndex); - } else { - // not split point - int compare = 0; - ListCell* otherCell = NULL; - - if (list_length(destPartDefList) < 2) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("the number of resulting partitions must be more than one"))); - } - - // transform partition key type - destPartBoundaryList = getDestPartBoundaryList(partTableRel, destPartDefList, &listForFree); - - // check the first dest partition boundary - if (srcPartIndex != 0) { - if (!partMap->rangeElements[srcPartIndex].isInterval) { - compare = comparePartitionKey(partMap, (Const**)lfirst(list_head(destPartBoundaryList)), - partMap->rangeElements[srcPartIndex - 1].boundary, partKeyNum); - } else { - Const** partKeyValue = (Const**)lfirst(list_head(destPartBoundaryList)); - RangeElement& srcPartition = partMap->rangeElements[srcPartIndex]; - compare = ValueCmpLowBoudary(partKeyValue, &srcPartition, partMap->intervalValue); - } - if (compare <= 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - 
errmsg("the bound of the first resulting partition is too low"))); - } - } - - // check the dest partitions boundary - forboth(cell, destPartBoundaryList, otherCell, destPartDefList) - { - Const** currentBoundary = (Const**)lfirst(cell); - RangePartitionDefState* rangePartDef = (RangePartitionDefState*)lfirst(otherCell); - Const** nextBoudary = NULL; - - if (!PointerIsValid(cell->next)) { - break; - } - - nextBoudary = (Const**)lfirst(cell->next); - rangePartDef = (RangePartitionDefState*)lfirst(otherCell->next); - - compare = comparePartitionKey(partMap, currentBoundary, nextBoudary, partKeyNum); - - if (compare >= 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("the bound of resulting partition \"%s\" is too low", rangePartDef->partitionName))); - } - } - - // check the last dest partition boundary equal the src partition boundary - compare = comparePartitionKey(partMap, - (Const**)lfirst(list_tail(destPartBoundaryList)), - partMap->rangeElements[srcPartIndex].boundary, - partKeyNum); - if (compare != 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("the bound of the last resulting partition is not equal with specified partition bound"))); - } - } - - // add dest partitions - int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(partTableRel->rd_id)); - Assert(PARTITIONNO_IS_VALID(partitionno)); - foreach (cell, destPartDefList) { - partitionno++; - PartitionDefState* partDef = (PartitionDefState*)lfirst(cell); - partDef->partitionno = partitionno; - } - fastAddPartition(partTableRel, destPartDefList, &newPartOidList); - /* inplace update on partitioned table, because we can't cover the wait_clean_gpi info, which is inplace updated */ - UpdateCurrentPartitionNo(RelOidGetPartitionTupleid(partTableRel->rd_id), -partitionno, true); - - freeDestPartBoundaryList(destPartBoundaryList, listForFree); - if (isPrevInterval) { - // modify all previous *interval* partitions to range partitions, *possibly* no such 
partitions - Relation pgPartition = relation_open(PartitionRelationId, RowExclusiveLock); - UpdatePrevIntervalPartToRange(partTableRel, pgPartition, srcPartIndex, "SPLIT PARTITION"); - relation_close(pgPartition, NoLock); - } -#ifdef PGXC - if (IS_PGXC_DATANODE) { -#endif - part = partitionOpen(partTableRel, srcPartOid, AccessExclusiveLock); - - // creat temp table and swap relfilenode with src partition - tempTableOid = createTempTableForPartition(partTableRel, part); - finishPartitionHeapSwap(srcPartOid, tempTableOid, false, u_sess->utils_cxt.RecentXmin, GetOldestMultiXactId()); - - CommandCounterIncrement(); - - partitionClose(partTableRel, part, NoLock); -#ifdef PGXC - } -#endif - - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid); - // drop src partition - fastDropPartition(partTableRel, srcPartOid, "SPLIT PARTITION"); - currentPartNum = getNumberOfPartitions(partTableRel); - if (currentPartNum != targetPartNum) { - ereport(ERROR, - (errmsg("An error occurred in the split partition process. 
" - "The number of target partitions is %d, but the number of current partitions is %d.", - targetPartNum, currentPartNum))); - } - -#ifdef PGXC - if (IS_PGXC_DATANODE) { -#endif - tempTableRel = relation_open(tempTableOid, AccessExclusiveLock); - - // read temp table tuples and insert into partitioned table - readTuplesAndInsert(tempTableRel, partTableRel); - - relation_close(tempTableRel, NoLock); - - // delete temp table - object.classId = RelationRelationId; - object.objectId = tempTableOid; - object.objectSubId = 0; - performDeletion(&object, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - - // reindex dest partitions - foreach (cell, newPartOidList) { - Oid partOid = lfirst_oid(cell); - - reindexPartition(partTableOid, partOid, REINDEX_REL_SUPPRESS_INDEX_USE, REINDEX_ALL_INDEX); - - if (cmd->alterGPI) { - AddGPIForPartition(partTableOid, partOid); - } - } -#ifdef PGXC - } -#endif - - list_free_ext(newPartOidList); - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(partTableRel); - } -} - -void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid) -{ - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - Partition subPart = partitionOpen(partRel, subPartOid, NoLock); - if (subPart->pd_part->partstrategy == PART_STRATEGY_HASH) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Hash subpartition does not support split."), - errdetail("N/A"), errcause("Hash subpartition does not support split."), - erraction("Use the correct split action"))); - } - if (subPart->pd_part->partstrategy != PART_STRATEGY_LIST) { - ereport( - ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("The syntax format of split subpartition is incorrect."), - errdetail("SPLIT SUBPARTITION NAME VALUES shouldn't be used, it's for list subpartitions."), - errcause("Wrong split subpartition syntax used."), erraction("Use proper split subpartition syntax."))); - } - 
partitionClose(partRel, subPart, NoLock); - - int srcSubPartIndex = partOidGetPartSequence(partRel, subPartOid) - 1; - List* boundary = getListPartitionBoundaryList(partRel, srcSubPartIndex); - if (!(list_length(boundary) == 1 && ((Const*)list_nth(boundary, 0))->ismaxvalue)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Only the default boundary subpartition can be splited."))); - } - - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); - list_free_deep(boundary); -} - -void checkDestListSubPartitionNameForSplit(Relation rel, List* destPartDefList, Oid srcSubPartOid) -{ - Assert(list_length(destPartDefList) == 2); - - char *srcSubPartName = getPartitionName(srcSubPartOid, false); - char *destSubPartName1 = ((ListPartitionDefState*)list_nth(destPartDefList, 0))->partitionName; - char *destSubPartName2 = ((ListPartitionDefState*)list_nth(destPartDefList, 1))->partitionName; - ListCell* lc = NULL; - - - // check dest subpartitions name with self - if (!strcmp(destSubPartName1, destSubPartName2)) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("The dest subpartitions has the same name."))); - } - /* check dest partitions name with existing partitions name - * We allow the dest subpartition to have the same name as the src subpartition. - * Because we're going to delete the src subpartition. 
- */ - List *partOidList = relationGetPartitionOidList(rel); - foreach (lc, destPartDefList) { - char *destSubPartName = ((ListPartitionDefState*)lfirst(lc))->partitionName; - if (!strcmp(destSubPartName, srcSubPartName)) { - continue; - } - ListCell *cell = NULL; - foreach (cell, partOidList) { - Oid partOid = lfirst_oid(cell); - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, NameGetDatum(destSubPartName), - CharGetDatum(PART_OBJ_TYPE_TABLE_SUB_PARTITION), - ObjectIdGetDatum(partOid))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("resulting subpartition \"%s\" name conflicts with an existing subpartition", - destSubPartName))); - } - } - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, NameGetDatum(destSubPartName), - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION), - ObjectIdGetDatum(RelationGetRelid(rel)))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("resulting subpartition \"%s\" name conflicts with an existing partition", - destSubPartName))); - } - } -} - -static void CheckDestRangeSubPartitionNameForSplit(Relation rel, List* destPartDefList) -{ - Assert(list_length(destPartDefList) == 2); - - ListCell* lc = NULL; - - // check dest subpartitions name with self - checkPartitionName(destPartDefList); - - // check dest subpartitions name with existing subpartitions name - List *partOidList = relationGetPartitionOidList(rel); - foreach (lc, destPartDefList) { - char *destSubPartName = ((ListPartitionDefState*)lfirst(lc))->partitionName; - - ListCell *cell = NULL; - foreach (cell, partOidList) { - Oid partOid = lfirst_oid(cell); - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, NameGetDatum(destSubPartName), - CharGetDatum(PART_OBJ_TYPE_TABLE_SUB_PARTITION), - ObjectIdGetDatum(partOid))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("resulting subpartition \"%s\" name conflicts with an existing subpartition", - destSubPartName))); - } - } - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, 
NameGetDatum(destSubPartName), - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION), - ObjectIdGetDatum(RelationGetRelid(rel)))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("resulting subpartition \"%s\" name conflicts with an existing partition", - destSubPartName))); - } - } -} - -static void ChecksrcRangeSubPartitionNameForSplit(Relation rel, Oid partOid, Oid subPartOid) -{ - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - Partition subPart = partitionOpen(partRel, subPartOid, NoLock); - if (subPart->pd_part->partstrategy == PART_STRATEGY_HASH) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Hash subpartition does not support split."), - errdetail("N/A"), errcause("Hash subpartition does not support split."), - erraction("Use the correct split action"))); - } - if (subPart->pd_part->partstrategy != PART_STRATEGY_RANGE) { - ereport( - ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("The syntax format of split subpartition is incorrect."), - errdetail("SPLIT SUBPARTITION NAME AT shouldn't be used, it's for range subpartitions."), - errcause("Wrong split subpartition syntax used."), erraction("Use proper split subpartition syntax."))); - } - partitionClose(partRel, subPart, NoLock); - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); -} - -void CheckDestListSubPartitionBoundaryForSplit(Relation rel, Oid partOid, SplitPartitionState* splitSubPart) -{ - ListCell *cell = NULL; - Oid existingSubPartOid; - Oid defaultSubPartOid = InvalidOid; - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - ListPartitionMap* partMap = (ListPartitionMap*)partRel->partMap; - ParseState* pstate = make_parsestate(NULL); - - splitSubPart->newListSubPartitionBoundry = - transformListPartitionValue(pstate, splitSubPart->newListSubPartitionBoundry, true, true); - pfree_ext(pstate); - - for (int i = 0; i < 
partMap->listElementsNum; i++) { - ListPartElement *list = &partMap->listElements[i]; - if (list->boundary[0].values[0]->ismaxvalue) { - defaultSubPartOid = list->partitionOid; - break; - } - } - - bool partkeyIsFunc = IsPartKeyFunc(partRel, true, true); - foreach (cell, splitSubPart->newListSubPartitionBoundry) { - existingSubPartOid = FindPartOidByListBoundary(partRel, partMap, (Node*)lfirst(cell), partkeyIsFunc); - if (OidIsValid(existingSubPartOid) && existingSubPartOid != defaultSubPartOid) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("list subpartition %s has overlapped value", getPartitionName(existingSubPartOid, false)))); - } - } - - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); -} - -int GetNumberOfSubPartitions(Relation rel) -{ - int subPartitionNum = 0; - List *partOidList = relationGetPartitionOidList(rel); - ListCell *cell = NULL; - foreach (cell, partOidList) { - Oid partOid = lfirst_oid(cell); - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - subPartitionNum += GetNumberOfSubPartitions(partRel); - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); - } - return subPartitionNum; -} -static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd) -{ - SplitPartitionState* splitSubPart = NULL; - ListCell* cell = NULL; - List* destPartDefList = NIL; - List* newSubPartOidList = NIL; - Oid tempTableOid = InvalidOid; - int currentPartNum = 0; - Oid partOid = InvalidOid; - Oid srcSubPartOid = InvalidOid; - int subpartitionno = INVALID_PARTITION_NO; - - splitSubPart = (SplitPartitionState*)cmd->def; - destPartDefList = splitSubPart->dest_partition_define_list; - - // check final partition num - currentPartNum = GetNumberOfSubPartitions(partTableRel); - if ((currentPartNum + list_length(destPartDefList) - 1) > MAX_PARTITION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("too many subpartitions 
for partitioned table"), - errhint("Number of subpartitions can not be more than %d", MAX_PARTITION_NUM))); - } - if (splitSubPart->splitType == LISTSUBPARTITIION) { - srcSubPartOid = SubPartitionNameGetSubPartitionOid(RelationGetRelid(partTableRel), - splitSubPart->src_partition_name, - ShareUpdateExclusiveLock, /* partition lock */ - AccessExclusiveLock, /* subpartition lock */ - true, - false, - NULL, - NULL, - NoLock, - &partOid); - /* check src subpartition exists */ - if (!OidIsValid(srcSubPartOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - splitSubPart->src_partition_name != NULL - ? errmsg("split subpartition \"%s\" does not exist.", splitSubPart->src_partition_name) - : errmsg("split subpartition does not exist."))); - } - - // check src subpartition is default subpartition - CheckSrcListSubPartitionForSplit(partTableRel, partOid, srcSubPartOid); - // check dest subpartitions name not existing - checkDestListSubPartitionNameForSplit(partTableRel, destPartDefList, srcSubPartOid); - // check new boundry not existing - CheckDestListSubPartitionBoundaryForSplit(partTableRel, partOid, splitSubPart); - - /* check local index 'usable' state */ - if (!checkRelationLocalIndexesUsable(partTableRel)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't split subpartition bacause relation %s has unusable local index", - NameStr(partTableRel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } - - // check dest partition tablespace - subpartitionno = -GetCurrentSubPartitionNo(partOid); - Assert(PARTITIONNO_IS_VALID(subpartitionno)); - foreach (cell, destPartDefList) { - ListPartitionDefState *listSubPartDef = (ListPartitionDefState *)lfirst(cell); - subpartitionno++; - listSubPartDef->partitionno = subpartitionno; - CheckPartitionTablespace(listSubPartDef->tablespacename, partTableRel->rd_rel->relowner); - } - - ListPartitionDefState* listPartDef = NULL; - // set the two dest subpartitions boundary - 
listPartDef = (ListPartitionDefState*)list_nth(destPartDefList, 0); - listPartDef->boundary = splitSubPart->newListSubPartitionBoundry; - - /* - * generate boundary for the second partititon - */ - listPartDef = (ListPartitionDefState*)list_nth(destPartDefList, 1); - Const *boundaryDefault = makeNode(Const); - boundaryDefault->ismaxvalue = true; - boundaryDefault->location = -1; - listPartDef->boundary = list_make1(boundaryDefault); - } else if (splitSubPart->splitType == RANGESUBPARTITIION) { - srcSubPartOid = SubPartitionNameGetSubPartitionOid(RelationGetRelid(partTableRel), - splitSubPart->src_partition_name, - ShareUpdateExclusiveLock, /* partition lock */ - AccessExclusiveLock, /* subpartition lock */ - true, - false, - NULL, - NULL, - NoLock, - &partOid); - /* check src subpartition exists */ - if (!OidIsValid(srcSubPartOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - splitSubPart->src_partition_name != NULL - ? errmsg("split subpartition \"%s\" does not exist.", splitSubPart->src_partition_name) - : errmsg("split subpartition does not exist."))); - } - // check src subpartitions - ChecksrcRangeSubPartitionNameForSplit(partTableRel, partOid, srcSubPartOid); - // check dest subpartitions name not existing - CheckDestRangeSubPartitionNameForSplit(partTableRel, destPartDefList); - /* check local index 'usable' state */ - if (!checkRelationLocalIndexesUsable(partTableRel)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't split subpartition bacause relation %s has unusable local index", - NameStr(partTableRel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } - - // check dest partition tablespace - subpartitionno = -GetCurrentSubPartitionNo(partOid); - Assert(PARTITIONNO_IS_VALID(subpartitionno)); - foreach (cell, destPartDefList) { - RangePartitionDefState *listSubPartDef = (RangePartitionDefState *)lfirst(cell); - subpartitionno++; - listSubPartDef->partitionno = subpartitionno; - 
CheckPartitionTablespace(listSubPartDef->tablespacename, partTableRel->rd_rel->relowner); - } - Partition part = partitionOpen(partTableRel, partOid, NoLock); - Relation partRel = partitionGetRelation(partTableRel, part); - - // get src partition sequence - int srcSubPartIndex = partOidGetPartSequence(partRel, srcSubPartOid) - 1; - if (srcSubPartIndex < 0) { - Assert(false); - ereport(ERROR, - (errcode(ERRCODE_NO_DATA_FOUND), - errmsg("the subpartition oid(%u) of subpartition name (%s) is not found in partitioned table(%u).", - srcSubPartOid, splitSubPart->src_partition_name ? splitSubPart->src_partition_name : "NULL", - partTableRel->rd_id))); - } - - if (PointerIsValid(splitSubPart->split_point)) { - RangePartitionDefState *rangePartDef = NULL; - - // check split point value - checkSplitPointForSplit(splitSubPart, partRel, srcSubPartIndex); - - Assert(list_length(destPartDefList) == 2); - - // set the two dest partitions boundary - rangePartDef = (RangePartitionDefState *)list_nth(destPartDefList, 0); - rangePartDef->boundary = splitSubPart->split_point; - - /* - * generate boundary for the second partititon - */ - rangePartDef = (RangePartitionDefState *)list_nth(destPartDefList, 1); - rangePartDef->boundary = getRangePartitionBoundaryList(partRel, srcSubPartIndex); - } - - // add dest partitions - FastAddRangeSubPartition(partTableRel, destPartDefList, partOid, &newSubPartOidList); - - releaseDummyRelation(&partRel); - partitionClose(partTableRel, part, NoLock); - } - Partition part = partitionOpen(partTableRel, partOid, NoLock); - Relation partRel = partitionGetRelation(partTableRel, part); - Partition subPart = partitionOpen(partRel, srcSubPartOid, NoLock); - - // creat temp table and swap relfilenode with src partition - tempTableOid = createTempTableForPartition(partTableRel, subPart); - finishPartitionHeapSwap(srcSubPartOid, tempTableOid, false, u_sess->utils_cxt.RecentXmin, GetOldestMultiXactId()); - - CommandCounterIncrement(); - - 
partitionClose(partRel, subPart, NoLock); - - AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, partOid, srcSubPartOid); - - if (cmd->alterGPI) { - DeleteGPITuplesForSubPartition(RelationGetRelid(partTableRel), partOid, srcSubPartOid); - } - - // drop src partition - fastDropPartition(partRel, srcSubPartOid, "SPLIT SUBPARTITION", InvalidOid, false); - CacheInvalidatePartcache(part); - - releaseDummyRelation(&partRel); - partitionClose(partTableRel, part, NoLock); - - if (splitSubPart->splitType == LISTSUBPARTITIION) { - /* - * add dest subpartitions. For list subpartition We allow the dest subpartition name - * to be the same as the src subpartition name. So we need delete src subpartition - * and then add the dest subpartition name. - */ - FastAddListSubPartition(partTableRel, destPartDefList, partOid, &newSubPartOidList); - } - - UpdateCurrentSubPartitionNo(partOid, -subpartitionno); - - Relation tempTableRel = relation_open(tempTableOid, AccessExclusiveLock); - - // read temp table tuples and insert into partitioned table - readTuplesAndInsert(tempTableRel, partTableRel); - - relation_close(tempTableRel, NoLock); - - // delete temp table - ObjectAddress object; - object.classId = RelationRelationId; - object.objectId = tempTableOid; - object.objectSubId = 0; - performDeletion(&object, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - - // reindex dest partitions - foreach (cell, newSubPartOidList) { - Oid subPartOid = lfirst_oid(cell); - - reindexPartition(RelationGetRelid(partTableRel), subPartOid, REINDEX_REL_SUPPRESS_INDEX_USE, REINDEX_ALL_INDEX); - if (cmd->alterGPI) { - AddGPIForSubPartition(RelationGetRelid(partTableRel), partOid, subPartOid); - } - } - list_free_ext(newSubPartOidList); - - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(partTableRel); - } -} - -// check split point -static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation partTableRel, int srcPartIndex) -{ - RangePartitionMap* partMap 
= NULL; - ParseState* pstate = NULL; - ListCell* cell = NULL; - Const* partKeyValueArr[RANGE_PARTKEYMAXNUM] = {NULL}; - int i = 0; - int partKeyNum = 0; - int compareSrcPart = 0; - - // get partition key number - partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; - - // check split point length - if (partKeyNum != list_length(splitPart->split_point)) { - if (RelationIsPartitionOfSubPartitionTable(partTableRel)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - (errmsg("Number of boundary items NOT EQUAL to number of partition keys"), - errdetail("There can only be one boundary value for split range subpartitions"), - errcause("N/A"), erraction("Check whether the SQL statements. is correct.")))); - } else { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - (errmsg("number of boundary items NOT EQUAL to number of partition keys"), errdetail("N/A"), - errcause("N/A"), erraction("Check whether the SQL statements. is correct.")))); - } - } - - pstate = make_parsestate(NULL); - splitPart->split_point = transformRangePartitionValueInternal(pstate, splitPart->split_point, true, true); - pfree_ext(pstate); - - List *tmp = splitPart->split_point; - splitPart->split_point = - transformConstIntoTargetType(partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->split_point); - list_free_ext(tmp); - - foreach (cell, splitPart->split_point) { - partKeyValueArr[i++] = (Const*)lfirst(cell); - } - - // compare split point with src partition - compareSrcPart = - comparePartitionKey(partMap, partKeyValueArr, partMap->rangeElements[srcPartIndex].boundary, partKeyNum); - if (compareSrcPart >= 0) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("split point is too high"))); - } - bool lowerOverflow = false; - if (partMap->rangeElements[srcPartIndex].isInterval) { - // compare split point with the lower boundary of src partition - RangeElement& srcPart = partMap->rangeElements[srcPartIndex]; - Interval 
*interval = partMap->intervalValue; - lowerOverflow = !ValueSatisfyLowBoudary(&partKeyValueArr[0], &srcPart, interval, false); - } else { - // compare split point with the previous partition of src partition - if (srcPartIndex != 0) { - lowerOverflow = comparePartitionKey( - partMap, partKeyValueArr, partMap->rangeElements[srcPartIndex - 1].boundary, partKeyNum) <= 0; - } - } - if (lowerOverflow) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("split point is too low"))); - } -} - -static void checkDestPartitionNameForSplit(Oid partTableOid, List* partDefList) -{ - ListCell* cell = NULL; - - // check dest partitions name with self - checkPartitionName(partDefList); - - // check dest partitions name with existing partitions name - foreach (cell, partDefList) { - RangePartitionDefState* rangePartDef = (RangePartitionDefState*)lfirst(cell); - - if (InvalidOid != GetSysCacheOid3(PARTPARTOID, - NameGetDatum(rangePartDef->partitionName), - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION), - ObjectIdGetDatum(partTableOid))) { - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("resulting partition \"%s\" name conflicts with that of an existing partition", - rangePartDef->partitionName))); - } - } -} - -static List* getDestPartBoundaryList(Relation partTableRel, List* destPartDefList, List** listForFree) -{ - ListCell* cell = NULL; - List* result = NIL; - bool partkeyIsFunc = IsPartKeyFunc(partTableRel, false, false); - foreach (cell, destPartDefList) { - RangePartitionDefState* rangePartDef = (RangePartitionDefState*)lfirst(cell); - List* partKeyValueList = NIL; - ListCell* otherCell = NULL; - Const** partKeyValueArr = (Const**)palloc0(sizeof(Const*) * RANGE_PARTKEYMAXNUM); - int i = 0; - - partKeyValueList = transformConstIntoTargetType(partTableRel->rd_att->attrs, - ((RangePartitionMap*)partTableRel->partMap)->partitionKey, - rangePartDef->boundary, partkeyIsFunc); - - foreach (otherCell, partKeyValueList) { - partKeyValueArr[i++] = 
(Const*)lfirst(otherCell); - } - - result = lappend(result, partKeyValueArr); - *listForFree = lappend(*listForFree, partKeyValueList); - } - - return result; -} - -static void freeDestPartBoundaryList(List* list1, List* list2) -{ - ListCell* cell = NULL; - - foreach (cell, list1) { - Const** partKeyValues = (Const**)lfirst(cell); - - pfree_ext(partKeyValues); - } - - list_free_ext(list1); - - foreach (cell, list2) { - List* partKeyList = (List*)lfirst(cell); - - list_free_deep(partKeyList); - } - - list_free_ext(list2); -} - -/* - * Description: Generates a partition's name for the temporary partition that may be created during merge partitions. - * - * Parameters: - * @in partTableRel: partition table relation. - * @in sequence: current partition sequence, default 0. - * - * Returns: - * sample partition name. This must be pfree'd by the caller. - */ -static char* GenTemporaryPartitionName(Relation partTableRel, int sequence) -{ - char tmpName[NAMEDATALEN]; - errno_t ret; - - ret = snprintf_s(tmpName, - NAMEDATALEN, - NAMEDATALEN - 1, - "pg_partition_%u_%d_%ld", - partTableRel->rd_id, - sequence, - GetCurrentTimestamp()); - securec_check_ss_c(ret, "\0", "\0"); - - return pstrdup(tmpName); -} - -#ifndef ENABLE_MULTIPLE_NODES -static Oid GetNewPartitionOid(Relation pgPartRel, Relation partTableRel, Node *partDef, - Oid bucketOid, bool *isTimestamptz, StorageType stype, Datum new_reloptions, bool isSubpartition) -{ -#else -static Oid GetNewPartitionOid(Relation pgPartRel, Relation partTableRel, Node *partDef, - Oid bucketOid, bool *isTimestamptz, StorageType stype, bool isSubpartition) -{ - Datum new_reloptions = (Datum)0; -#endif - Oid newPartOid = InvalidOid; - switch (nodeTag(partDef)) { - case T_RangePartitionDefState: - newPartOid = heapAddRangePartition(pgPartRel, - partTableRel->rd_id, - partTableRel->rd_rel->reltablespace, - bucketOid, - (RangePartitionDefState *)partDef, - partTableRel->rd_rel->relowner, - new_reloptions, - isTimestamptz, - stype, - 
AccessExclusiveLock, - NULL, - isSubpartition); - break; - case T_ListPartitionDefState: - newPartOid = HeapAddListPartition(pgPartRel, - partTableRel->rd_id, - partTableRel->rd_rel->reltablespace, - bucketOid, - (ListPartitionDefState *)partDef, - partTableRel->rd_rel->relowner, - new_reloptions, - isTimestamptz, - stype, - NULL, - isSubpartition); - break; - case T_HashPartitionDefState: - newPartOid = HeapAddHashPartition(pgPartRel, - partTableRel->rd_id, - partTableRel->rd_rel->reltablespace, - bucketOid, - (HashPartitionDefState *)partDef, - partTableRel->rd_rel->relowner, - new_reloptions, - isTimestamptz, - stype, - NULL, - isSubpartition); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("The partitioned table is unsupported for adding new partition when data is exchanged."))); - break; - } - - return newPartOid; -} - -/* - * Description: Add a temporary partition during merge partitions. - * - * Parameters: - * @in partTableRel: partition table relation - * @in partDef: partition description. - * - * Returns: - * temporary partition oid. - */ -static Oid AddTemporaryPartition(Relation partTableRel, Node* partDef) -{ - Relation pgPartRel = NULL; - HeapTuple tuple = NULL; - Oid newPartOid = InvalidOid; - Oid bucketOid = InvalidOid; - bool isnull = false; - Datum rel_reloptions; - Datum new_reloptions; - List* old_reloptions = NIL; - bool isPartitionOfSubPartition = RelationIsPartitionOfSubPartitionTable(partTableRel); - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partTableRel); - bucketOid = RelationGetBucketOid(partTableRel); - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - - /* add new partition entry in pg_partition */ - Oid relOid = isPartitionOfSubPartition ? 
- ObjectIdGetDatum(partTableRel->parentId) : ObjectIdGetDatum(partTableRel->rd_id); - tuple = SearchSysCache1(RELOID, relOid); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - (errmsg("cache lookup failed"), errdetail("cache lookup failed for relation %u", relOid), - errcause("The oid of the target relation is invalid."), - erraction("Check whether the target relation is correct.")))); - } - rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - - old_reloptions = untransformRelOptions(rel_reloptions); - RemoveRedisRelOptionsFromList(&old_reloptions); - new_reloptions = transformRelOptions((Datum)0, old_reloptions, NULL, NULL, false, false); - ReleaseSysCache(tuple); - - if (old_reloptions != NIL) { - list_free_ext(old_reloptions); - } - - /* Temporary tables do not use segment-page */ -#ifndef ENABLE_MULTIPLE_NODES - newPartOid = GetNewPartitionOid(pgPartRel, partTableRel, partDef, bucketOid, - isTimestamptz, RelationGetStorageType(partTableRel), new_reloptions, isPartitionOfSubPartition); -#else - newPartOid = GetNewPartitionOid(pgPartRel, partTableRel, partDef, bucketOid, isTimestamptz, - RelationGetStorageType(partTableRel), isPartitionOfSubPartition); -#endif - - // We must bump the command counter to make the newly-created - // partition tuple visible for opening. 
- CommandCounterIncrement(); - - if (isPartitionOfSubPartition) { - Relation rel = heap_open(partTableRel->parentId, AccessShareLock); - addIndexForPartition(rel, newPartOid); - heap_close(rel, NoLock); - } else { - addIndexForPartition(partTableRel, newPartOid); - } - - addToastTableForNewPartition(partTableRel, newPartOid, isPartitionOfSubPartition); - - // invalidate relation - CacheInvalidateRelcache(partTableRel); - - pfree_ext(isTimestamptz); - relation_close(pgPartRel, NoLock); - - return newPartOid; -} - -/* - * Description: Set one partitioned relation's reloptions wait_clean_gpi and one partition's reloptions wait_clean_gpi. - * - * Parameters: - * @in alterGPI: check whether is global partition index alter statement. - * @in partTableRel: partition table relation. - * @in partOid: partition oid. - */ -static void AlterPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel, Oid targetPartOid) -{ - if (!alterGPI) { - return; - } - - Partition targetPart = NULL; - Partition part = NULL; - Relation partRel = NULL; - Oid parentOid = partid_get_parentid(targetPartOid); - if (parentOid != partTableRel->rd_id) { - part = partitionOpen(partTableRel, parentOid, AccessShareLock); - partRel = partitionGetRelation(partTableRel, part); - targetPart = partitionOpen(partRel, targetPartOid, AccessExclusiveLock); - } else { - targetPart = partitionOpen(partTableRel, targetPartOid, AccessExclusiveLock); - } - - if (!PartitionEnableWaitCleanGpi(targetPart)) { - /* partition create not set wait_clean_gpi, must use update, and we ensure no concurrency */ - PartitionSetWaitCleanGpi(targetPartOid, true, false); - /* Partitioned create set wait_clean_gpi=n, and we want save it, so just use inplace */ - PartitionedSetWaitCleanGpi(RelationGetRelationName(partTableRel), RelationGetRelid(partTableRel), true, true); - } - - if (partRel != NULL) { - partitionClose(partRel, targetPart, NoLock); - releaseDummyRelation(&partRel); - partitionClose(partTableRel, part, NoLock); - } 
else { - partitionClose(partTableRel, targetPart, NoLock); - } -} - -/* - * Description: Set one subpartitioned relation's reloptions wait_clean_gpi and - * one subpartition's reloptions wait_clean_gpi. - * - * Parameters: - * @in alterGPI: check whether is global partition index alter statement. - * @in partTableRel: partition table relation. - * @in subpartOid: subpartition oid. - */ -static void AlterSubPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel, Oid partOid, Oid subPartOid) -{ - if (!alterGPI) { - return; - } - - Partition part = partitionOpen(partTableRel, partOid, AccessShareLock); - Relation partRel = partitionGetRelation(partTableRel, part); - Partition subPart = partitionOpen(partRel, subPartOid, AccessExclusiveLock); - if (!PartitionEnableWaitCleanGpi(subPart)) { - /* partition create not set wait_clean_gpi, must use update, and we ensure no concurrency */ - PartitionSetWaitCleanGpi(subPartOid, true, false); - /* Partitioned create set wait_clean_gpi=n, and we want save it, so just use inplace */ - PartitionedSetWaitCleanGpi(RelationGetRelationName(partTableRel), RelationGetRelid(partTableRel), true, true); - } - partitionClose(partRel, subPart, NoLock); - releaseDummyRelation(&partRel); - partitionClose(partTableRel, part, NoLock); -} - - -/* - * Description: Add a temporary range partition during merge/exchange partitions. - * - * Parameters: - * @in cmd: subcommand of an ALTER TABLE. - * @in partTableRel: partition table relation. - * @in sequence: current partition sequence. - * @in renameTargetPart: if need to rename target partition's name. - * - * Returns: - * temporary partition oid. 
- */ -static Oid AddTemporaryRangePartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel, - int sequence, bool* renameTargetPart) -{ - Oid newPartOid = InvalidOid; - RangePartitionDefState* partDef = NULL; - - partDef = makeNode(RangePartitionDefState); - if (*renameTargetPart) { - /* use target partition's name to add a new partition if it not exists. */ - partDef->partitionName = pstrdup(cmd->name); - *renameTargetPart = false; - } else { - /* use temporary partition's name to add a new partition if target partition's name already exists. */ - partDef->partitionName = GenTemporaryPartitionName(partTableRel, sequence); - *renameTargetPart = true; - } - partDef->boundary = getRangePartitionBoundaryList(partTableRel, sequence); - partDef->tablespacename = pstrdup(cmd->target_partition_tablespace); - partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence); - partDef->curStartVal = NULL; - partDef->partitionInitName = NULL; - newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef); - pfree_ext(partDef->partitionName); - pfree_ext(partDef->tablespacename); - pfree_ext(partDef); - - return newPartOid; -} - -/* - * Description: Add a temporary list partition during exchange partitions. - * - * Parameters: - * @in cmd: subcommand of an ALTER TABLE. - * @in partTableRel: partition table relation. - * @in sequence: current partition sequence. - * @in renameTargetPart: if need to rename target partition's name. - * - * Returns: - * temporary partition oid. - */ -static Oid AddTemporaryListPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel, - int sequence, bool* renameTargetPart) -{ - Oid newPartOid = InvalidOid; - ListPartitionDefState* partDef = NULL; - - partDef = makeNode(ListPartitionDefState); - if (*renameTargetPart) { - /* use target partition's name to add a new partition if it not exists. 
*/ - partDef->partitionName = pstrdup(cmd->name); - *renameTargetPart = false; - } else { - /* use temporary partition's name to add a new partition if target partition's name already exists. */ - partDef->partitionName = GenTemporaryPartitionName(partTableRel, sequence); - *renameTargetPart = true; - } - partDef->boundary = getListPartitionBoundaryList(partTableRel, sequence); - partDef->tablespacename = pstrdup(cmd->target_partition_tablespace); - partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence); - newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef); - pfree_ext(partDef->partitionName); - pfree_ext(partDef->tablespacename); - pfree_ext(partDef); - - return newPartOid; -} - -/* - * Description: Add a temporary hash partition during exchange partitions. - * - * Parameters: - * @in cmd: subcommand of an ALTER TABLE. - * @in partTableRel: partition table relation. - * @in sequence: current partition sequence. - * @in renameTargetPart: if need to rename target partition's name. - * - * Returns: - * temporary partition oid. - */ -static Oid AddTemporaryHashPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel, - int sequence, bool* renameTargetPart) -{ - Oid newPartOid = InvalidOid; - HashPartitionDefState* partDef = NULL; - - partDef = makeNode(HashPartitionDefState); - if (*renameTargetPart) { - /* use target partition's name to add a new partition if it not exists. */ - partDef->partitionName = pstrdup(cmd->name); - *renameTargetPart = false; - } else { - /* use temporary partition's name to add a new partition if target partition's name already exists. 
*/ - partDef->partitionName = GenTemporaryPartitionName(partTableRel, sequence); - *renameTargetPart = true; - } - partDef->boundary = getHashPartitionBoundaryList(partTableRel, sequence); - partDef->tablespacename = pstrdup(cmd->target_partition_tablespace); - partDef->partitionno = GetPartitionnoFromSequence(partTableRel->partMap, sequence); - newPartOid = AddTemporaryPartition(partTableRel, (Node*)partDef); - pfree_ext(partDef->partitionName); - pfree_ext(partDef->tablespacename); - pfree_ext(partDef); - - return newPartOid; -} - -/* - * Description: Add a temporary partition during exchange/truncate partition. - * - * Parameters: - * @in cmd: subcommand of an ALTER TABLE. - * @in partTableRel: partition table relation. - * @in srcPartOid: current partition oid. - */ -static Oid AddTemporaryPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel, - Oid srcPartOid, bool* renameTargetPart) -{ - Oid destPartOid = InvalidOid; - int partSeq = getPartitionElementsIndexByOid(partTableRel, srcPartOid); - - switch (partTableRel->partMap->type) { - case PART_TYPE_LIST: { - destPartOid = AddTemporaryListPartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart); - break; - } - case PART_TYPE_HASH: { - destPartOid = AddTemporaryHashPartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart); - break; - } - case PART_TYPE_RANGE: - case PART_TYPE_INTERVAL: { - RangePartitionMap *partmap = (RangePartitionMap *)partTableRel->partMap; - bool isinterval = partmap->rangeElements[partSeq].isInterval; - destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart); - - if (!isinterval) { - break; - } - - /* if srcPartOid is a interval partition oid, we need transform destPart to interval */ - Relation pg_partition = relation_open(PartitionRelationId, RowExclusiveLock); - HeapTuple parttup = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(destPartOid)); - Assert(HeapTupleIsValid(parttup)); - - 
Form_pg_partition partform = (Form_pg_partition)GETSTRUCT(parttup); - partform->partstrategy = PART_STRATEGY_INTERVAL; - simple_heap_update(pg_partition, &parttup->t_self, parttup); - CatalogUpdateIndexes(pg_partition, parttup); - - tableam_tops_free_tuple(parttup); - relation_close(pg_partition, RowExclusiveLock); - CommandCounterIncrement(); - break; - } - default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Only the List/Hash/Range partitioned table is supported for update global index."))); - break; - } - return destPartOid; -} - -/* - * Description: Add a temporary partition during exchange partition. - * - * Parameters: - * @in cmd: subcommand of an ALTER TABLE. - * @in partTableRel: partition table relation. - * @in srcPartOid: current partition oid. - */ -static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTableRel, Oid srcPartOid, - TransactionId frozenXid, MultiXactId multiXid) -{ - // Return if no global index exists - List* gpiList = RelationGetSpecificKindIndexList(partTableRel, true); - if (list_length(gpiList) == 0) { - return; - } - - List* indexList = NIL; - ListCell* cell = NULL; - Partition srcPart = NULL; - bool renameTargetPart = false; - char* destPartitionName = NULL; - - Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, partTableRel, srcPartOid, &renameTargetPart); - int partitionno = GetCurrentPartitionNo(srcPartOid); - PARTITIONNO_VALID_ASSERT(partitionno); - UpdateCurrentPartitionNo(destPartOid, partitionno, false); - - srcPart = partitionOpen(partTableRel, srcPartOid, AccessExclusiveLock); - destPartitionName = pstrdup(PartitionGetPartitionName(srcPart)); - partitionClose(partTableRel, srcPart, NoLock); - - indexList = RelationGetSpecificKindIndexList(partTableRel, false); - foreach (cell, indexList) { - Oid indexOid = lfirst_oid(cell); - Relation indexRel = relation_open(indexOid, RowExclusiveLock); - Oid indexSrcPartOid = getPartitionIndexOid(indexRel->rd_id, srcPartOid); - Oid 
indexDestPartOid = getPartitionIndexOid(indexRel->rd_id, destPartOid); - Partition dstPart = NULL; - - /* before swap refilenode, promote lock on index partition from ExclusiveLock to AccessExclusiveLock */ - dstPart = partitionOpenWithRetry(indexRel, indexDestPartOid, AccessExclusiveLock, "EXCHANGE PARTITIONS"); - if (!dstPart) { - ereport(ERROR, - (errcode(ERRCODE_LOCK_WAIT_TIMEOUT), - errmsg( - "could not acquire AccessExclusiveLock on dest index partition \"%s\", " - "EXCHANGE PARTITIONS failed", - getPartitionName(indexDestPartOid, false)))); - } - - /* swap relfilenode between temp index relation and dest index partition */ - finishPartitionHeapSwap(indexDestPartOid, indexSrcPartOid, false, frozenXid, multiXid, true); - partitionClose(indexRel, dstPart, NoLock); - relation_close(indexRel, RowExclusiveLock); - } - - // Swap relfilenode of table and toast table - CommandCounterIncrement(); - finishPartitionHeapSwap(destPartOid, srcPartOid, false, frozenXid, multiXid, true); - - CommandCounterIncrement(); - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid); - fastDropPartition(partTableRel, srcPartOid, "EXCHANGE PARTITIONS"); - - if (renameTargetPart) { - CommandCounterIncrement(); - renamePartitionInternal(partTableRel->rd_id, destPartOid, destPartitionName); - } - - list_free_ext(indexList); - pfree_ext(destPartitionName); - - AddGPIForPartition(RelationGetRelid(partTableRel), destPartOid); -} - -static void fastAddPartition(Relation partTableRel, List* destPartDefList, List** newPartOidList) -{ - Relation pgPartRel = NULL; - Oid newPartOid = InvalidOid; - ListCell* cell = NULL; - Oid bucketOid; - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partTableRel); - - bucketOid = RelationGetBucketOid(partTableRel); - - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - -#ifndef ENABLE_MULTIPLE_NODES - bool isNull = false; - HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(partTableRel->rd_id)); - Datum 
relOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isNull); - List* oldRelOptions = untransformRelOptions(relOptions); - Datum newRelOptions = transformRelOptions((Datum)0, oldRelOptions, NULL, NULL, false, false); - ReleaseSysCache(tuple); - list_free_ext(oldRelOptions); -#endif - - foreach (cell, destPartDefList) { - RangePartitionDefState* partDef = (RangePartitionDefState*)lfirst(cell); - - newPartOid = heapAddRangePartition(pgPartRel, - partTableRel->rd_id, - partTableRel->rd_rel->reltablespace, - bucketOid, - partDef, - partTableRel->rd_rel->relowner, -#ifndef ENABLE_MULTIPLE_NODES - (Datum)newRelOptions, -#else - (Datum)0, -#endif - isTimestamptz, - RelationGetStorageType(partTableRel), - AccessExclusiveLock); - - *newPartOidList = lappend_oid(*newPartOidList, newPartOid); - - // We must bump the command counter to make the newly-created - // partition tuple visible for opening. - CommandCounterIncrement(); - - addIndexForPartition(partTableRel, newPartOid); - - addToastTableForNewPartition(partTableRel, newPartOid); - - ereport(LOG, - (errmsg("Partition %s(%d) has been fast Added.", partDef->partitionName, newPartOid))); - - // invalidate relation - CacheInvalidateRelcache(partTableRel); - } - - pfree_ext(isTimestamptz); - relation_close(pgPartRel, NoLock); -} - -static void FastAddListSubPartition(Relation rel, List* destPartDefList, Oid partOid, List** newPartOidList) -{ - Relation pgPartRel = NULL; - Oid newSubPartOid = InvalidOid; - ListCell* cell = NULL; - Oid bucketOid; - - Partition part = partitionOpen(rel, partOid, ShareUpdateExclusiveLock); - Relation partRel = partitionGetRelation(rel, part); - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partRel, true); - - bucketOid = RelationGetBucketOid(partRel); - - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - - foreach (cell, destPartDefList) { - ListPartitionDefState* subPartDef = (ListPartitionDefState*)lfirst(cell); - - newSubPartOid = 
HeapAddListPartition(pgPartRel, - partRel->rd_id, - partRel->rd_rel->reltablespace, - bucketOid, - subPartDef, - partRel->rd_rel->relowner, - (Datum)0, - isTimestamptz, - RelationGetStorageType(partRel), - NULL, - true); - - *newPartOidList = lappend_oid(*newPartOidList, newSubPartOid); - - // We must bump the command counter to make the newly-created - // partition tuple visible for opening. - CommandCounterIncrement(); - - addIndexForPartition(rel, newSubPartOid); - - addToastTableForNewPartition(partRel, newSubPartOid, true); - - // invalidate relation - CacheInvalidateRelcache(partRel); - CacheInvalidatePartcache(part); - } - - pfree_ext(isTimestamptz); - relation_close(pgPartRel, NoLock); - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); -} - -static void FastAddRangeSubPartition(Relation rel, List* destPartDefList, Oid partOid, List** newPartOidList) -{ - Relation pgPartRel = NULL; - Oid newSubPartOid = InvalidOid; - ListCell* cell = NULL; - Oid bucketOid; - - Partition part = partitionOpen(rel, partOid, ShareUpdateExclusiveLock); - Relation partRel = partitionGetRelation(rel, part); - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(partRel, true); - - bucketOid = RelationGetBucketOid(partRel); - - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - - foreach (cell, destPartDefList) { - RangePartitionDefState* subPartDef = (RangePartitionDefState*)lfirst(cell); - - newSubPartOid = heapAddRangePartition(pgPartRel, - partRel->rd_id, - partRel->rd_rel->reltablespace, - bucketOid, - subPartDef, - partRel->rd_rel->relowner, - (Datum)0, - isTimestamptz, - RelationGetStorageType(partRel), - AccessExclusiveLock, - NULL, - true); - - *newPartOidList = lappend_oid(*newPartOidList, newSubPartOid); - - // We must bump the command counter to make the newly-created - // partition tuple visible for opening. 
- CommandCounterIncrement(); - - addIndexForPartition(rel, newSubPartOid); - - addToastTableForNewPartition(partRel, newSubPartOid, true); - - // invalidate relation - CacheInvalidateRelcache(partRel); - CacheInvalidatePartcache(part); - } - - pfree_ext(isTimestamptz); - relation_close(pgPartRel, NoLock); - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); -} - -static Oid createTempTableForPartition(Relation partTableRel, Partition part) -{ - TupleDesc partTableHeapDesc; - Datum partTableRelOptions = 0; - HeapTuple tuple = NULL; - bool isNull = false; - Oid tempTableOid = InvalidOid; - - partTableHeapDesc = RelationGetDescr(partTableRel); - - /* get reloptions */ - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(partTableRel))); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for relation %u", RelationGetRelid(partTableRel)))); - } - partTableRelOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isNull); - if (isNull) { - partTableRelOptions = (Datum)0; - } - - // create temp table - tempTableOid = makePartitionNewHeap(partTableRel, - partTableHeapDesc, - partTableRelOptions, - part->pd_id, - part->pd_part->reltoastrelid, - part->pd_part->reltablespace); - - ReleaseSysCache(tuple); - - return tempTableOid; -} - -static void readTuplesAndInsertInternal(Relation tempTableRel, Relation partTableRel, int2 bucketId) -{ - TableScanDesc scan = NULL; - Tuple tuple = NULL; - HTAB *partRelHTAB = NULL; - bool relisustore = RelationIsUstoreFormat(tempTableRel); - - scan = tableam_scan_begin(tempTableRel, SnapshotNow, 0, NULL); - - while ((tuple = tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - Oid targetPartOid = InvalidOid; - int partitionno = INVALID_PARTITION_NO; - Oid targetSubPartOid = InvalidOid; - int subpartitionno = INVALID_PARTITION_NO; - Relation partRel = NULL; - Partition part = NULL; - Relation subPartRel = NULL; - 
Partition subPart = NULL; - Tuple copyTuple = NULL; - - /* tableam_tops_copy_tuple is not ready so we add UStore hack path */ - copyTuple = tableam_tops_copy_tuple(tuple); - targetPartOid = heapTupleGetPartitionId(partTableRel, (void *)tuple, &partitionno, true); - searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partitionno, - partRel, part, RowExclusiveLock); - if (RelationIsSubPartitioned(partTableRel)) { - targetSubPartOid = heapTupleGetPartitionId(partRel, (void *)tuple, &subpartitionno, true); - searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partRel, targetSubPartOid, - subpartitionno, subPartRel, subPart, RowExclusiveLock); - partRel = subPartRel; - } - if (bucketId != InvalidBktId) { - searchHBucketFakeRelation(partRelHTAB, CurrentMemoryContext, partRel, bucketId, partRel); - } - - if (RelationIsSubPartitioned(partTableRel)) { - AlterSubPartitionedSetWaitCleanGPI(true, partTableRel, targetPartOid, targetSubPartOid); - } else { - AlterPartitionedSetWaitCleanGPI(true, partTableRel, targetPartOid); - } - - if (relisustore) { - Oid reloid = RelationGetRelid(partRel); - if (reloid != InvalidOid) { - ((UHeapTuple)copyTuple)->table_oid = reloid; - ((UHeapTuple)copyTuple)->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; - } - } - tableam_tuple_insert(partRel, copyTuple, GetCurrentCommandId(true), 0, NULL); - HeapTuple tup = (HeapTuple)copyTuple; - tableam_tops_free_tuple(tup); - } - - tableam_scan_end(scan); - - if (PointerIsValid(partRelHTAB)) { - FakeRelationCacheDestroy(partRelHTAB); - } -} - -static void readTuplesAndInsert(Relation tempTableRel, Relation partTableRel) -{ - if (RELATION_CREATE_BUCKET(tempTableRel)) { - Relation bucketRel = NULL; - oidvector* bucketlist = searchHashBucketByOid(tempTableRel->rd_bucketoid); - - for (int i = 0; i < bucketlist->dim1; i++) { - bucketRel = bucketGetRelation(tempTableRel, NULL, bucketlist->values[i]); - readTuplesAndInsertInternal(bucketRel, 
partTableRel, bucketlist->values[i]); - bucketCloseRelation(bucketRel); - } - } else { - readTuplesAndInsertInternal(tempTableRel, partTableRel, InvalidBktId); - } -} - -List* transformIntoTargetType(FormData_pg_attribute* attrs, int2 keypos, List* boundary) -{ - Node* nodeBoundaryItem = NULL; - Const* constBoundaryItem = NULL; - Const* targetConstBoundaryItem = NULL; - ListCell* cell = NULL; - List* newBoundaryList = NULL; - - foreach (cell, boundary) { - nodeBoundaryItem = (Node*)lfirst(cell); - Assert(nodeTag(nodeBoundaryItem) == T_Const); - constBoundaryItem = (Const*)nodeBoundaryItem; - if (constBoundaryItem->ismaxvalue) { - targetConstBoundaryItem = constBoundaryItem; - } else { - targetConstBoundaryItem = (Const*)GetTargetValue(&attrs[keypos - 1], constBoundaryItem, false); - if (!PointerIsValid(targetConstBoundaryItem)) { - list_free_ext(newBoundaryList); - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("partition key value must be const or const-evaluable expression"))); - } - if (!OidIsValid(targetConstBoundaryItem->constcollid) && OidIsValid(attrs[keypos - 1].attcollation)) { - targetConstBoundaryItem->constcollid = attrs[keypos - 1].attcollation; - } - } - Assert(nodeTag(targetConstBoundaryItem) == T_Const); - newBoundaryList = lappend(newBoundaryList, targetConstBoundaryItem); - } - - return newBoundaryList; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -List* transformConstIntoTargetType(FormData_pg_attribute* attrs, int2vector* partitionKey, List* boundary, bool partkeyIsFunc) -{ - int counter = 0; - int2 partKeyPos = 0; - Node* nodeBoundaryItem = NULL; - Const* constBoundaryItem = NULL; - Const* targetConstBoundaryItem = NULL; - ListCell* cell = NULL; - List* newBoundaryList = NULL; - - if (partitionKey->dim1 != boundary->length) { - ereport(ERROR, - - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("number of boundary items NOT EQUAL to number of partition keys"))); - } - - 
foreach (cell, boundary) { - nodeBoundaryItem = (Node*)lfirst(cell); - Assert(nodeTag(nodeBoundaryItem) == T_Const); - constBoundaryItem = (Const*)nodeBoundaryItem; - - partKeyPos = partitionKey->values[counter++]; - - if (constBoundaryItem->ismaxvalue) { - targetConstBoundaryItem = constBoundaryItem; - } else { - targetConstBoundaryItem = (Const*)GetTargetValue(&attrs[partKeyPos - 1], constBoundaryItem, false, partkeyIsFunc); - if (!PointerIsValid(targetConstBoundaryItem)) { - list_free_ext(newBoundaryList); - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("partition key value must be const or const-evaluable expression"))); - } - if (!OidIsValid(targetConstBoundaryItem->constcollid) && OidIsValid(attrs[partKeyPos - 1].attcollation)) { - targetConstBoundaryItem->constcollid = attrs[partKeyPos - 1].attcollation; - } - } - Assert(nodeTag(targetConstBoundaryItem) == T_Const); - newBoundaryList = lappend(newBoundaryList, targetConstBoundaryItem); - } - - return newBoundaryList; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : create a toast for a new partition - * Description : - * Input : relation: partitined table 's relation - * : - * Output : - * Return : - * Notes : - */ -void addToastTableForNewPartition(Relation relation, Oid newPartId, bool isForSubpartition) -{ - Oid firstPartitionId = InvalidOid; - Oid firstPartitionToastId = InvalidOid; - Partition firstPartition = NULL; - Datum reloptions = (Datum)0; - bool isnull = false; - HeapTuple reltuple = NULL; - - /* create toast table */ - firstPartitionId = ((RangePartitionMap*)relation->partMap)->rangeElements[0].partitionOid; - firstPartition = partitionOpen(relation, firstPartitionId, NoLock); - firstPartitionToastId = firstPartition->pd_part->reltoastrelid; - - if (OidIsValid(firstPartitionToastId)) { - reltuple = SearchSysCache1(RELOID, ObjectIdGetDatum(firstPartitionToastId)); - if (!PointerIsValid(reltuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - 
errmsg("cache lookup failed for toast table: %u", firstPartitionToastId))); - } - reloptions = SysCacheGetAttr(RELOID, reltuple, Anum_pg_class_reloptions, &isnull); - - if (isnull) { - reloptions = (Datum)0; - } - - if (isForSubpartition) { - (void)CreateToastTableForSubPartition(relation, newPartId, reloptions, AccessExclusiveLock); - } else { - (void)createToastTableForPartition(relation->rd_id, newPartId, reloptions, AccessExclusiveLock); - } - - if (PointerIsValid(reltuple)) { - ReleaseSysCache(reltuple); - } - - /* Make the changes visible */ - CommandCounterIncrement(); - } - partitionClose(relation, firstPartition, NoLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : create a cuDesc table for a new partition - * Description : - * Input : relation: partitined table 's relation - * : - * Output : - * Return : - * Notes : - */ -static void addCudescTableForNewPartition(Relation relation, Oid newPartId) -{ - Oid firstPartitionId = InvalidOid; - Oid firstPartitionCudescTableId = InvalidOid; - Partition firstPartition = NULL; - Datum reloptions = (Datum)0; - bool isnull = false; - HeapTuple reltuple = NULL; - - /* create toast table */ - firstPartitionId = ((RangePartitionMap*)relation->partMap)->rangeElements[0].partitionOid; - firstPartition = partitionOpen(relation, firstPartitionId, NoLock); - firstPartitionCudescTableId = firstPartition->pd_part->relcudescrelid; - - if (OidIsValid(firstPartitionCudescTableId)) { - reltuple = SearchSysCache1(RELOID, ObjectIdGetDatum(firstPartitionCudescTableId)); - if (!PointerIsValid(reltuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for cuDesc table: %u", firstPartitionCudescTableId))); - } - reloptions = SysCacheGetAttr(RELOID, reltuple, Anum_pg_class_reloptions, &isnull); - - if (isnull) { - reloptions = (Datum)0; - } - - (void)createCUDescTableForPartition(relation->rd_id, newPartId, reloptions); - if (PointerIsValid(reltuple)) { - 
ReleaseSysCache(reltuple); - } - - /* Make the changes visible */ - CommandCounterIncrement(); - } - partitionClose(relation, firstPartition, NoLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : create a delta table for a new partition - * Description : - * Input : relation: partitined table 's relation - * : - * Output : - * Return : - * Notes : - */ -static void addDeltaTableForNewPartition(Relation relation, Oid newPartId) -{ - Oid firstPartitionId = InvalidOid; - Oid firstPartitionDeltaTableId = InvalidOid; - Partition firstPartition = NULL; - Datum reloptions = (Datum)0; - bool isnull = false; - HeapTuple reltuple = NULL; - - /* create toast table */ - firstPartitionId = ((RangePartitionMap*)relation->partMap)->rangeElements[0].partitionOid; - firstPartition = partitionOpen(relation, firstPartitionId, NoLock); - firstPartitionDeltaTableId = firstPartition->pd_part->reldeltarelid; - - if (OidIsValid(firstPartitionDeltaTableId)) { - reltuple = SearchSysCache1(RELOID, ObjectIdGetDatum(firstPartitionDeltaTableId)); - if (!PointerIsValid(reltuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for delta table: %u", firstPartitionDeltaTableId))); - } - reloptions = SysCacheGetAttr(RELOID, reltuple, Anum_pg_class_reloptions, &isnull); - - if (isnull) { - reloptions = (Datum)0; - } - - (void)createDeltaTableForPartition(relation->rd_id, newPartId, reloptions, NULL); - if (PointerIsValid(reltuple)) { - ReleaseSysCache(reltuple); - } - - /* Make the changes visible */ - CommandCounterIncrement(); - } - partitionClose(relation, firstPartition, NoLock); -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : create a partiton for a special SN(sequence number) - * Description : - * Input : relation: partitined table 's relation - * : seqnum: the sequnce number for interval partition to be created - * Output : - * Return : the oid of the interval partition to be created - * Notes : - */ -Oid 
addPartitionBySN(Relation relation, int seqnum) -{ - return InvalidOid; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : get boundary for table partition - * Description : - * Notes : - */ -Datum caculateBoundary(Datum transpoint, Oid attrtypid, Datum intervalue, Oid intertypid, int seqnum) -{ - return (Datum)0; -} - -#ifdef PGXC -/* - * IsTempTable - * - * Check if given table Oid is temporary. - */ -bool IsTempTable(Oid relid) -{ - HeapTuple tuple = NULL; - bool res = false; - - // if oid is invalid, it's not a table, and not a temp table. - if (InvalidOid == relid) - return false; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - return false; - } - Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple); - res = (classForm->relpersistence == RELPERSISTENCE_TEMP); - ReleaseSysCache(tuple); - - return res; -} - -bool IsGlobalTempTable(Oid relid) -{ - HeapTuple tuple = NULL; - bool res = false; - - // if oid is invalid, it's not a table, and not a temp table. - if (InvalidOid == relid) - return false; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - return false; - } - Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple); - res = (classForm->relpersistence == RELPERSISTENCE_GLOBAL_TEMP); - ReleaseSysCache(tuple); - - return res; -} - -bool IsGlobalTempTableParallelTrunc() -{ -#ifndef ENABLE_MULTIPLE_NODES - if (u_sess->attr.attr_sql.enable_gtt_concurrent_truncate) { - return true; - } -#endif - - return false; -} - -/* - * IsUnloggedTable - * - * Check if given table Oid is unlogged. - */ -bool IsUnloggedTable(Oid relid) -{ - HeapTuple tuple = NULL; - bool res = false; - - // if oid is invalid, it's not a table, and not a unlogged table. 
- if (InvalidOid == relid) - return false; - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tuple)) { - return false; - } - Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple); - res = (classForm->relpersistence == RELPERSISTENCE_UNLOGGED); - ReleaseSysCache(tuple); - - return res; -} - -/* - * IsRelaionView - * - * Check if given object Oid is view. - */ -bool IsRelaionView(Oid relid) -{ - Relation rel; - bool res = false; - /* - * Is it correct to open without locks? - * we just check if this object is view. - */ - rel = relation_open(relid, NoLock); - res = (rel->rd_rel->relkind == RELKIND_VIEW || rel->rd_rel->relkind == RELKIND_CONTQUERY); - relation_close(rel, NoLock); - return res; -} - -/* - * IsIndexUsingTemp - * - * Check if given index relation uses temporary tables. - */ -bool IsIndexUsingTempTable(Oid relid) -{ - bool res = false; - HeapTuple tuple; - Oid parent_id = InvalidOid; - - tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relid)); - if (HeapTupleIsValid(tuple)) { - Form_pg_index index = (Form_pg_index)GETSTRUCT(tuple); - parent_id = index->indrelid; - - /* Release system cache BEFORE looking at the parent table */ - ReleaseSysCache(tuple); - - res = IsTempTable(parent_id); - } else - res = false; /* Default case */ - - return res; -} - -/* - * IsOnCommitActions - * - * Check if there are any on-commit actions activated. - */ -bool IsOnCommitActions(void) -{ - return list_length(u_sess->cmd_cxt.on_commits) > 0; -} - -/* - * DropTableThrowErrorExternal - * - * Error interface for DROP when looking for execution node type. 
- */ -void DropTableThrowErrorExternal(RangeVar* relation, ObjectType removeType, bool missing_ok) -{ - char relkind; - - /* Determine required relkind */ - switch (removeType) { - case OBJECT_TABLE: - relkind = RELKIND_RELATION; - break; - - case OBJECT_INDEX: - relkind = RELKIND_INDEX; - break; - - case OBJECT_SEQUENCE: - relkind = RELKIND_SEQUENCE; - break; - - case OBJECT_LARGE_SEQUENCE: - relkind = RELKIND_LARGE_SEQUENCE; - break; - - case OBJECT_VIEW: - relkind = RELKIND_VIEW; - break; - - case OBJECT_MATVIEW: - relkind = RELKIND_MATVIEW; - break; - - case OBJECT_CONTQUERY: - relkind = RELKIND_CONTQUERY; - break; - - case OBJECT_FOREIGN_TABLE: - relkind = RELKIND_FOREIGN_TABLE; - break; - - case OBJECT_STREAM: - relkind = RELKIND_STREAM; - break; - - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized drop object type: %d", (int)removeType))); - - relkind = 0; /* keep compiler quiet */ - } break; - } - - DropErrorMsgNonExistent(relation->relname, relkind, missing_ok); -} -#endif - -typedef struct CompressMetaInfo { - const char* id; - const int8 type; -} CompressMetaInfo; - -static const CompressMetaInfo s_CmprsMetaInfo[] = { - {"CMPRS_NOT_SUPPORT", REL_CMPRS_NOT_SUPPORT}, - - /* IMPORTANT: add compression type after this line */ - {"NOCOMPRESS", REL_CMPRS_PAGE_PLAIN}, - {"COMPRESS", REL_CMPRS_FIELDS_EXTRACT}, - - /* IMPORTANT: add compression type before this line */ - {"CMPRS_MAX", REL_CMPRS_MAX_TYPE}, -}; - -#define GET_FIRST_CMPRS_METAITEM() ((CompressMetaInfo*)(s_CmprsMetaInfo + 1)) -#define MOVE_NEXT_CMPRS_METAITEM(item) ((item)++) -#define LAST_CMPRS_METAITEM(item) ((item)->type == REL_CMPRS_MAX_TYPE) - -static int8 getCompressType(const char* id) -{ - const CompressMetaInfo* metaItem = NULL; - - metaItem = GET_FIRST_CMPRS_METAITEM(); - while (!LAST_CMPRS_METAITEM(metaItem)) { - if (0 == pg_strcasecmp(metaItem->id, id)) { - break; - } - - MOVE_NEXT_CMPRS_METAITEM(metaItem); - } - - return metaItem->type; -} - 
-static List* ATExecReplaceRelOptionListCell(List* options, char* keyName, char* newValue) -{ - ListCell* opt = NULL; - ListCell* prev = NULL; - bool found = false; - - foreach (opt, options) { - DefElem* optDef = (DefElem*)lfirst(opt); - if (pg_strcasecmp(optDef->defname, keyName) == 0) { - found = true; - break; - } - prev = opt; - } - - if (found) { - /* first delete this list cell. */ - options = list_delete_cell(options, opt, prev); - - /* insert new value */ - DefElem* newElem = makeDefElem(keyName, (Node*)makeString(newValue)); - options = lappend(options, newElem); - } - - return options; -} - -/* - * ATDisableROTCompress is short for: Alter table disable row-oriented-table compress - */ -static void ATDisableROTCompress(const Relation rel, const int8 cmprsType) -{ - if (!RelationIsRowFormat(rel)) { - return; - } - if (IsCompressedByCmprsInPgclass((RelCompressType)cmprsType)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("row-oriented table does not support compression"))); - } - /* - * relcmprs(pg_class's field) of mlog table in materialized view is 0, - * and it does not satisfys the following check. Because we have disabled - * compression for row-oriented table, we work around it with reporting - * an error and forbid coredump. - * - * Why relcmprs of mlog table is 0? The reason is in GetCreateTableStmt. - * It uses row_compress in stmt->into, and this value is created by - * rule CreateMatViewStmt in gram.y. It is not convenient for us to set it - * to 1 there, otherwise, it is too hack. We can not let users specify - * compress/nocompress when creating materialized view, either. - * - * In summary, checking it here may be the best choice. 
- */ - if (CHECK_CMPRS_NOT_SUPPORT(RELATION_GET_CMPRS_ATTR(rel))) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("row-oriented table does not support compression."))); - } -} - -static void ATExecSetCompress(Relation rel, const char* cmprsId) -{ - Oid relId = RelationGetRelid(rel); - Relation pgclass; - HeapTuple relTuple; - const int8 cmprsType = getCompressType(cmprsId); - - ATDisableROTCompress(rel, cmprsType); - - Assert(!CHECK_CMPRS_NOT_SUPPORT(RELATION_GET_CMPRS_ATTR(rel))); - Assert(CHECK_CMPRS_VALID(cmprsType)); - - /* always overwrite relcmprs field and compression options for row relation */ - pgclass = heap_open(RelationRelationId, RowExclusiveLock); - - relTuple = SearchSysCacheCopy1((int)RELOID, ObjectIdGetDatum(relId)); - if (!HeapTupleIsValid(relTuple)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relId))); - } - - Datum replVal[Natts_pg_class] = {0}; - bool replNull[Natts_pg_class] = {false}; - bool replChange[Natts_pg_class] = {false}; - - errno_t rc = memset_s(replVal, sizeof(replVal), 0, sizeof(replVal)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replNull, sizeof(replNull), false, sizeof(replNull)); - securec_check(rc, "\0", "\0"); - rc = memset_s(replChange, sizeof(replChange), false, sizeof(replChange)); - securec_check(rc, "\0", "\0"); - - /* first change the value of field RELCMPRS in pg_class */ - replVal[ANUM_PG_CLASS_RELCMPRS - 1] = cmprsType; - replChange[ANUM_PG_CLASS_RELCMPRS - 1] = true; - - /* then change the value of COMPRESSION in relation options field */ - bool isNull = false; - Datum relOptions = heap_getattr(relTuple, Anum_pg_class_reloptions, RelationGetDescr(pgclass), &isNull); - if (!isNull) { - List* options = untransformRelOptions(relOptions); - options = ATExecReplaceRelOptionListCell(options, - "compression", - IsCompressedByCmprsInPgclass((RelCompressType)cmprsType) ? 
pstrdup(COMPRESSION_YES) - : pstrdup(COMPRESSION_NO)); - relOptions = transformRelOptions((Datum)0, options, NULL, NULL, false, false); - list_free_deep(options); - - replVal[Anum_pg_class_reloptions - 1] = relOptions; - replChange[Anum_pg_class_reloptions - 1] = true; - } - - HeapTuple newTuple = (HeapTuple) tableam_tops_modify_tuple(relTuple, RelationGetDescr(pgclass), replVal, replNull, replChange); - simple_heap_update(pgclass, &newTuple->t_self, newTuple); - CatalogUpdateIndexes(pgclass, newTuple); - tableam_tops_free_tuple(newTuple); - tableam_tops_free_tuple(relTuple); - heap_close(pgclass, RowExclusiveLock); -} - -/* CStore Rewrite Table Methods */ -#include "access/cstore_rewrite.h" - -#define RelAttrName(__tupdesc, __attridx) (NameStr((__tupdesc)->attrs[(__attridx)].attname)) - -// get all the attributes to be checked or rewrited. -// -static void ATCStoreGetRewriteAttrs(_in_ AlteredTableInfo* tab, _in_ TupleDesc oldTupDesc, _in_ TupleDesc newTupDesc, - _out_ CStoreRewriteColumn** rewriteInfo, _out_ bool* rewriteFlags, _out_ int* nColsOfEachType) -{ - Assert(newTupDesc->natts >= oldTupDesc->natts); - Assert(nColsOfEachType && rewriteFlags); - - for (int pass = 0; pass < AT_NUM_PASSES; ++pass) { - if (tab->subcmds[pass] == NIL) { - continue; - } - - List* subcmds = tab->subcmds[pass]; - ListCell* cmdCell = NULL; - - foreach (cmdCell, subcmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(cmdCell); - - // ADD COLUMN. - if (pass == AT_PASS_ADD_COL) { - Assert(cmd->def && cmd->def->type == T_ColumnDef); - ColumnDef* colDef = (ColumnDef*)cmd->def; - Assert(colDef->colname && colDef->colname[0] != '\0'); - - // search newTupDesc' attributes backward, so as to reduce loop as possible. 
- for (int attrIdx = newTupDesc->natts - 1; attrIdx >= 0; --attrIdx) { - if (pg_strcasecmp(colDef->colname, RelAttrName(newTupDesc, attrIdx)) == 0) { - Assert(rewriteInfo[attrIdx] == NULL); - rewriteInfo[attrIdx] = CStoreRewriteColumn::CreateForAddColumn(attrIdx + 1); - - // collect how many new columns will be added. - ++nColsOfEachType[CSRT_ADD_COL]; - rewriteFlags[attrIdx] = true; - break; - } - } - continue; - } - - // ALTER COLUMN DATA TYPE. - if (pass == AT_PASS_ALTER_TYPE) { - Assert(cmd->name && cmd->name[0] != '\0'); - for (int attrIdx = 0; attrIdx < oldTupDesc->natts; ++attrIdx) { - if (pg_strcasecmp(cmd->name, RelAttrName(oldTupDesc, attrIdx)) == 0) { - // forbit multiple ALTER TYPE on the same column. - Assert(rewriteInfo[attrIdx] == NULL); - rewriteInfo[attrIdx] = CStoreRewriteColumn::CreateForSetDataType(attrIdx + 1); - - // collect how many existing columns will be changed data type. - ++nColsOfEachType[CSRT_SET_DATA_TYPE]; - rewriteFlags[attrIdx] = true; - break; - } - } - continue; - } - } - } -} - -static void ATCStoreRewriteTable(AlteredTableInfo* tab, Relation oldHeapRel, LOCKMODE lockMode, Oid targetTblspc) -{ - bool tblspcChanged = NeedToSetTableSpace(oldHeapRel, targetTblspc); - Oid newfilenode = InvalidOid; - Oid cudescOid = InvalidOid; - Oid cudescIdxOid = InvalidOid; - Relation cudescRel = NULL; - Relation pg_class = NULL; - Relation CUReplicationRel = NULL; - HeapTuple pgclass_tuple = NULL; - Form_pg_class pgclass_form = NULL; - CStoreRewriter* rewriter = NULL; - errno_t rc; - - /* - * Notice: old TupleDesc has been copied and saved in tab->oldDesc. - * now the new TupleDesc can be seen and found in oldHeapRel. 
- */ - TupleDesc newTupDesc = RelationGetDescr(oldHeapRel); - TupleDesc oldTupDesc = tab->oldDesc; - Assert(newTupDesc->natts >= oldTupDesc->natts); - - /* unsupported table/column constraints: CHECK; FOREIGN EKY; */ - Assert(tab->constraints == NIL); - - int nColsOfEachType[CSRT_NUM]; - rc = memset_s(nColsOfEachType, sizeof(int) * CSRT_NUM, 0, sizeof(int) * CSRT_NUM); - securec_check(rc, "", ""); - - int maxCols = newTupDesc->natts; - bool* rewriteFlags = (bool*)palloc0(sizeof(bool) * maxCols); - CStoreRewriteColumn** rewriteInfo = (CStoreRewriteColumn**)palloc0(sizeof(void*) * maxCols); - - /* split out: ADD COLUMNs; SET DATA TYPE COLUMNs; the others */ - ATCStoreGetRewriteAttrs(tab, oldTupDesc, newTupDesc, rewriteInfo, rewriteFlags, nColsOfEachType); - - /* set recomputing expression for updated columns. */ - ListCell* l = NULL; - foreach (l, tab->newvals) { - NewColumnValue* ex = (NewColumnValue*)lfirst(l); - ex->exprstate = ExecInitExpr((Expr*)ex->expr, NULL); - - /* we expect only one NewColumnValue for each attrubute. */ - Assert(rewriteInfo[ex->attnum - 1] != NULL); - Assert(rewriteInfo[ex->attnum - 1]->newValue == NULL); - - ColumnNewValue* newValExp = (ColumnNewValue*)palloc(sizeof(ColumnNewValue)); - newValExp->expr = ex->expr; - newValExp->exprstate = ex->exprstate; - rewriteInfo[ex->attnum - 1]->newValue = newValExp; - } - - /* set NOT NULL constraint for updated columns. */ - if (tab->rewrite>0 || tab->new_notnull) { - for (int i = 0; i < maxCols; ++i) { - if (rewriteInfo[i] != NULL && !rewriteInfo[i]->isDropped && newTupDesc->attrs[i].attnotnull) { - rewriteInfo[i]->notNull = true; - } - } - } - - /* rewrite the column-store table. */ - rewriter = New(CurrentMemoryContext) CStoreRewriter(oldHeapRel, oldTupDesc, newTupDesc); - - /* lock order: - * 1. column relation - * 2. Delta relation [ Delta Index relation ] - * 3. 
Cudesc relation + Cudesc Index relation - */ - if (OidIsValid(oldHeapRel->rd_rel->reldeltarelid)) { - LockRelationOid(oldHeapRel->rd_rel->reldeltarelid, lockMode); - } - cudescOid = oldHeapRel->rd_rel->relcudescrelid; - cudescRel = heap_open(cudescOid, lockMode); - cudescIdxOid = cudescRel->rd_rel->relcudescidx; - /* don't index_open(cudescIdxOid) becuase rewriter need to reindex Cudesc relation */ - LockRelationOid(cudescIdxOid, lockMode); - - if (tblspcChanged) { - /* Handle Delta && Delta Index Relation - * Now it's safe to copy the relation data by block directly, - * because Delta Relation now is unusable and has no data. - * - * When Delta Relation is usable, all tuples in it must be scanned, - * re-written and merged/appended into CU files - */ - ChangeTableSpaceForDeltaRelation(oldHeapRel->rd_rel->reldeltarelid, targetTblspc, lockMode); - - /* Handle each column' data */ - /* Here it's safe to open pg_class relation, because: - * 1. Cannot change tablespace of partitioned table; - * 2. Cannot ADD COLUMN/SET DATA TYPE for one partition; - * 3. Changing tablespace for one partition cannot hit this branch; - * so it's a ordinary relation from pg_class. - */ - pg_class = heap_open(RelationRelationId, RowExclusiveLock); - - /* Get a modifiable copy of the relation's pg_class row */ - pgclass_tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(tab->relid)); - if (!HeapTupleIsValid(pgclass_tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", tab->relid))); - } - pgclass_form = (Form_pg_class)GETSTRUCT(pgclass_tuple); - - /* - * Relfilenodes are not unique across tablespaces, so we need to allocate - * a new one in the new tablespace. 
- */ - newfilenode = GetNewRelFileNode(targetTblspc, NULL, oldHeapRel->rd_rel->relpersistence); - - RelFileNode CUReplicationFile = { - ConvertToRelfilenodeTblspcOid(targetTblspc), oldHeapRel->rd_node.dbNode, newfilenode, InvalidBktId}; - CUReplicationRel = CreateCUReplicationRelation(CUReplicationFile, - oldHeapRel->rd_backend, - oldHeapRel->rd_rel->relpersistence, - RelationGetRelationName(oldHeapRel)); - - for (int i = 0; i < maxCols; ++i) { - Form_pg_attribute thisattr = &newTupDesc->attrs[i]; - - /* skip the dropped and rewritted columns */ - if (!thisattr->attisdropped && !rewriteFlags[i]) { - CStoreCopyColumnData(CUReplicationRel, oldHeapRel, thisattr->attnum); - } - } - - /* prepare to handle ADD COLUMNs + SET DATATYPE COLUMNs */ - rewriter->ChangeTableSpace(CUReplicationRel); - } - - /* handle Cudesc relation + ADD COLUMNs + SET DATATYPE COLUMNs */ - rewriter->BeginRewriteCols(maxCols, rewriteInfo, nColsOfEachType, rewriteFlags); - rewriter->RewriteColsData(); - rewriter->EndRewriteCols(); - DELETE_EX(rewriter); - - if (tblspcChanged) { - CStoreCopyColumnDataEnd(oldHeapRel, targetTblspc, newfilenode); - - /* destroy fake relation */ - FreeFakeRelcacheEntry(CUReplicationRel); - - /* update the pg_class row */ - pgclass_form->reltablespace = ConvertToPgclassRelTablespaceOid(targetTblspc); - pgclass_form->relfilenode = newfilenode; - simple_heap_update(pg_class, &pgclass_tuple->t_self, pgclass_tuple); - CatalogUpdateIndexes(pg_class, pgclass_tuple); - - tableam_tops_free_tuple(pgclass_tuple); - heap_close(pg_class, RowExclusiveLock); - - /* Make sure the reltablespace change is visible */ - CommandCounterIncrement(); - - /* Handle Cudesc Index Relation */ - ATExecSetTableSpace(cudescIdxOid, ConvertToRelfilenodeTblspcOid(targetTblspc), lockMode); - } - - /* unlock until committed */ - heap_close(cudescRel, NoLock); - - /* clean up work at last. 
*/ - for (int k = 0; k < maxCols; ++k) { - if (rewriteInfo[k]) { - CStoreRewriteColumn::Destroy(&rewriteInfo[k]); - } - } - pfree_ext(rewriteInfo); - pfree_ext(rewriteFlags); -} - -static void ATCStoreRewritePartition(AlteredTableInfo* tab, LOCKMODE lockMode) -{ - Relation parentRel = NULL; - Relation newPartitionRel = NULL; - List* partitionsList = NULL; - ListCell* partitionCell = NULL; - - // construct a AlteredTableInfo object for a partition. - // when it's passed into ATCStoreRewriteTable(), we take it as - // the normall heap relation. therefore relid && partid shouldn't be - // used, and so reset them. - AlteredTableInfo* partitionTabInfo = (AlteredTableInfo*)palloc(sizeof(AlteredTableInfo)); - *partitionTabInfo = *tab; - partitionTabInfo->relid = InvalidOid; - partitionTabInfo->partid = InvalidOid; - - parentRel = heap_open(tab->relid, AccessExclusiveLock); - Assert(RELATION_IS_PARTITIONED(parentRel) == true); - - partitionsList = relationGetPartitionList(parentRel, AccessExclusiveLock); - foreach (partitionCell, partitionsList) { - newPartitionRel = partitionGetRelation(parentRel, (Partition)lfirst(partitionCell)); - - // rewrite each partition as the normall relation. 
- ATCStoreRewriteTable(partitionTabInfo, newPartitionRel, lockMode, newPartitionRel->rd_rel->reltablespace); - - releaseDummyRelation(&newPartitionRel); - } - - releasePartitionList(parentRel, &partitionsList, AccessExclusiveLock); - heap_close(parentRel, NoLock); - - pfree_ext(partitionTabInfo); - partitionTabInfo = NULL; -} - -/* - * @Description: forbidden to set tablespace for partitioned table - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ForbidToChangeTableSpaceOfPartitionedTable(AlteredTableInfo* tab) -{ - if (OidIsValid(tab->newTableSpace) && !OidIsValid(tab->partid)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("can not set tablespace for partitioned relation"), - errdetail("set tablespace for partition instead"))); - } -} - -/* - * @Description: rewrite row relation data. - * @Param[IN] lockmode: lock mode used during rewriting data - * @Param[IN] NewTableSpace: new tablespace used by row relation - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecRewriteRowTable(AlteredTableInfo* tab, Oid NewTableSpace, LOCKMODE lockmode) -{ - ForbidToRewriteOrTestCstoreIndex(tab); - Oid OIDNewHeap = make_new_heap(tab->relid, NewTableSpace); - - /* - * Copy the heap data into the new table with the desired - * modifications, and test the current data within the table - * against new constraints generated by ALTER TABLE commands. - */ - Relation oldRel = heap_open(tab->relid, NoLock); - Relation newRel = heap_open(OIDNewHeap, lockmode); - - /* - * Temporarily set the relOptions of the old rel to th ones before - * modification to execute rewrite table. - */ - if (tab->rewrite == AT_REWRITE_ALTER_COMPRESSION) { - oldRel->rd_node.opt = tab->opt; - } - ATRewriteTable(tab, oldRel, newRel); - heap_close(oldRel, NoLock); - heap_close(newRel, NoLock); - - /* - * Swap the physical files of the old and new heaps, then rebuild - * indexes and discard the old heap. 
We can use RecentXmin for - * the table's new relfrozenxid because we rewrote all the tuples - * in ATRewriteTable, so no older Xid remains in the table. Also, - * we never try to swap toast tables by content, since we have no - * interest in letting this code work on system catalogs. - */ - finish_heap_swap(tab->relid, OIDNewHeap, false, false, true, u_sess->utils_cxt.RecentXmin, - GetOldestMultiXactId(), NULL, tab); - - /* clear all attrinitdefval */ - clearAttrInitDefVal(tab->relid); -} - -/* - * @Description: rewrite column relation data. - * @Param[IN] lockmode: lock mode used during rewriting data - * @Param[IN] NewTableSpace: new tablespace used by column relation - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecRewriteCStoreTable(AlteredTableInfo* tab, Oid NewTableSpace, LOCKMODE lockmode) -{ - Relation OldHeap = heap_open(tab->relid, NoLock); - ATCStoreRewriteTable(tab, OldHeap, lockmode, NewTableSpace); - heap_close(OldHeap, NoLock); - - /* then, rebuild its index. */ - (void)ReindexRelation( - tab->relid, REINDEX_REL_SUPPRESS_INDEX_USE | REINDEX_REL_CHECK_CONSTRAINTS, REINDEX_ALL_INDEX, NULL); -} - -/* - * @Description: rewrite row partitioned relation data. - * Take each partition as a ordinary table, and rewrite it. - * @Param[IN] lockmode: lock mode used during rewriting each partition. - * @Param[IN] NewTableSpace: new tablespace used by some partition. 
- * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecRewriteRowPartitionedTable(AlteredTableInfo* tab, Oid NewTableSpace, LOCKMODE lockmode) -{ - Relation partitionedTableRel = NULL; - TupleDesc partTabHeapDesc = NULL; - HeapTuple tuple = NULL; - List* tempTableOidList = NIL; - List* partitions = NULL; - ListCell* cell = NULL; - Oid tempTableOid = InvalidOid; - Datum partTabRelOptions = 0; - int reindexFlags = 0; - bool isNull = false; - - ForbidToChangeTableSpaceOfPartitionedTable(tab); - ForbidToRewriteOrTestCstoreIndex(tab); - - partitionedTableRel = heap_open(tab->relid, AccessExclusiveLock); - partTabHeapDesc = RelationGetDescr(partitionedTableRel); - - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(tab->relid)); - if (!HeapTupleIsValid(tuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", tab->relid))); - } - partTabRelOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isNull); - if (isNull) { - partTabRelOptions = (Datum)0; - } - - if (RelationIsSubPartitioned(partitionedTableRel)) { - partitions = relationGetPartitionList(partitionedTableRel, AccessExclusiveLock); - foreach (cell, partitions) { - Partition partition = (Partition)lfirst(cell); - Relation partrel = partitionGetRelation(partitionedTableRel, partition); - - List *subpartitions = relationGetPartitionList(partrel, AccessExclusiveLock); - ListCell* subcell = NULL; - foreach (subcell, subpartitions) { - Partition subpartition = (Partition)lfirst(subcell); - Relation oldRel = partitionGetRelation(partrel, subpartition); - Datum relOptions = 0; - - /* - * Make new partition heap with the new reloptions when modifying - * compressed options. 
- */ - if (tab->rewrite == AT_REWRITE_ALTER_COMPRESSION) { - relOptions = tab->newOptions; - } else { - relOptions = partTabRelOptions; - } - /* make a temp table for swapping partition */ - Oid OIDNewHeap = makePartitionNewHeap(partrel, - RelationGetDescr(partrel), - relOptions, - oldRel->rd_id, - oldRel->rd_rel->reltoastrelid, - oldRel->rd_rel->reltablespace, - false, - partitionedTableRel->rd_rel->relfilenode); - - Relation newRel = heap_open(OIDNewHeap, lockmode); - /* rewrite the temp table by partition */ - ATRewriteTable(tab, oldRel, newRel); - heap_close(newRel, NoLock); - - /* swap the temp table and partition */ - finishPartitionHeapSwap(oldRel->rd_id, OIDNewHeap, false, u_sess->utils_cxt.RecentXmin, - GetOldestMultiXactId(), false, tab); - - /* record the temp table oid for dropping */ - tempTableOidList = lappend_oid(tempTableOidList, OIDNewHeap); - - releaseDummyRelation(&oldRel); - } - releasePartitionList(partrel, &subpartitions, AccessExclusiveLock); - releaseDummyRelation(&partrel); - } - } else { - partitions = relationGetPartitionList(partitionedTableRel, AccessExclusiveLock); - foreach (cell, partitions) { - Partition partition = (Partition)lfirst(cell); - Relation oldRel = partitionGetRelation(partitionedTableRel, partition); - Datum relOptions = 0; - - /* - * Make new partition heap with the new reloptions when modifying - * compressed options. 
- */ - if (tab->rewrite == AT_REWRITE_ALTER_COMPRESSION) { - relOptions = tab->newOptions; - } else { - relOptions = partTabRelOptions; - } - /* make a temp table for swapping partition */ - Oid OIDNewHeap = makePartitionNewHeap(partitionedTableRel, - partTabHeapDesc, - relOptions, - oldRel->rd_id, - oldRel->rd_rel->reltoastrelid, - oldRel->rd_rel->reltablespace); - - Relation newRel = heap_open(OIDNewHeap, lockmode); - /* rewrite the temp table by partition */ - ATRewriteTable(tab, oldRel, newRel); - heap_close(newRel, NoLock); - - /* swap the temp table and partition */ - finishPartitionHeapSwap(oldRel->rd_id, OIDNewHeap, false, u_sess->utils_cxt.RecentXmin, - GetOldestMultiXactId(), false, tab); - - /* record the temp table oid for dropping */ - tempTableOidList = lappend_oid(tempTableOidList, OIDNewHeap); - - releaseDummyRelation(&oldRel); - } - } - - ReleaseSysCache(tuple); - - /* rebuild index of partitioned table */ - reindexFlags = REINDEX_REL_SUPPRESS_INDEX_USE | REINDEX_REL_CHECK_CONSTRAINTS; - (void)ReindexRelation(tab->relid, reindexFlags, REINDEX_ALL_INDEX, NULL); - - /* drop the temp tables for swapping */ - foreach (cell, tempTableOidList) { - ObjectAddress object; - - tempTableOid = DatumGetObjectId(lfirst(cell)); - - object.classId = RelationRelationId; - object.objectId = tempTableOid; - object.objectSubId = 0; - - performDeletion(&object, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } - list_free_ext(tempTableOidList); - - releasePartitionList(partitionedTableRel, &partitions, AccessExclusiveLock); - heap_close(partitionedTableRel, NoLock); - - /* clear all attrinitdefval */ - clearAttrInitDefVal(tab->relid); -} - -/* - * @Description: rewrite column partitioned relation data. - * Take each partition as a ordinary table, and rewrite it. - * @Param[IN] lockmode: lock mode used during rewriting each partition. - * @Param[IN] targetTableSpace: new tablespace used by some partition. 
- * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecRewriteCStorePartitionedTable(AlteredTableInfo* tab, Oid targetTableSpace, LOCKMODE lockmode) -{ - /* forbid to change tablespace for partitioned table. - * so argument *targetTableSpace* is not used. - */ - ForbidToChangeTableSpaceOfPartitionedTable(tab); - - Relation OldHeap = heap_open(tab->relid, NoLock); - ATCStoreRewritePartition(tab, lockmode); - heap_close(OldHeap, NoLock); - - /* then, rebuild its index. */ - (void)ReindexRelation( - tab->relid, REINDEX_REL_SUPPRESS_INDEX_USE | REINDEX_REL_CHECK_CONSTRAINTS, REINDEX_ALL_INDEX, NULL); -} - -/* - * @Description: Only check relation data becuase constraints changed - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecOnlyTestRowTable(AlteredTableInfo* tab) -{ - ForbidToRewriteOrTestCstoreIndex(tab); - - Relation oldRel = heap_open(tab->relid, NoLock); - ATRewriteTable(tab, oldRel, NULL); - heap_close(oldRel, NoLock); -} - -/* - * @Description: Only check relation data becuase constraints changed - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecOnlyTestCStoreTable(AlteredTableInfo* tab) -{ -#ifdef ENABLE_MULTIPLE_NODES - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column stored relation doesn't support this feature"))); -#else - ForbidToRewriteOrTestCstoreIndex(tab); - - Relation rel = heap_open(tab->relid, NoLock); - ATOnlyCheckCStoreTable(tab, rel); - heap_close(rel, NoLock); - return; -#endif -} - -/** - * @Description: Build a list of all attributes. The attributes must has - * "NOT NULL" constraint and is not dropped column. - * @in tuple_desc, The tuple description. 
- * @return - */ -List* make_not_null_attrs(TupleDesc tuple_desc) -{ - List* not_null_attrs = NIL; - for (int i = 0; i < tuple_desc->natts; i++) { - if (tuple_desc->attrs[i].attnotnull && !tuple_desc->attrs[i].attisdropped) - not_null_attrs = lappend_int(not_null_attrs, i); - } - - return not_null_attrs; -} - -/* - * @Description: Only check relation data becuase constraints changed - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecOnlyTestRowPartitionedTable(AlteredTableInfo* tab) -{ - Relation partRel = NULL; - Partition partition = NULL; - ListCell* lc1 = NULL; - ListCell* lc2 = NULL; - Relation partitionedTableRel = NULL; - List* partitions = NULL; - Relation subPartRel = NULL; - Partition subPartition = NULL; - List* subPartitions = NULL; - - ForbidToRewriteOrTestCstoreIndex(tab); - - /* get all partitions of target partitioned table */ - partitionedTableRel = heap_open(tab->relid, NoLock); - partitions = relationGetPartitionList(partitionedTableRel, NoLock); - - foreach (lc1, partitions) { - partition = (Partition)lfirst(lc1); - partRel = partitionGetRelation(partitionedTableRel, partition); - if (RelationIsSubPartitioned(partitionedTableRel)) { - subPartitions = relationGetPartitionList(partRel, NoLock); - foreach (lc2, subPartitions) { - subPartition = (Partition)lfirst(lc2); - subPartRel = partitionGetRelation(partRel, subPartition); - /* check each partition */ - ATRewriteTable(tab, subPartRel, NULL); - releaseDummyRelation(&subPartRel); - } - releasePartitionList(partRel, &subPartitions, NoLock); - releaseDummyRelation(&partRel); - } else { - /* check each partition */ - ATRewriteTable(tab, partRel, NULL); - releaseDummyRelation(&partRel); - } - } - - releasePartitionList(partitionedTableRel, &partitions, NoLock); - heap_close(partitionedTableRel, NoLock); -} - -/* - * @Description: Only check relation data becuase constraints changed - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void 
ExecOnlyTestCStorePartitionedTable(AlteredTableInfo* tab) -{ -#ifdef ENABLE_MULTIPLE_NODES - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column stored relation doesn't support this feature"))); -#else - Relation partRel = NULL; - Partition partition = NULL; - ListCell* cell = NULL; - Relation partitionedTableRel = NULL; - List* partitions = NULL; - - ForbidToRewriteOrTestCstoreIndex(tab); - - /* get all partitions of target partitioned table */ - partitionedTableRel = heap_open(tab->relid, NoLock); - partitions = relationGetPartitionList(partitionedTableRel, NoLock); - - foreach (cell, partitions) { - partition = (Partition)lfirst(cell); - partRel = partitionGetRelation(partitionedTableRel, partition); - /* check each partition */ - ATOnlyCheckCStoreTable(tab, partRel); - releaseDummyRelation(&partRel); - } - - releasePartitionList(partitionedTableRel, &partitions, NoLock); - heap_close(partitionedTableRel, NoLock); -#endif -} - -/* - * @Description: Only check PSort relation data. 
- * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ForbidToRewriteOrTestCstoreIndex(AlteredTableInfo* tab) -{ - if (tab->relkind == RELKIND_INDEX || tab->relkind == RELKIND_GLOBAL_INDEX) { - Relation rel = index_open(tab->relid, AccessShareLock); - if (rel->rd_rel->relam == PSORT_AM_OID) { - index_close(rel, AccessShareLock); - - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("PSort relation doesn't support this feature")))); - } - - if (rel->rd_rel->relam == CBTREE_AM_OID) { - index_close(rel, AccessShareLock); - - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("CBtree relation doesn't support this feature")))); - } - - if (rel->rd_rel->relam == CGIN_AM_OID) { - index_close(rel, AccessShareLock); - - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("CGinBtree relation doesn't support this feature")))); - } - index_close(rel, AccessShareLock); - } -} - -/* - * @Description: SET TABLESPACE for psort relation - * @Param[IN] lockmode: lock mode used during changing tablespace. - * @Param[IN] newTableSpace: the new/target tablespace. - * @Param[IN] psortOid: Oid of PSort relation - * @See also: - */ -static void PSortChangeTableSpace(Oid psortOid, Oid newTableSpace, LOCKMODE lockmode) -{ - AlteredTableInfo* tabinfo = (AlteredTableInfo*)palloc0(sizeof(AlteredTableInfo)); - - /* fill the needed info */ - Assert(OidIsValid(newTableSpace)); - tabinfo->relid = psortOid; - tabinfo->relkind = RELKIND_RELATION; - tabinfo->newTableSpace = newTableSpace; - - /* treat psort as a column table */ - ExecChangeTableSpaceForCStoreTable(tabinfo, lockmode); - - pfree_ext(tabinfo); -} - -/* - * @Description: change tablespace for row relation. - * PSort index is handled in this branch, because its oid is remembered here. - * @Param[IN] lockmode: lock mode used during changing tablespace. 
- * @Param[IN] tab: Alter Table Info - * @See also: the comments of function ExecChangeTableSpaceForRowPartition() - */ -static void ExecChangeTableSpaceForRowTable(AlteredTableInfo* tab, LOCKMODE lockmode) -{ - ATExecSetTableSpace(tab->relid, tab->newTableSpace, lockmode); - - /* handle a special index type: PSORT index */ - if (tab->relkind == RELKIND_INDEX || tab->relkind == RELKIND_GLOBAL_INDEX) { - Relation rel = index_open(tab->relid, lockmode); - if (rel->rd_rel->relam == PSORT_AM_OID) { - PSortChangeTableSpace(rel->rd_rel->relcudescrelid, /* psort oid */ - tab->newTableSpace, - lockmode); - } - index_close(rel, NoLock); - Oid heapId = IndexGetRelation(tab->relid, false); - Relation userRel = RelationIdGetRelation(heapId); - UpdatePgObjectChangecsn(heapId, userRel->rd_rel->relkind); - RelationClose(userRel); - } -} - -/* - * @Description: change tablespace for Delta relation. - * @Param[IN] deltaOid: Oid of Delta relation - * @Param[IN] lockmode: lock mode used during changing tablespace. - * @Param[IN] targetTableSpace: the new tablespace. - * @See also: - */ -static inline void ChangeTableSpaceForDeltaRelation(Oid deltaOid, Oid targetTableSpace, LOCKMODE lockmode) -{ - if (OidIsValid(deltaOid)) { - /* ATExecSetTableSpace() requires that targetTableSpace is not InvalidOid */ - targetTableSpace = ConvertToRelfilenodeTblspcOid(targetTableSpace); - Assert(OidIsValid(targetTableSpace)); - - /* lock delta relation with lockmode */ - Relation deltaRel = heap_open(deltaOid, lockmode); - - /* change tablespace for Delta Relation */ - ATExecSetTableSpace(deltaOid, targetTableSpace, lockmode); - - /* unlock until committed */ - relation_close(deltaRel, NoLock); - - /* change tablespace for Delta Index Relation */ - } -} - -/* - * @Description: change tablespace for CUDesc and its index relation. 
- * @Param[IN] cudescIdxOid: Oid of Cudesc Index relation - * @Param[IN] cudescOid: Oid of Cudesc relation - * @Param[IN] lockmode: lock mode used during changing tablespace - * @Param[IN] targetTableSpace: the new tablespace - * @See also: - */ -static inline void ChangeTableSpaceForCudescRelation( - Oid cudescIdxOid, Oid cudescOid, Oid targetTableSpace, LOCKMODE lockmode) -{ - /* ATExecSetTableSpace() requires that targetTableSpace is valid */ - targetTableSpace = ConvertToRelfilenodeTblspcOid(targetTableSpace); - Assert(OidIsValid(targetTableSpace)); - - /* change tablespace for Cudesc Relation */ - Assert(OidIsValid(cudescOid)); - ATExecSetTableSpace(cudescOid, targetTableSpace, lockmode); - - /* change tablespace for Cudesc Index Relation */ - Assert(OidIsValid(cudescIdxOid)); - ATExecSetTableSpace(cudescIdxOid, targetTableSpace, lockmode); -} - -/* - * @Description: change tablespace for column relation. - * @Param[IN] lockmode: lock mode used during changing tablespace - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecChangeTableSpaceForCStoreTable(AlteredTableInfo* tab, LOCKMODE lockmode) -{ - Relation colRel = NULL; - Relation cudescRel = NULL; - Relation cudescIdxRel = NULL; - Oid cudescOid = InvalidOid; - Oid cudescIdxOid = InvalidOid; - Oid targetTableSpace = tab->newTableSpace; - Oid newrelfilenode = InvalidOid; - - /* here maybe open a heap relation or index relation, so call relation_open() */ - colRel = relation_open(tab->relid, lockmode); - - /* No work if no change in tablespace. */ - if (!NeedToSetTableSpace(colRel, targetTableSpace)) { - relation_close(colRel, NoLock); - return; - } - - /* lock order: - * 1. column relation - * 2. Delta relation [ Delta Index relation ] - * 3. 
Cudesc relation + Cudesc Index relation - */ - if (OidIsValid(colRel->rd_rel->reldeltarelid)) { - LockRelationOid(colRel->rd_rel->reldeltarelid, lockmode); - } - cudescOid = colRel->rd_rel->relcudescrelid; - cudescRel = heap_open(cudescOid, lockmode); - cudescIdxOid = cudescRel->rd_rel->relcudescidx; - cudescIdxRel = index_open(cudescIdxOid, lockmode); - - /* 1. Handle Delta && Delta Index Relation */ - ChangeTableSpaceForDeltaRelation(colRel->rd_rel->reldeltarelid, targetTableSpace, lockmode); - - /* 2. Handle each column' data */ - Relation pg_class = heap_open(RelationRelationId, RowExclusiveLock); - - /* Get a modifiable copy of the relation's pg_class row */ - HeapTuple tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(tab->relid)); - if (!HeapTupleIsValid(tuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", tab->relid))); - } - - Form_pg_class rd_rel = (Form_pg_class)GETSTRUCT(tuple); - newrelfilenode = CStoreSetTableSpaceForColumnData(colRel, targetTableSpace); - - /* update the pg_class row */ - rd_rel->reltablespace = ConvertToPgclassRelTablespaceOid(targetTableSpace); - rd_rel->relfilenode = newrelfilenode; - simple_heap_update(pg_class, &tuple->t_self, tuple); - CatalogUpdateIndexes(pg_class, tuple); - - tableam_tops_free_tuple(tuple); - heap_close(pg_class, RowExclusiveLock); - - /* Make sure the reltablespace change is visible */ - CommandCounterIncrement(); - - /* 3. Handle Cudesc && Index Relation */ - ChangeTableSpaceForCudescRelation(cudescIdxOid, cudescOid, targetTableSpace, lockmode); - - index_close(cudescIdxRel, NoLock); - heap_close(cudescRel, NoLock); - relation_close(colRel, NoLock); -} - -/* - * @Description: change tablespace for row partition. - * first, forbid to SET TABLESPACE for partitioned table. 
- * if it's a - * 1) row heap partition, - * 2) row index partition, - * the two are the same, - * step 1: copy data to new tablespace - * step 2: update pg_partition.reltablespace && pg_partition.relfilenode - * step 3: handle toast && toast index if necessary. - * 3) psort index partition, - * it's a column table, so that - * step 1: update pg_partition.reltablespace (ATExecSetTableSpaceForPartitionP3) - * step 2: handle psort as an ordinary column table. (ExecChangeTableSpaceForCStoreTable) - * - * @Param[IN] lockmode: lock mode used during changing tablespace - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecChangeTableSpaceForRowPartition(AlteredTableInfo* tab, LOCKMODE lockmode) -{ - ForbidToChangeTableSpaceOfPartitionedTable(tab); - - /* input lockmode is the lock mode of patitioned table, which is >= AccessShareLock. - * Take and example, t1 is a row partitioned relation and under tblspc1 tablespace. now execute - * ALTER TABLE t1 MOVE PARTITION p1 TO pg_default, ADD COLUMN c_char2 char(5); - * it will triggle Altering-Table-Instantly feature, and needn't rewrite all the tuple data. - * so lock mode is 8, and SET TABLESPACE branch is entered into. So that input lockmode may - * be not AccessShareLock. - * - * Here we should use the lock mode of partition, which is AccessExclusiveLock. - * see also ATExecSetTableSpaceForPartitionP2(). 
- */ - const LOCKMODE partitionLock = AccessExclusiveLock; - - ATExecSetTableSpaceForPartitionP3(tab->relid, tab->partid, tab->newTableSpace, partitionLock); - - /* handle a special index type: PSORT index */ - if (tab->relkind == RELKIND_INDEX || tab->relkind == RELKIND_GLOBAL_INDEX) { - Relation rel = index_open(tab->relid, NoLock); - if (rel->rd_rel->relam == PSORT_AM_OID) { - Partition part = partitionOpen(rel, tab->partid, partitionLock); - PSortChangeTableSpace(part->pd_part->relcudescrelid, /* psort oid */ - tab->newTableSpace, - partitionLock); - partitionClose(rel, part, NoLock); - } - index_close(rel, NoLock); - } -} - -/* - * @Description: change tablespace for column partition - * @Param[IN] lockmode: lock mode used during changing tablespace - * @Param[IN] tab: Alter Table Info - * @See also: - */ -static void ExecChangeTableSpaceForCStorePartition(AlteredTableInfo* tab, LOCKMODE lockmode) -{ - Relation parentRel = NULL; - Partition partition = NULL; - Relation partitionRel = NULL; - Relation cudescRel = NULL; - Relation cudescIdxRel = NULL; - Oid partOid = tab->partid; - Oid cudescOid = InvalidOid; - Oid cudescIdxOid = InvalidOid; - Oid targetTableSpace = tab->newTableSpace; - Oid newrelfilenode = InvalidOid; - - ForbidToChangeTableSpaceOfPartitionedTable(tab); - - /* input lockmode is the lock mode of patitioned table, which is AccessShareLock. - * Here we should use the lock mode of partition, which is AccessExclusiveLock. - * see also ATExecSetTableSpaceForPartitionP2(). - */ - const LOCKMODE partitionLock = AccessExclusiveLock; - - /* here maybe open a heap relation or index relation, so call relation_open() */ - parentRel = relation_open(tab->relid, NoLock); - partition = partitionOpen(parentRel, partOid, partitionLock); - partitionRel = partitionGetRelation(parentRel, partition); - - /* No work if no change in tablespace. 
*/ - if (!NeedToSetTableSpace(partitionRel, targetTableSpace)) { - releaseDummyRelation(&partitionRel); - partitionClose(parentRel, partition, NoLock); - relation_close(parentRel, NoLock); - return; - } - - /* lock order: - * 1. column relation - * 2. Delta relation [ Delta Index relation ] - * 3. Cudesc relation + Cudesc Index relation - */ - if (OidIsValid(partitionRel->rd_rel->reldeltarelid)) { - LockRelationOid(partitionRel->rd_rel->reldeltarelid, partitionLock); - } - cudescOid = partitionRel->rd_rel->relcudescrelid; - cudescRel = heap_open(cudescOid, partitionLock); - cudescIdxOid = cudescRel->rd_rel->relcudescidx; - cudescIdxRel = index_open(cudescIdxOid, partitionLock); - - /* 1. Handle Delta && Delta Index Relation */ - ChangeTableSpaceForDeltaRelation(partitionRel->rd_rel->reldeltarelid, targetTableSpace, partitionLock); - - /* 2. Handle each column' data */ - Relation pg_partition = heap_open(PartitionRelationId, RowExclusiveLock); - - /* Get a modifiable copy of the relation's pg_partition row */ - HeapTuple tuple = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(partOid)); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for partition %u", partOid))); - Form_pg_partition rd_rel = (Form_pg_partition)GETSTRUCT(tuple); - - newrelfilenode = CStoreSetTableSpaceForColumnData(partitionRel, targetTableSpace); - - /* update the pg_partition row */ - rd_rel->reltablespace = ConvertToPgclassRelTablespaceOid(targetTableSpace); - rd_rel->relfilenode = newrelfilenode; - simple_heap_update(pg_partition, &tuple->t_self, tuple); - CatalogUpdateIndexes(pg_partition, tuple); - - tableam_tops_free_tuple(tuple); - heap_close(pg_partition, RowExclusiveLock); - - /* Make sure the reltablespace change is visible */ - CommandCounterIncrement(); - - /* 3. 
Handle Cudesc && Index Relation */ - ChangeTableSpaceForCudescRelation(cudescIdxOid, cudescOid, targetTableSpace, partitionLock); - - index_close(cudescIdxRel, NoLock); - heap_close(cudescRel, NoLock); - - releaseDummyRelation(&partitionRel); - partitionClose(parentRel, partition, NoLock); - relation_close(parentRel, NoLock); -} - -/** - * @Description: Whether judge the column is partition column. - * @in rel, A relation. - * @in att_no, Attribute number. - * @return If the the column is partition column, return true, otherwise return false. - */ -bool is_partition_column(Relation rel, AttrNumber att_no) -{ - bool is_part_col = false; - - if (RelationIsValuePartitioned(rel)) { - List* part_col_list = ((ValuePartitionMap*)rel->partMap)->partList; - ListCell* lc = NULL; - foreach (lc, part_col_list) { - if (att_no == lfirst_int(lc)) { - is_part_col = true; - break; - } - } - } else if (RelationIsCommonPartitioned(rel)) { - int2vector* part_key = ((RangePartitionMap*)rel->partMap)->partitionKey; - for (int i = 0; i < part_key->dim1; i++) { - if (att_no == part_key->values[i]) { - is_part_col = true; - break; - } - } - } else if (RelationIsSubPartitioned(rel)) { - int2vector *partKey = ((RangePartitionMap *)rel->partMap)->partitionKey; - for (int i = 0; i < partKey->dim1; i++) { - if (att_no == partKey->values[i]) { - return true; - } - } - List *partOidList = relationGetPartitionOidList(rel); - Oid partOid = linitial_oid(partOidList); - Partition part = partitionOpen(rel, partOid, NoLock); - Relation partRel = partitionGetRelation(rel, part); - int2vector *subPartKey = ((RangePartitionMap *)partRel->partMap)->partitionKey; - for (int i = 0; i < subPartKey->dim1; i++) { - if (att_no == subPartKey->values[i]) { - is_part_col = true; - break; - } - } - releaseDummyRelation(&partRel); - partitionClose(rel, part, NoLock); - if (partOidList != NULL) { - releasePartitionOidList(&partOidList); - } - } - - return is_part_col; -} - -/** - * @Description: Reset every 
partition's start_ctid/end_ctid of rel - * @in rel, parent relation of partitions. - * @return void - */ -static void ResetPartsRedisCtidRelOptions(Relation rel) -{ - Relation pg_partition = NULL; - ScanKeyData key[2]; - SysScanDesc scan = NULL; - HeapTuple tuple = NULL; - TupleDesc part_tupdesc = NULL; - List* redis_reloptions = NIL; - - pg_partition = heap_open(PartitionRelationId, RowExclusiveLock); - part_tupdesc = RelationGetDescr(pg_partition); - ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, - CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION)); - ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber,F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(rel))); - - scan = systable_beginscan(pg_partition, PartitionParentOidIndexId, true, NULL, 2, key); - redis_reloptions = AlterTableSetRedistribute(rel, REDIS_REL_RESET_CTID, NULL); - while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - HeapTuple dtuple; - Datum repl_val[Natts_pg_partition]; - bool repl_null[Natts_pg_partition]; - bool repl_repl[Natts_pg_partition]; - bool isNull = false; - - Datum newOptions = (Datum)0; - errno_t errorno = EOK; - - errorno = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null)); - securec_check_c(errorno, "\0", "\0"); - - errorno = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl)); - securec_check_c(errorno, "\0", "\0"); - - Datum dval = fastgetattr(tuple, Anum_pg_partition_reloptions, part_tupdesc, &isNull); - /* reset redis reloptions info */ - newOptions = transformRelOptions(isNull ? 
(Datum)0 : dval, redis_reloptions, NULL, NULL, false, false); - - if (newOptions != (Datum)0) { - repl_val[Anum_pg_partition_reloptions - 1] = newOptions; - repl_null[Anum_pg_partition_reloptions - 1] = false; - } else { - repl_null[Anum_pg_partition_reloptions - 1] = true; - } - - repl_repl[Anum_pg_partition_reloptions - 1] = true; - - dtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, part_tupdesc, repl_val, repl_null, repl_repl); - - simple_heap_update(pg_partition, &dtuple->t_self, dtuple); - CatalogUpdateIndexes(pg_partition, dtuple); - /* reset partition reloptions info. */ - if (RELATION_HAS_BUCKET(rel) && RELATION_OWN_BUCKET(rel)) { - Partition part = partitionOpen(rel, HeapTupleGetOid(tuple), AccessExclusiveLock); - Relation partrel = partitionGetRelation(rel, part); - reset_merge_list_on_pgxc_class(partrel); - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); - } - tableam_tops_free_tuple(dtuple); - } - systable_endscan(scan); - list_free_ext(redis_reloptions); - heap_close(pg_partition, RowExclusiveLock); -} - - -/** - * @Description: Reset one partition(oid = part_oid)'s ctid info of rel - * @in rel, parent relation. 
- * @in part_oid, oid of partition - * @return void - */ -static void ResetOnePartRedisCtidRelOptions(Relation rel, Oid part_oid) -{ - Assert(rel != NULL && OidIsValid(part_oid)); - - ResetRelRedisCtidRelOptions( - rel, part_oid, PARTRELID, Natts_pg_partition, Anum_pg_partition_reloptions, PartitionRelationId); -} - -static void ResetRelRedisCtidInfo(Relation rel, Oid part_oid, HeapTuple tuple, Oid pgcat_oid, Datum* repl_val, - const bool* repl_null, const bool* repl_repl) -{ - Relation pgcatrel; - HeapTuple newtuple; - - pgcatrel = heap_open(pgcat_oid, RowExclusiveLock); - newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(pgcatrel), repl_val, repl_null, repl_repl); - - simple_heap_update(pgcatrel, &newtuple->t_self, newtuple); - CatalogUpdateIndexes(pgcatrel, newtuple); - /* reseet partition reloptions info. */ - if (RELATION_HAS_BUCKET(rel) && RELATION_OWN_BUCKET(rel)) { - if (OidIsValid(part_oid)) { - Partition part = partitionOpen(rel, part_oid, AccessExclusiveLock); - Relation partrel = partitionGetRelation(rel, part); - reset_merge_list_on_pgxc_class(partrel); - releaseDummyRelation(&partrel); - partitionClose(rel, part, NoLock); - } else { - reset_merge_list_on_pgxc_class(rel); - } - } - tableam_tops_free_tuple(newtuple); - ReleaseSysCache(tuple); - heap_close(pgcatrel, RowExclusiveLock); -} - -/** - * @Description: relation(parent relation when partition rel) - * @in rel, relaton or partition's fakeRelation. 
- * @part_oid, partition's oid - * @in cat_id, catche id of pg_class or pg_partition - * @in att_num, att_num of pgcat(pg_class or pg_partition) - * @in att_inx, att_inx of reloptions - * @Oid pgcat_oid, pg_class or pg_partition - * @return void - */ -static void ResetRelRedisCtidRelOptions(Relation rel, Oid part_oid, int cat_id, int att_num, int att_inx, Oid pgcat_oid) -{ - Datum newOptions = (Datum)0; - Datum oldOptions = (Datum)0; - List* redis_reloptions = NIL; - Datum* repl_val = NULL; - bool* repl_null = NULL; - bool* repl_repl = NULL; - HeapTuple tuple; - bool isnull = false; - - repl_val = (Datum*)palloc0(att_num * sizeof(Datum)); - repl_null = (bool*)palloc0(att_num); - repl_repl = (bool*)palloc0(att_num); - - if (OidIsValid(part_oid)) { - tuple = SearchSysCache1(cat_id, ObjectIdGetDatum(part_oid)); - } else { - tuple = SearchSysCache1(cat_id, ObjectIdGetDatum(rel->rd_id)); - } - if (!HeapTupleIsValid(tuple)) { - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for partition %u", rel->rd_id))); - } - - /* Get the old reloptions */ - oldOptions = SysCacheGetAttr(cat_id, tuple, att_inx, &isnull); - - /* reset redis reloptions info */ - redis_reloptions = AlterTableSetRedistribute(rel, REDIS_REL_RESET_CTID, NULL); - newOptions = transformRelOptions(isnull ? (Datum)0 : oldOptions, redis_reloptions, NULL, NULL, false, false); - list_free_ext(redis_reloptions); - - if (newOptions != (Datum)0) { - repl_val[att_inx - 1] = newOptions; - repl_null[att_inx - 1] = false; - } else - repl_null[att_inx - 1] = true; - - repl_repl[att_inx - 1] = true; - - ResetRelRedisCtidInfo(rel, part_oid, tuple, pgcat_oid, repl_val, repl_null, repl_repl); - - pfree_ext(repl_val); - pfree_ext(repl_null); - pfree_ext(repl_repl); - return; -} -/** - * WLM has 5 system tables, those tables store monitoring data on CN node, will influence node-restore && node-expand. - * So, need to grant truncate permission of those 5 tables to user, to reduce influence. 
- * - */ -static bool WLMRelationCanTruncate(Relation rel) -{ - const char* targetRelname = get_rel_name(rel->rd_id); - if ((strcmp(targetRelname, WLM_USER_RESOURCE_HISTORY) == 0 || strcmp(targetRelname, WLM_INSTANCE_HISTORY) == 0 || - strcmp(targetRelname, WLM_EC_OPERATOR_INFO) == 0 || strcmp(targetRelname, WLM_OPERATOR_INFO) == 0 || - strcmp(targetRelname, WLM_SESSION_INFO) == 0) && - IsSystemNamespace(RelationGetNamespace(rel))) { - return true; - } - return false; -} - -static bool password_contain_space(const char *pwd) -{ - if (pwd == NULL || strlen(pwd) == 0) { - return true; - } - return false; -} - -void CreateWeakPasswordDictionary(CreateWeakPasswordDictionaryStmt* stmt) -{ - Relation rel; - HeapTuple tup = NULL; - ListCell* pwd_obj = NULL; - bool is_null = false; - - if (!has_createrole_privilege(GetUserId())) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("Permission denied"))); - } - - rel = heap_open(GsGlobalConfigRelationId, RowExclusiveLock); - if (!OidIsValid(rel)) { - ereport(ERROR, - (errcode(ERRCODE_SYSTEM_ERROR), - errmsg("could not open gs_global_config"))); - return; - } - - foreach (pwd_obj, stmt->weak_password_string_list) { - Datum values[Natts_gs_global_config] = {0}; - bool nulls[Natts_gs_global_config] = {false}; - const char* pwd = (const char *)(((Value*)lfirst(pwd_obj))->val.str); - if (password_contain_space(pwd)) { - continue; - } - const char* name = "weak_password"; - TableScanDesc scan = tableam_scan_begin(rel, SnapshotNow, 0, NULL); - TupleDesc tupdesc = RelationGetDescr(rel); - bool flag = false; - - while ((tup = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - Datum ex_datum = heap_getattr(tup, Anum_gs_global_config_value, tupdesc, &is_null); - if (is_null) { - continue; - } - char *ex_pwd = text_to_cstring(DatumGetTextP(ex_datum)); - if (strcmp(ex_pwd, pwd) == 0) { - flag = true; - break; - } - } - - tableam_scan_end(scan); - if (flag == false) { - 
values[Anum_gs_global_config_name - 1] = DirectFunctionCall1(namein, CStringGetDatum(name)); - values[Anum_gs_global_config_value - 1] = CStringGetTextDatum(pwd); - tup = (HeapTuple) heap_form_tuple(RelationGetDescr(rel), values, nulls); - simple_heap_insert(rel, tup); - } - } - heap_close(rel, RowExclusiveLock); -} - - -/* - * Brief : delete all weak passwords. - */ -void DropWeakPasswordDictionary() -{ - if (!has_createrole_privilege(GetUserId())) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("Permission denied"))); - } - - Relation rel = heap_open(GsGlobalConfigRelationId, RowExclusiveLock); - - HeapTuple tuple = NULL; - bool is_null = false; - - TableScanDesc scan = heap_beginscan(rel, SnapshotNow, 0, NULL); - TupleDesc tupdesc = RelationGetDescr(rel); - - /* if the relation is valid, then delete the records of the role */ - while ((tuple = (HeapTuple) tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { - if (strcmp(DatumGetCString(heap_getattr(tuple, Anum_gs_global_config_name, tupdesc, &is_null)), - "weak_password") == 0) { - Assert(is_null == false); - simple_heap_delete(rel, &tuple->t_self); - } - } - tableam_scan_end(scan); - heap_close(rel, RowExclusiveLock); -} - -/** - * @Description: check for alter table when relation is timeseries store - * @in rel, relation which need alter table. - * @in cmd, alter table command. 
- * @return void - */ -static void at_timeseries_check(Relation rel, AlterTableCmd* cmd) -{ - if (!g_instance.attr.attr_common.enable_tsdb) { - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), - errmsg("Cannot alter timeseries table when enable_tsdb is off."))); - } - if (!CStoreSupportATCmd(cmd->subtype)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("timeseries store relation doesn't support this ALTER yet"))); - switch (cmd->subtype) { - case AT_AddPartition: - case AT_DropPartition: - case AT_ResetPartitionno: - case AT_SetRelOptions: - case AT_DropColumn: - case AT_TruncatePartition: - case AT_TruncateSubPartition: - case AT_ChangeOwner: - break; - case AT_AddColumn: { - ColumnDef* def = (ColumnDef*)cmd->def; - if (def->kvtype == ATT_KV_UNDEFINED) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column kvtype should be defined for timeseries store relation"))); - if (def->kvtype == ATT_KV_TIMETAG) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("column kvtype cannot defined as TSTIME"))); - break; - } - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("This ALTER command is not support in timeseries store."))); - break; - } -} - -static OnCommitAction GttOncommitOption(const List *options) -{ - ListCell *listptr; - OnCommitAction action = ONCOMMIT_NOOP; - - foreach(listptr, options) { - DefElem *def = reinterpret_cast(lfirst(listptr)); - if (strcmp(def->defname, "on_commit_delete_rows") == 0) { - bool res = false; - char *sval = defGetString(def); - - if (!parse_bool(sval, &res)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("parameter \"on_commit_delete_rows\" requires a Boolean value"))); - } - - if (res) { - action = ONCOMMIT_DELETE_ROWS; - } else { - action = ONCOMMIT_PRESERVE_ROWS; - } - break; - } - } - return action; -} - -/* - * 
Returns true iff any relation underlying this query is a temporary database - * object (table, view, or materialized view). - * - */ -bool -isQueryUsingTempRelation(Query *query) -{ - return isQueryUsingTempRelation_walker((Node *) query, NULL); -} - -static bool -isQueryUsingTempRelation_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, Query)) - { - Query *query = (Query *) node; - ListCell *rtable = NULL; - - foreach(rtable, query->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *)lfirst(rtable); - - if (rte->rtekind == RTE_RELATION) - { - Relation rel = heap_open(rte->relid, AccessShareLock); - char relpersistence = rel->rd_rel->relpersistence; - - heap_close(rel, AccessShareLock); - if (relpersistence == RELPERSISTENCE_TEMP) - return true; - } - } - - return query_tree_walker(query, - (bool (*)())isQueryUsingTempRelation_walker, - context, - QTW_IGNORE_JOINALIASES); - } - - return expression_tree_walker(node, - (bool (*)())isQueryUsingTempRelation_walker, - context); -} - -void ExecutePurge(PurgeStmt *stmt) -{ - switch (stmt->purtype) { - case PURGE_TABLE: - TrPurgeObject(stmt->purobj, RB_OBJ_TABLE); - break; - case PURGE_INDEX: - TrPurgeObject(stmt->purobj, RB_OBJ_INDEX); - break; - case PURGE_TABLESPACE: { - Oid spcId; - spcId = ConvertToPgclassRelTablespaceOid( - get_tablespace_oid(stmt->purobj->relname, false)); - if (!pg_tablespace_ownercheck(spcId, GetUserId())) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TABLESPACE, - stmt->purobj->relname); - } - RbCltPurgeSpace(spcId); - break; - } - case PURGE_RECYCLEBIN: { - RbCltPurgeRecyclebin(); - break; - } - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized purge type: %d", (int)stmt->purtype))); - } -} - -/* - * TIMECAPSULE: - * TIMECAPSULE TABLE { table_name } TO { TIMESTAMP | CSN } expression - * TIMECAPSULE TABLE { table_name } TO BEFORE DROP [RENAME TO new_tablename] - * TIMECAPSULE TABLE { table_name } TO BEFORE 
TRUNCATE [ FORCE ] - */ -void ExecuteTimeCapsule(TimeCapsuleStmt *stmt) -{ - switch (stmt->tcaptype) { - case TIMECAPSULE_VERSION: - TvRestoreVersion(stmt); - break; - - case TIMECAPSULE_DROP: - TrRestoreDrop(stmt); - break; - - case TIMECAPSULE_TRUNCATE: - TrRestoreTruncate(stmt); - break; - - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized timecapsule type: %d", (int)stmt->tcaptype))); - } -} - -/* - * AlterCreateChainTables - * If it is a ledger usertable, that should invoking this function. - * then create a history table. - */ -void AlterCreateChainTables(Oid relOid, Datum reloptions, CreateStmt *mainTblStmt) -{ - Relation rel = NULL; - - rel = heap_open(relOid, AccessExclusiveLock); - - /* Ledger user table only support for the regular relation. */ - if (!rel->rd_isblockchain) { - heap_close(rel, NoLock); - return; - } - - create_hist_relation(rel, reloptions, mainTblStmt); - heap_close(rel, NoLock); -} - -void CheckDropViewValidity(ObjectType stmtType, char relKind, const char* relname) -{ - if (relKind != RELKIND_VIEW && relKind != RELKIND_CONTQUERY) { - return; - } - - if ((stmtType == OBJECT_CONTQUERY) || (stmtType == OBJECT_VIEW)) { - return; - } - - char expectedRelKind; - switch (stmtType) { - case OBJECT_TABLE: - expectedRelKind = RELKIND_RELATION; - break; - case OBJECT_INDEX: - expectedRelKind = RELKIND_INDEX; - break; - case OBJECT_SEQUENCE: - expectedRelKind = RELKIND_SEQUENCE; - break; - case OBJECT_LARGE_SEQUENCE: - expectedRelKind = RELKIND_LARGE_SEQUENCE; - break; - case OBJECT_MATVIEW: - expectedRelKind = RELKIND_MATVIEW; - break; - case OBJECT_FOREIGN_TABLE: - expectedRelKind = RELKIND_FOREIGN_TABLE; - break; - case OBJECT_STREAM: - expectedRelKind = RELKIND_STREAM; - break; - default: { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized drop object type: %d", (int)stmtType))); - expectedRelKind = 0; /* keep compiler quiet */ - } break; - } - 
DropErrorMsgWrongType(relname, relKind, expectedRelKind); -} -void ShrinkCfsChunkRestore(Oid relationId, LOCKMODE lockmode, bool nowait) -{ - Relation relation = try_relation_open(relationId, lockmode); - if (relation == NULL) { - ereport(ERROR,(errcode(ERRCODE_RELATION_OPEN_ERROR), - errmsg("could not open relation with OID %u", relationId))); - } - - if (relation->rd_node.opt == 0) { - relation_close(relation, lockmode); - ereport(ERROR, (errcode(ERRCODE_OPERATE_FAILED), - errmsg("could not deal with uncompressed relation whose OID is %u in cfs shrink.", relationId))); - } - - LockRelFileNode(relation->rd_node, lockmode); - SmgrChunkFragmentsRestore(relation->rd_node, MAIN_FORKNUM, relation->rd_rel->parttype, nowait); - SmgrChunkFragmentsRestoreRecord(relation->rd_node, MAIN_FORKNUM); - UnlockRelFileNode(relation->rd_node, lockmode); - UnlockRelationOid(relationId, lockmode); - relation_close(relation, lockmode); -} - -void ShrinkRealtionChunk(ShrinkStmt* shrink) -{ - ListCell* cell = NULL; - foreach (cell, shrink->relations) { - RangeVar* r = (RangeVar*)lfirst(cell); - Oid reloid = get_relname_relid(r->relname, PG_CATALOG_NAMESPACE); - if (unlikely((long)(OidIsValid(reloid) && reloid < FirstBootstrapObjectId))) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", r->relname), - errhint("just normal compressed table can be shrinked"))); - } - - reloid = RangeVarGetRelid(r, AccessShareLock, true); - if (!OidIsValid(reloid)) { - continue; - } - - ShrinkCfsChunkRestore(reloid, AccessShareLock, shrink->nowait); - } -} - -static int128 EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber attnum, Datum* value, bool* is_null) -{ - ConstrAutoInc* cons_autoinc = desc->constr->cons_autoinc; - int128 autoinc; - bool modify_value = false; - - if (*is_null) { - autoinc = 0; - modify_value = desc->attrs[attnum - 1].attnotnull; - } else { - autoinc = datum2autoinc(cons_autoinc, *value); - modify_value = 
(autoinc == 0); - } - /* When datum is NULL/0, auto increase */ - if (autoinc == 0) { - if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - autoinc = tmptable_autoinc_nextval(rel->rd_rel->relfilenode, cons_autoinc->next); - } else { - autoinc = nextval_internal(cons_autoinc->seqoid); - } - if (modify_value) { - *is_null = false; - *value = autoinc2datum(cons_autoinc, autoinc); - } - } - return autoinc; -} - -static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc) -{ - if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - tmptable_autoinc_setval(rel->rd_rel->relfilenode, desc->constr->cons_autoinc->next, autoinc, true); - } else { - autoinc_setval(desc->constr->cons_autoinc->seqoid, autoinc, true); - } -} - -static void CopyTempAutoIncrement(Relation oldrel, Relation newrel) -{ - if (oldrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP || - oldrel->rd_rel->relfilenode == newrel->rd_rel->relfilenode || - !RelHasAutoInc(oldrel)) { - return; - } - int128* value = find_tmptable_cache_autoinc(oldrel->rd_rel->relfilenode); - if (value != NULL) { - tmptable_autoinc_reset(newrel->rd_rel->relfilenode, *value); - } -} - -static void ATAlterCheckModifiyColumnRepeatedly(const AlterTableCmd* cmd, const List* tab_cmds) -{ - ListCell* tcmd = NULL; - foreach (tcmd, tab_cmds) { - AlterTableCmd* acmd = (AlterTableCmd*)lfirst(tcmd); - if (acmd->name != NULL && strcmp(acmd->name, cmd->name) == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("cannot modify or change column \"%s\" twice", cmd->name))); - } - } -} - -void CheckAutoIncrementDatatype(Oid typid, const char* colname) -{ - switch (typid) { - case BOOLOID: - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case INT16OID: - case FLOAT4OID: - case FLOAT8OID: - break; - default: - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("The datatype of column '%s' does not support auto_increment", 
colname))); - break; - } -} - -void CheckRelAutoIncrementIndex(Oid relid, LOCKMODE lockmode) -{ - List* idxoidlist = NULL; - bool found = false; - Relation rel = relation_open(relid, lockmode); - AttrNumber autoinc_attnum = RelAutoIncAttrNum(rel); - - if (autoinc_attnum <= 0) { - relation_close(rel, lockmode); - return; - } - - if (!rel->rd_rel->relhasindex) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - (errmsg("auto_increment column must be defined as a unique or primary key")))); - } - - idxoidlist = RelationGetIndexList(rel); - relation_close(rel, lockmode); - - foreach_cell(l, idxoidlist) { - Relation idxrel = index_open(lfirst_oid(l), AccessShareLock); - Form_pg_index index = idxrel->rd_index; - - if (IndexIsValid(index) && (index->indisunique || index->indisprimary) && - index->indkey.values[0] == autoinc_attnum) { - found = true; - index_close(idxrel, AccessShareLock); - break; - } - index_close(idxrel, AccessShareLock); - } - - list_free(idxoidlist); - if (!found) { - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - (errmsg("auto_increment column must be defined as a unique or primary key")))); - } -} - -/* Daparse a expression and reparse it. Return new expression node. 
*/ -static Node* ReparseSingleRelExpr(Relation rel, Node* src_expr) -{ - Node* dst_expr = NULL; - List* parsetree = NULL; - SelectStmt* stmt = NULL; - StringInfoData query_string; - List* deparse_context = deparse_context_for(RelationGetRelationName(rel), RelationGetRelid(rel)); - char* expr_string = deparse_expression(src_expr, deparse_context, false, false); - /* construct sql */ - initStringInfo(&query_string); - appendStringInfo(&query_string, "SELECT %s ;", expr_string); - /* parse sql */ - parsetree = raw_parser(query_string.data, NULL); - /* get SelectStmt from parsetree */ - Assert(list_length(parsetree) == 1); - dst_expr = (Node*)linitial(parsetree); - Assert(IsA(dst_expr, SelectStmt)); - stmt = (SelectStmt*)dst_expr; - /* get ResTarget from SelectStmt */ - Assert(list_length(stmt->targetList) == 1); - dst_expr = (Node*)linitial(stmt->targetList); - /* get reparsed expr from ResTarget */ - Assert(IsA(dst_expr, ResTarget)); - dst_expr = ((ResTarget*)dst_expr)->val; - list_free(parsetree); - pfree(query_string.data); - pfree(expr_string); - list_free_deep(deparse_context); - return dst_expr; -} - -/* Rebuild the generated expression because the data type of the column referenced in it has changed. 
*/ -static Node* RebuildGeneratedColumnExpr(Relation rel, AttrNumber gen_attnum) -{ - ParseState* pstate = NULL; - RangeTblEntry *rte = NULL; - Form_pg_attribute pgattr = &rel->rd_att->attrs[gen_attnum - 1]; - Node* gen_expr = build_column_default(rel, gen_attnum); - - Assert(gen_expr); - /* reparse generated column expression */ - gen_expr = ReparseSingleRelExpr(rel, gen_expr); - /* cook generated expression */ - pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); - addRTEtoQuery(pstate, rte, false, true, true); - gen_expr = cookDefault(pstate, gen_expr, pgattr->atttypid, pgattr->atttypmod, pgattr->attcollation, - NameStr(pgattr->attname), ATTRIBUTE_GENERATED_STORED); - /* readd pg_attrdef */ - RemoveAttrDefault(RelationGetRelid(rel), gen_attnum, DROP_RESTRICT, true, true); - StoreAttrDefault(rel, gen_attnum, gen_expr, ATTRIBUTE_GENERATED_STORED, NULL, true); - pfree(pstate); - return gen_expr; -} - -static void ATPrepAlterModifyColumn(List** wqueue, AlteredTableInfo* tab, Relation rel, bool recurse, - bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode) -{ - ColumnDef* def = (ColumnDef*)cmd->def; - Node* tmp_expr = def->raw_default; - char* tmp_name = cmd->name; - if (def->generatedCol != ATTRIBUTE_GENERATED_STORED && (tmp_expr == NULL || !IsA(tmp_expr, AutoIncrement))) { - ATPrepCheckDefault(tmp_expr); - } - - def->raw_default = NULL; - cmd->name = def->colname; - /* For ATPrepAlterColumnType, raw_default is used to convert the original data to the target type. 
*/ - ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode); - cmd->name = tmp_name; - def->raw_default = tmp_expr; -} - -static char* GetCreateViewCommand(const char *rel_name, HeapTuple tup, Form_pg_class reltup, Oid pg_rewrite_oid, Oid view_oid) -{ - StringInfoData buf; - ViewInfoForAdd* view_info = NULL; - char* view_options = NULL; - bool isnull = true; - const char* ns_name = quote_identifier(get_namespace_name(reltup->relnamespace)); - - initStringInfo(&buf); - appendStringInfo(&buf, "CREATE OR REPLACE "); - if (reltup->relpersistence == RELPERSISTENCE_TEMP) { - appendStringInfo(&buf, "TEMPORARY "); - } - if (ns_name) { - appendStringInfo(&buf, "VIEW %s.%s(", ns_name, quote_identifier(NameStr(reltup->relname))); - } else { - appendStringInfo(&buf, "VIEW %s(", quote_identifier(NameStr(reltup->relname))); - } - for (AttrNumber i = 1; i <= reltup->relnatts; i++) { - char* attname = get_relid_attribute_name(view_oid, i); - if (i == reltup->relnatts) { - appendStringInfo(&buf, "%s) ", quote_identifier(attname)); - } else { - appendStringInfo(&buf, "%s, ", quote_identifier(attname)); - } - } - Datum reloptions = SysCacheGetAttr(RELOID, tup, Anum_pg_class_reloptions, &isnull); - if (!isnull) { - Datum sep = CStringGetTextDatum(", "); - Datum txt = OidFunctionCall2(F_ARRAY_TO_TEXT, reloptions, sep); - view_options = TextDatumGetCString(txt); - } - if (view_options && strlen(view_options) > 0) { - appendStringInfo(&buf, "WITH (%s) ", view_options); - } - pfree_ext(view_options); - /* concat CREATE VIEW command with query */ - view_info = GetViewInfoFirstAfter(rel_name, pg_rewrite_oid, true); - if (view_info == NULL) { - pfree_ext(buf.data); - return NULL; /* should not happen */ - } - appendStringInfo(&buf, "AS %s", view_info->query_string); - pfree_ext(view_info->query_string); - pfree_ext(view_info); - return buf.data; -} - -static void ATAlterRecordRebuildView(AlteredTableInfo* tab, Relation rel, Oid pg_rewrite_oid, bool type_changed) -{ - 
HeapTuple tup; - char* view_def = NULL; - Oid view_oid = get_rewrite_relid(pg_rewrite_oid, true); - /* the view has been recorded */ - if (list_member_oid(tab->changedViewOids, view_oid) || !type_changed) { - return; - } - /* get pg_class tuple by view oid */ - tup = SearchSysCache1(RELOID, ObjectIdGetDatum(view_oid)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("modify or change a column used by materialized view or rule is not supported"))); - } - Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tup); - if (reltup->relkind != RELKIND_VIEW) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("modify or change a column used by materialized view or rule is not supported"))); - } - /* print CREATE VIEW command */ - view_def = GetCreateViewCommand(NameStr(rel->rd_rel->relname), tup, reltup, pg_rewrite_oid, view_oid); - ReleaseSysCache(tup); - if (view_def) { - /* record it */ - tab->changedViewOids = lappend_oid(tab->changedViewOids, view_oid); - tab->changedViewDefs = lappend(tab->changedViewDefs, view_def); - } -} - -static Node* CookRlspolicyQual(Relation rel, Node* src_qual) -{ - ParseState* pstate = make_parsestate(NULL); - RangeTblEntry* rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, false); - addRTEtoQuery(pstate, rte, false, true, true); - /* Transform expr clause */ - Node *cooked_qual = transformWhereClause(pstate, src_qual, EXPR_KIND_POLICY, "POLICY"); - /* Take care of collations */ - assign_expr_collations(pstate, cooked_qual); - pfree(pstate); - return cooked_qual; -} - -/* - * Find a row level security policy by oid. 
Rebuild qual expression tree by replacing Var node; - */ -static void ATAlterModifyRebuildRlspolicyExpr(Relation rel, Oid pg_rlspolicy_oid) -{ - Relation rlsp_rel; - ScanKeyData scankey; - SysScanDesc scan; - HeapTuple tuple; - Datum values[Natts_pg_rlspolicy] = {0}; - bool nulls[Natts_pg_rlspolicy] = {0}; - bool replaces[Natts_pg_rlspolicy] = {0}; - Datum datum; - bool isnull = false; - char* polqual = NULL; - Node *expr = NULL; - - ScanKeyInit(&scankey, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(pg_rlspolicy_oid)); - rlsp_rel = heap_open(RlsPolicyRelationId, RowExclusiveLock); - scan = systable_beginscan(rlsp_rel, PgRlspolicyOidIndex, true, NULL, 1, &scankey); - tuple = systable_getnext(scan); - if (HeapTupleIsValid(tuple)) { - datum = heap_getattr(tuple, Anum_pg_rlspolicy_polqual, RelationGetDescr(rlsp_rel), &isnull); - if (!isnull) { - polqual = TextDatumGetCString(datum); - /* rebuild polqual expression */ - expr = (Node*)stringToNode_skip_extern_fields(polqual); - expr = ReparseSingleRelExpr(rel, expr); - expr = CookRlspolicyQual(rel, expr); - pfree_ext(polqual); - /* update polqual */ - polqual = nodeToString(expr); - values[Anum_pg_rlspolicy_polqual - 1] = CStringGetTextDatum(polqual); - replaces[Anum_pg_rlspolicy_polqual - 1] = true; - tuple = heap_modify_tuple(tuple, RelationGetDescr(rlsp_rel), values, nulls, replaces); - simple_heap_update(rlsp_rel, &tuple->t_self, tuple); - CatalogUpdateIndexes(rlsp_rel, tuple); - } - } - - systable_endscan(scan); - heap_close(rlsp_rel, RowExclusiveLock); - pfree(DatumGetPointer(values[Anum_pg_rlspolicy_polqual - 1])); - pfree_ext(polqual); -} - -static void ATHandleClassObjectDependOnModifiedColumn(AlteredTableInfo* tab, Relation dep_rel, - ObjectAddress* object) -{ - char relKind = get_rel_relkind(object->objectId); - - if (relKind == RELKIND_INDEX || relKind == RELKIND_GLOBAL_INDEX) { - Assert(object->objectSubId == 0); - Oid refobjid; - if 
(!list_member_oid(tab->changedConstraintOids, object->objectId) && - CheckIndexIsConstraint(dep_rel, object->objectId, &refobjid)) { - tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, refobjid); - tab->changedConstraintDefs = - lappend(tab->changedConstraintDefs, pg_get_constraintdef_string(refobjid)); - } else if (!list_member_oid(tab->changedIndexOids, object->objectId)) { - /* - * Question: alter table set datatype and table index execute concurrently, data inconsistency - * occurs. The index file is deleted and metadata is left. Because the data type is not locked - * after modification, which ultimately leads to could not open file. Alter table column set - * datatype maybe trigger index operation but index is not locked. When the index data is - * inconsistent, we can use"reindex index" to repair the index. - * Solution: we should lock index at the beginning.The ACCESS_EXCLUSIVE_LOCK for index is used - * because we think ACCESS_EXCLUSIVE_LOCK for data table will block any operation and index - * will be not used to query data. This operation will block individual index operations, - * such as reindex index\set index tablespace. - * Testcase: alter table row_table alter column col_varchar set data type text,alter column - * col_smallint set data type bigint + alter index idx set tablespace. - */ - LockRelationOid(object->objectId, AccessExclusiveLock); - tab->changedIndexOids = lappend_oid(tab->changedIndexOids, object->objectId); - tab->changedIndexDefs = lappend(tab->changedIndexDefs, pg_get_indexdef_string(object->objectId)); - } - } else if (RELKIND_IS_SEQUENCE(relKind)) { - /* - * This must be a SERIAL or AUTO_INCREMENT column's sequence. We need not do anything to it. 
- */ - Assert(object->objectSubId == 0); - } else if (relKind == RELKIND_RELATION && object->objectSubId != 0 && - GetGenerated(object->objectId, object->objectSubId)) { - if (!list_member_int(tab->changedGeneratedCols, object->objectSubId)) { - tab->changedGeneratedCols = lappend_int(tab->changedGeneratedCols, object->objectSubId); - } - } else { - /* Not expecting any other direct dependencies... */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unexpected object depending on column: %s", getObjectDescription((object))))); - } -} - -static void ATAlterRecordRebuildConstraint(AlteredTableInfo* tab, Oid constraint_oid, Form_pg_depend found_dep) -{ - if (!list_member_oid(tab->changedConstraintOids, constraint_oid)) { - char* defstring = pg_get_constraintdef_string(constraint_oid); - /* - * Put NORMAL dependencies at the front of the list and - * AUTO dependencies at the back. This makes sure that - * foreign-key constraints depending on this column will - * be dropped before unique or primary-key constraints of - * the column; which we must have because the FK - * constraints depend on the indexes belonging to the - * unique constraints. 
- */ - if (found_dep->deptype == DEPENDENCY_NORMAL) { - tab->changedConstraintOids = lcons_oid(constraint_oid, tab->changedConstraintOids); - tab->changedConstraintDefs = lcons(defstring, tab->changedConstraintDefs); - } else { - tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, constraint_oid); - tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, defstring); - } - } -} - -static void ATAlterRecordRebuildTrigger(AlteredTableInfo* tab, Oid trigger_oid, bool type_changed) -{ - if (!list_member_oid(tab->changedTriggerOids, trigger_oid) && type_changed) { - char* defstring = pg_get_triggerdef_string(trigger_oid); - tab->changedTriggerOids = lappend_oid(tab->changedTriggerOids, trigger_oid); - tab->changedTriggerDefs = lappend(tab->changedTriggerDefs, defstring); - } -} - -static void ATAlterRecordRebuildRlsp(AlteredTableInfo* tab, Oid rlsp_oid, bool type_changed) -{ - if (!list_member_oid(tab->changedRLSPolicies, rlsp_oid) && type_changed) { - tab->changedRLSPolicies = lappend_oid(tab->changedRLSPolicies, rlsp_oid); - } -} - -static void ATHandleObjectsDependOnModifiedColumn(AlteredTableInfo* tab, Relation rel, - Form_pg_attribute pg_attr, AttrNumber attnum, bool type_changed) -{ - ScanKeyData key[3]; - SysScanDesc scan; - HeapTuple dep_tup; - Relation dep_rel = heap_open(DependRelationId, RowExclusiveLock); - - ScanKeyInit( - &key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit( - &key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum((int32)attnum)); - - scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, 3, key); - - while (HeapTupleIsValid(dep_tup = systable_getnext(scan))) { - Form_pg_depend found_dep = (Form_pg_depend)GETSTRUCT(dep_tup); - ObjectAddress found_object; - - /* We don't expect any PIN 
dependencies on columns */ - if (found_dep->deptype == DEPENDENCY_PIN) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("cannot modify or change a pinned column"))); - } - - found_object.classId = found_dep->classid; - found_object.objectId = found_dep->objid; - found_object.objectSubId = found_dep->objsubid; - switch (getObjectClass(&found_object)) { - case OCLASS_CLASS: - ATHandleClassObjectDependOnModifiedColumn(tab, dep_rel, &found_object); - break; - - case OCLASS_CONSTRAINT: - Assert(found_object.objectSubId == 0); - ATAlterRecordRebuildConstraint(tab, found_object.objectId, found_dep); - break; - - case OCLASS_REWRITE: - ATAlterRecordRebuildView(tab, rel, found_object.objectId, type_changed); - break; - - case OCLASS_TRIGGER: - Assert(found_object.objectSubId == 0); - ATAlterRecordRebuildTrigger(tab, found_object.objectId, type_changed); - break; - - case OCLASS_RLSPOLICY: - Assert(found_object.objectSubId == 0); - ATAlterRecordRebuildRlsp(tab, found_object.objectId, type_changed); - break; - - case OCLASS_DEFAULT: - break; - - case OCLASS_CL_CACHED_COLUMN: - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("modify or change encrypted column is not supported"))); - break; - - case OCLASS_PROC: - case OCLASS_TYPE: - case OCLASS_CAST: - case OCLASS_COLLATION: - case OCLASS_CONVERSION: - case OCLASS_LANGUAGE: - case OCLASS_LARGEOBJECT: - case OCLASS_OPERATOR: - case OCLASS_OPCLASS: - case OCLASS_OPFAMILY: - case OCLASS_AMOP: - case OCLASS_AMPROC: - case OCLASS_SCHEMA: - case OCLASS_TSPARSER: - case OCLASS_TSDICT: - case OCLASS_TSTEMPLATE: - case OCLASS_TSCONFIG: - case OCLASS_ROLE: - case OCLASS_DATABASE: - case OCLASS_TBLSPACE: - case OCLASS_FDW: - case OCLASS_FOREIGN_SERVER: - case OCLASS_USER_MAPPING: - case OCLASS_DEFACL: - case OCLASS_EXTENSION: - case OCLASS_DATA_SOURCE: - case OCLASS_GLOBAL_SETTING_ARGS: - case OCLASS_GS_CL_PROC: - 
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unexpected object depending on column: %s", getObjectDescription(&found_object)))); - break; - default: - ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized object class: %u", found_object.classId))); - } - } - - systable_endscan(scan); - DelDependencONDataType(rel, dep_rel, pg_attr); - heap_close(dep_rel, RowExclusiveLock); -} - -static NewColumnValue* findNewColumnValue(AlteredTableInfo* tab, const char* col_name) -{ - NewColumnValue* result = NULL; - - foreach_cell(vcell, tab->newvals) { - result = (NewColumnValue*)lfirst(vcell); - if (result->col_name != NULL && strcmp(col_name, result->col_name) == 0) { - return result; - } - } - return NULL; -} - -static int128 getAutoIncrementValue(Relation rel, ColumnDef* def, AttrNumber attnum) -{ - AttrNumber autoinc_attnum = RelAutoIncAttrNum(rel); - if (autoinc_attnum > 0) { - if (autoinc_attnum == attnum) { - if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - return *rel->rd_att->constr->cons_autoinc->next; - } else { - return autoinc_get_nextval(RelAutoIncSeqOid(rel)); - } - } else if (def->raw_default && IsA(def->raw_default, AutoIncrement)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Incorrect table definition, there can be only one auto_increment column")))); - } - } - return 0; -} - -static void ATAlterModifyAutoinColumn(AlteredTableInfo* tab, Relation rel, ColumnDef* def, - AttrNumber attnum, int128 autoinc) -{ - if (autoinc > 0) { /* Column is auto_increment before modified, keep auto_increment value. */ - if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) { - tmptable_autoinc_setval( - rel->rd_rel->relfilenode, rel->rd_att->constr->cons_autoinc->next, autoinc, false); - } else { - autoinc_setval(RelAutoIncSeqOid(rel), autoinc, false); - } - } else { /* Column is not auto_increment before modified, need rewrite table later. 
*/ - Expr* defval = NULL; - NewColumnValue* newval = findNewColumnValue(tab, def->colname); - if (newval != NULL) { - newval->is_autoinc = true; - tab->rewrite = true; - } else { - defval = (Expr*)build_column_default(rel, attnum); - Assert(defval != NULL); - ATExecAppendDefValExpr(attnum, defval, tab, def, true, false); - } - } -} - -static void ATAlterModifyColumnDefault(AlteredTableInfo* tab, Relation rel, ColumnDef* def, - AttrNumber attnum, int128 autoinc) -{ - RawColumnDefault raw_col_def; - - raw_col_def.attnum = attnum; - raw_col_def.raw_default = (Node*)copyObject(def->raw_default); - raw_col_def.generatedCol = def->generatedCol; - raw_col_def.update_expr = (Node*)copyObject(def->update_default); - (void)AddRelationNewConstraints(rel, list_make1(&raw_col_def), NIL, false, true); - CommandCounterIncrement(); - /* AUTO_INCREMENT and GENERATED COLUMN need rewrite table */ - if (RelAutoIncAttrNum(rel) == attnum) { - ATAlterModifyAutoinColumn(tab, rel, def, attnum, autoinc); - if (list_length(tab->changedGeneratedCols) > 0) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("generated column cannot refer to auto_increment column")))); - } - } else if (def->generatedCol == ATTRIBUTE_GENERATED_STORED) { - if (list_length(tab->changedGeneratedCols) > 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("A generated column cannot reference another generated column."))); - } - - NewColumnValue* newval = findNewColumnValue(tab, def->colname); - Expr* defval = (Expr*)build_column_default(rel, attnum); - Assert(defval != NULL); - if (newval != NULL) { - newval->expr = expression_planner(defval); - newval->is_generated = true; - tab->rewrite = true; - } else { - ATExecAppendDefValExpr(attnum, defval, tab, def, false, false); - } - } -} - -/* - * Rebulid generated column expression and update pg_attrdef. - * Set rewrite if need. 
- */ -static void ATRewriteChangedGeneratedColumn(AlteredTableInfo* tab, Relation rel, AttrNumber gen_attnum) -{ - char *gen_col_name = NameStr(rel->rd_att->attrs[gen_attnum - 1].attname); - NewColumnValue* newval = findNewColumnValue(tab, gen_col_name); - Expr* defval = (Expr*)RebuildGeneratedColumnExpr(rel, gen_attnum); - Assert(defval != NULL); - /* - * Rebuilt generated column expression does not affect the data of the generated column. - * Replace the expression only when the generated column needs to be rewritten. - */ - if (newval == NULL) { - newval = (NewColumnValue*)palloc0(sizeof(NewColumnValue)); - newval->attnum = gen_attnum; - newval->expr = expression_planner(defval); - newval->is_generated = true; - newval->is_autoinc = false; - newval->generate_attnum = 0; - newval->col_name = pstrdup(gen_col_name); - tab->newvals = lappend(tab->newvals, newval); - tab->rewrite = true; - } else if (newval->is_generated) { - newval->expr = expression_planner(defval); - tab->rewrite = true; - } -} - -static bool ModifiedColumnIsPrimaryKey(AlteredTableInfo* tab, AttrNumber attrnum) -{ - foreach_cell(cell, tab->changedConstraintOids) { - Datum* keys = NULL; - Datum conkey_datum; - int key_count; - bool isnull = false; - Oid constraint_oid = lfirst_oid(cell); - HeapTuple tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraint_oid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for constraint %u", constraint_oid))); - } - - if (((Form_pg_constraint)GETSTRUCT(tuple))->contype != CONSTRAINT_PRIMARY) { - ReleaseSysCache(tuple); - continue; - } - - conkey_datum = SysCacheGetAttr(CONSTROID, tuple, Anum_pg_constraint_conkey, &isnull); - if (isnull) { - ReleaseSysCache(tuple); - continue; - } - - deconstruct_array(DatumGetArrayTypeP(conkey_datum), INT2OID, sizeof(int16), true, 's', &keys, NULL, &key_count); - for (int i = 0; i < key_count; i++) { - if (DatumGetInt16(keys[i]) == attrnum) { - 
pfree_ext(keys); - ReleaseSysCache(tuple); - return true; - } - } - - pfree_ext(keys); - ReleaseSysCache(tuple); - } - - return false; -} - -static void ATExecAlterModifyColumn(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd) -{ - ColumnDef* def = (ColumnDef*)cmd->def; - AttrNumber attnum; - HeapTuple attr_tuple; - HeapTuple type_tuple; - Form_pg_attribute pg_attr; - Form_pg_type pg_type; - Relation att_rel; - Oid typid; - int32 typmod = -1; - Oid collid = InvalidOid; - AclResult aclresult; - int128 autoinc = 0; - char* col_name = def->colname; - bool type_changed = false; - bool is_first_after = cmd->is_first || cmd->after_name != NULL; - - att_rel = heap_open(AttributeRelationId, RowExclusiveLock); - attnum = get_attnum(RelationGetRelid(rel), col_name); - if (attnum == InvalidAttrNumber) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel)))); - } - - /* Check and get new type and collation */ - type_tuple = typenameType(NULL, def->typname, &typmod); - pg_type = (Form_pg_type)GETSTRUCT(type_tuple); - typid = HeapTupleGetOid(type_tuple); - aclresult = pg_type_aclcheck(typid, GetUserId(), ACL_USAGE); - if (aclresult != ACLCHECK_OK) { - aclcheck_error_type(aclresult, typid); - } - collid = GetColumnDefCollation(NULL, def, typid); - CheckAttributeType(col_name, typid, collid, list_make1_oid(rel->rd_rel->reltype), false); - - /* Check and save AUTO_INCREMENT */ - autoinc = getAutoIncrementValue(rel, def, attnum); - - /* drop old default */ - RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false, true); - - /* Look up the target column */ - attr_tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), col_name); - if (!HeapTupleIsValid(attr_tuple)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", col_name, RelationGetRelationName(rel)))); - } - pg_attr = 
(Form_pg_attribute)GETSTRUCT(attr_tuple); - type_changed = (pg_attr->atttypid != typid || pg_attr->atttypmod != typmod || pg_attr->attcollation != collid); - /* Check column partkey */ - if (is_partition_column(rel, attnum)) { - if (type_changed) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("modify or change partition key column is not supported"))); - } else if (def->generatedCol) { - ereport(ERROR, - (errmodule(MOD_GEN_COL), errcode(ERRCODE_INVALID_OPERATION), - errmsg("Invalid modify column operation"), - errdetail("cannot modify or change a partition key column as a generated column"))); - } - } - - /* drop comment on column */ - DeleteComments(RelationGetRelid(rel), RelationRelationId, attnum); - /* Working with objects that depend on the column being modified. */ - ATHandleObjectsDependOnModifiedColumn(tab, rel, pg_attr, attnum, type_changed); - /* Primary key column must be not null. */ - def->is_not_null = def->is_not_null ? 
def->is_not_null : ModifiedColumnIsPrimaryKey(tab, attnum); - if (!pg_attr->attnotnull && def->is_not_null) { - tab->new_notnull = true; - } - - if (is_first_after) { - UpdateNewvalsAttnum(tab, rel, cmd, col_name); - } - - pg_attr->atttypid = typid; - pg_attr->attlen = pg_type->typlen; - pg_attr->atttypmod = typmod; - pg_attr->attbyval = pg_type->typbyval; - pg_attr->attndims = list_length(def->typname->arrayBounds); - pg_attr->attstorage = pg_type->typstorage; - pg_attr->attalign = pg_type->typalign; - pg_attr->attcollation = collid; - pg_attr->attnotnull = def->is_not_null; - pg_attr->attislocal = def->is_local; - pg_attr->attkvtype = def->kvtype; - pg_attr->attinhcount = def->inhcount; - pg_attr->atthasdef = false; - ReleaseSysCache(type_tuple); - simple_heap_update(att_rel, &attr_tuple->t_self, attr_tuple); - CatalogUpdateIndexes(att_rel, attr_tuple); - heap_close(att_rel, RowExclusiveLock); - - /* Install dependencies on new datatype and collation */ - add_column_datatype_dependency(RelationGetRelid(rel), attnum, typid); - add_column_collation_dependency(RelationGetRelid(rel), attnum, collid); - - /* Drop any pg_statistic entry for the column, since it's now wrong type */ - if (RELATION_IS_GLOBAL_TEMP(rel)) { - remove_gtt_att_statistic(RelationGetRelid(rel), attnum); - } else { - RemoveStatistics<'c'>(RelationGetRelid(rel), attnum); - } - - if (def->raw_default || def->update_default) { - CommandCounterIncrement(); - ATAlterModifyColumnDefault(tab, rel, def, attnum, autoinc); - } - - foreach_cell(attcell, tab->changedGeneratedCols) { - CommandCounterIncrement(); - ATRewriteChangedGeneratedColumn(tab, rel, (AttrNumber)lfirst_int(attcell)); - } - list_free_ext(tab->changedGeneratedCols); - - foreach_cell(rlspcell, tab->changedRLSPolicies) { - CommandCounterIncrement(); - ATAlterModifyRebuildRlspolicyExpr(rel, lfirst_oid(rlspcell)); - } - list_free_ext(tab->changedRLSPolicies); - - /* recreate views */ - foreach_cell(view_def_cell, tab->changedViewDefs) { - 
CommandCounterIncrement(); - char* cmd_str = (char*)lfirst(view_def_cell); - List* raw_parsetree_list = raw_parser(cmd_str); - Node* stmt = (Node*)linitial(raw_parsetree_list); - Assert(IsA(stmt, ViewStmt)); - DefineView((ViewStmt*)stmt, cmd_str); - } - list_free_ext(tab->changedViewOids); - list_free_ext(tab->changedViewDefs); - - if (cmd->is_first || cmd->after_name != NULL) { - tab->is_first_after = true; - CommandCounterIncrement(); - - AlterColumnToFirstAfter(tab, rel, cmd, attnum); - } -} - -static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod) -{ - TupleDesc rd_att = rel->rd_att; - AutoIncrement* aexpr = NULL; - Node* expr = NULL; - AttrDefault* defval = rd_att->constr->defval; - int ndef = rd_att->constr->num_defval; - - CheckAutoIncrementDatatype(targettype, rd_att->attrs[attrno - 1].attname.data); - while (--ndef >= 0) { - if (attrno == defval[ndef].adnum) { - expr = (Node*)stringToNode_skip_extern_fields(defval[ndef].adbin); - break; - } - } - Assert(expr != NULL); - Assert(IsA(expr, AutoIncrement)); - aexpr = (AutoIncrement*)expr; - (void)find_coercion_pathway(INT16OID, targettype, COERCION_ASSIGNMENT, &aexpr->autoincin_funcid); - (void)find_coercion_pathway(targettype, INT16OID, COERCION_ASSIGNMENT, &aexpr->autoincout_funcid); - aexpr->expr = strip_implicit_coercions(aexpr->expr); - aexpr->expr = coerce_to_target_type(NULL, /* no UNKNOWN params */ - aexpr->expr, - exprType(aexpr->expr), - targettype, - targettypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); - return (Node*)aexpr; -} - -/* - * findout view which depend on proc, then rebuild it. It will check view's - * column type and name(checkViewTupleDesc) when rebuild the view. 
- */ -void RebuildDependViewForProc(Oid proc_oid) -{ - ScanKeyData key[2]; - SysScanDesc scan = NULL; - HeapTuple tup = NULL; - List *oid_list = NIL; - - /* open pg_depend to find which view depend on this proc */ - Relation depRel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ProcedureRelationId)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(proc_oid)); - - scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 2, key); - while (HeapTupleIsValid((tup = systable_getnext(scan)))) { - Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); - - if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_NORMAL) { - oid_list = lappend_oid(oid_list, depform->objid); - } - } - systable_endscan(scan); - heap_close(depRel, AccessShareLock); - - /* rebuild view by rewrite oid */ - ListCell *cell = NULL; - foreach(cell, oid_list) { - Oid objid = lfirst_oid(cell); - Oid view_oid = get_rewrite_relid(objid, true); - if (!OidIsValid(view_oid)) { - continue; - } - tup = SearchSysCache1(RELOID, ObjectIdGetDatum(view_oid)); - if (!HeapTupleIsValid(tup)) { - continue; - } - Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tup); - if (reltup->relkind != RELKIND_VIEW) { - ReleaseSysCache(tup); - continue; - } - - /* get rebuild view sql */ - char *view_def = GetCreateViewCommand(NameStr(reltup->relname), tup, reltup, objid, view_oid); - ReleaseSysCache(tup); - - CommandCounterIncrement(); - List* raw_parsetree_list = raw_parser(view_def); - Node* stmt = (Node*)linitial(raw_parsetree_list); - Assert(IsA(stmt, ViewStmt)); - DefineView((ViewStmt*)stmt, view_def); - pfree(view_def); - list_free(raw_parsetree_list); - } - list_free_ext(oid_list); -} - -static void check_unsupported_charset_for_column(Oid collation, const char* col_name) -{ - if (!OidIsValid(collation)) { - return; - } - - int attcharset 
= get_valid_charset_by_collation(collation); - if (attcharset != PG_SQL_ASCII && attcharset != GetDatabaseEncoding()) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("difference between the charset of column %s and the database encoding has not supported", - col_name))); - } -} diff --git a/contrib/whale/plugin_parser/gram.y b/contrib/whale/plugin_parser/gram.y index 0a58b0200..3d8c36c71 100644 --- a/contrib/whale/plugin_parser/gram.y +++ b/contrib/whale/plugin_parser/gram.y @@ -3032,6 +3032,7 @@ opt_boolean_or_string: * is the same, so we don't need to distinguish them here. */ | ColId_or_Sconst { $$ = $1; } + | BINARY { $$ = "binary";} ; /* Timezone values can be: @@ -7662,7 +7663,7 @@ master_key_elem: // len is not filled on purpose ?? $$ = (Node*) n; } - | KEY_PATH '=' ColId + | KEY_PATH '=' ColId_or_Sconst { ClientLogicGlobalParam *n = makeNode (ClientLogicGlobalParam); n->key = ClientLogicGlobalProperty::CMK_KEY_PATH; @@ -9868,6 +9869,34 @@ CreateSeqStmt: n->sequence = $5; n->options = $6; + n->missing_ok = false; + n->ownerId = InvalidOid; +/* PGXC_BEGIN */ + n->is_serial = false; +/* PGXC_END */ + n->uuid = 0; + n->canCreateTempSeq = false; + $$ = (Node *)n; + } + | CREATE OptTemp opt_large_seq SEQUENCE IF_P NOT EXISTS qualified_name OptSeqOptList + { + CreateSeqStmt *n = makeNode(CreateSeqStmt); + $8->relpersistence = $2; + n->is_large = $3; +#ifdef ENABLE_MULTIPLE_NODES + if (n->is_large) { + const char* message = "large sequence is not supported."; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(ERROR, + (errmodule(MOD_PARSER), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("large sequence is not supported."))); + } +#endif + + n->sequence = $8; + n->options = $9; + n->missing_ok = true; n->ownerId = InvalidOid; /* PGXC_BEGIN */ n->is_serial = false; @@ -17674,23 +17703,25 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = true; $$ = (Node *)n; } - | ALTER TABLE 
relation_expr RENAME TO name + | ALTER TABLE relation_expr RENAME TO qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $3; n->subname = NULL; - n->newname = $6; + n->newname = $6->relname; + n->newschema = $6->schemaname; n->missing_ok = false; $$ = (Node *)n; } - | ALTER TABLE IF_P EXISTS relation_expr RENAME TO name + | ALTER TABLE IF_P EXISTS relation_expr RENAME TO qualified_name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_TABLE; n->relation = $5; n->subname = NULL; - n->newname = $8; + n->newname = $8->relname; + n->newschema = $8->schemaname; n->missing_ok = true; $$ = (Node *)n; } @@ -18861,6 +18892,16 @@ AlterSubscriptionStmt: n->options = list_make1(makeDefElem("enabled", (Node *)makeInteger(TRUE))); $$ = (Node *)n; + } + | ALTER SUBSCRIPTION name DISABLE_P + { + AlterSubscriptionStmt *n = + makeNode(AlterSubscriptionStmt); + n->refresh = false; + n->subname = $3; + n->options = list_make1(makeDefElem("enabled", + (Node *)makeInteger(FALSE))); + $$ = (Node *)n; } ; /***************************************************************************** @@ -19671,7 +19712,7 @@ load_when_option_item: ; /* - three string formats used to be compatible with orafce + three string formats used to be compatible with oracle 1. string 2. 'string' 3. 
"string" @@ -25562,6 +25603,7 @@ opt_evtime_unit: $$ = list_make1(makeIntConst(INTERVAL_MASK(YEAR) | INTERVAL_MASK(MONTH), @1)); } + ; opt_interval: YEAR_P @@ -29736,9 +29778,9 @@ makeStringConst(char *str, int location) A_Const *n = makeNode(A_Const); - if (NULL == str || (0 == strlen(str) && !ACCEPT_EMPTY_STR)) + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { - if (NULL == str || 0 == strlen(str)) + if (NULL == str || (0 == strlen(str) && !ACCEPT_EMPTY_STR)) { n->val.type = T_Null; n->val.val.str = str; diff --git a/contrib/whale/plugin_utils/adt/numeric.cpp b/contrib/whale/plugin_utils/adt/numeric.cpp index 6412ff70f..159e955d8 100644 --- a/contrib/whale/plugin_utils/adt/numeric.cpp +++ b/contrib/whale/plugin_utils/adt/numeric.cpp @@ -216,7 +216,6 @@ static char* get_str_from_var_sci(NumericVar* var, int rscale); static void apply_typmod(NumericVar* var, int32 typmod); static int32 numericvar_to_int32(const NumericVar* var, bool can_ignore = false); -static double numeric_to_double_no_overflow(Numeric num); static double numericvar_to_double_no_overflow(NumericVar* var); static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup); @@ -5397,7 +5396,7 @@ void int64_to_numericvar(int64 val, NumericVar* var) /* * Convert numeric to float8; if out of range, return +/- HUGE_VAL */ -static double numeric_to_double_no_overflow(Numeric num) +double numeric_to_double_no_overflow(Numeric num) { char* tmp = NULL; double val; @@ -7952,6 +7951,45 @@ Datum numeric_interval(PG_FUNCTION_ARGS) CHECK_RETNULL_RETURN_DATUM(result); } +/* Convert numeric to interval by typmod */ +Datum numeric_to_interval(PG_FUNCTION_ARGS) +{ + Datum num = PG_GETARG_DATUM(0); + int32 typmod = PG_GETARG_INT32(1); + Oid collation = PG_GET_COLLATION(); + Datum result; + StringInfoData str; + errno_t errorno = 0; + int str_len; + char* buf = NULL; + char* cp = NULL; + + CHECK_RETNULL_INIT(); + + initStringInfo(&str); + appendStringInfoString(&str, 
DatumGetCString(CHECK_RETNULL_CALL1(numeric_out_with_zero, collation, num))); + cp = str.data; + + if (*cp == '.' || (*cp == '-' && *(cp + 1) == '.')) { + str_len = str.len + 2; + buf = (char*)palloc0(str_len); + if (*cp == '.') { + errorno = snprintf_s(buf, str_len, str_len - 1, "0.%s", cp + 1); + } else { + errorno = snprintf_s(buf, str_len, str_len - 1, "-0.%s", cp + 2); + } + securec_check_ss(errorno, "\0", "\0"); + resetStringInfo(&str); + appendStringInfoString(&str, buf); + pfree_ext(buf); + } + + result = CHECK_RETNULL_CALL3( + interval_in, collation, CStringGetDatum(str.data), ObjectIdGetDatum(InvalidOid), Int32GetDatum(typmod)); + pfree_ext(str.data); + CHECK_RETNULL_RETURN_DATUM(result); +} + ScalarVector* vnumeric_sum(PG_FUNCTION_ARGS) { ScalarVector* pVector = (ScalarVector*)PG_GETARG_DATUM(0); @@ -20469,4 +20507,4 @@ void numeric_aggfn_info_change(Oid aggfn_oid, Oid *transfn_oid, Oid *transtype, { numeric_transfn_info_change(aggfn_oid, transfn_oid, transtype); numeric_finalfn_info_change(aggfn_oid, finalfn_oid); -} \ No newline at end of file +} diff --git a/contrib/whale/plugin_utils/adt/selfuncs.cpp b/contrib/whale/plugin_utils/adt/selfuncs.cpp index 773041c11..a83b5e3fd 100644 --- a/contrib/whale/plugin_utils/adt/selfuncs.cpp +++ b/contrib/whale/plugin_utils/adt/selfuncs.cpp @@ -151,7 +151,7 @@ #include "utils/selfuncs.h" #include "utils/spccache.h" #include "utils/syscache.h" -#include "plugin_utils/timestamp.h" +#include "utils/timestamp.h" #include "utils/snapmgr.h" #include "utils/typcache.h" #include "utils/memutils.h" @@ -183,7 +183,6 @@ static void convert_bytea_to_scalar( static double convert_one_string_to_scalar(const char* value, int rangelo, int rangehi); static double convert_one_bytea_to_scalar(unsigned char* value, int valuelen, int rangelo, int rangehi); static char* convert_string_datum(Datum value, Oid typid); -static double convert_timevalue_to_scalar(Datum value, Oid typid); static void examine_simple_variable(PlannerInfo* root, 
Var* var, VariableStatData* vardata); static bool get_variable_range(PlannerInfo* root, VariableStatData* vardata, Oid sortop, Datum* min, Datum* max); static bool get_actual_variable_range(PlannerInfo* root, VariableStatData* vardata, Oid sortop, Datum* min, Datum* max); @@ -4375,7 +4374,7 @@ static double convert_one_bytea_to_scalar(unsigned char* value, int valuelen, in /* * Do convert_to_scalar()'s work for any timevalue data type. */ -static double convert_timevalue_to_scalar(Datum value, Oid typid) +double convert_timevalue_to_scalar(Datum value, Oid typid) { switch (typid) { case TIMESTAMPOID: diff --git a/contrib/whale/tablecmds.cpp b/contrib/whale/tablecmds.cpp index f564ec61c..736434ff2 100644 --- a/contrib/whale/tablecmds.cpp +++ b/contrib/whale/tablecmds.cpp @@ -37,6 +37,7 @@ #include "access/multixact.h" #include "catalog/catalog.h" #include "catalog/dependency.h" +#include "catalog/gs_matview.h" #include "catalog/heap.h" #include "catalog/index.h" #include "catalog/indexing.h" @@ -161,7 +162,7 @@ #include "access/heapam.h" #include "utils/typcache.h" #include "utils/numeric.h" -#include "plugin_utils/timestamp.h" +#include "utils/timestamp.h" #include "catalog/pg_database.h" #include "catalog/pg_authid.h" #include "catalog/pg_auth_members.h" @@ -780,6 +781,8 @@ static int128 EvaluateAutoIncrement(Relation rel, TupleDesc desc, AttrNumber att static void SetRelAutoIncrement(Relation rel, TupleDesc desc, int128 autoinc); static Node* RecookAutoincAttrDefault(Relation rel, int attrno, Oid targettype, int targettypmod); static void check_unsupported_charset_for_column(Oid collation, const char* col_name); +static void AlterTableNamespaceDependentProcess(Relation classRel ,Relation rel, Oid oldNspOid, + Oid nspOid, ObjectAddresses* objsMoved, char* newrelname); inline static bool CStoreSupportATCmd(AlterTableType cmdtype) { @@ -3664,6 +3667,13 @@ void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExec } delrel = 
try_relation_open(relOid, NoLock); + /*Not allow to drop mlog*/ + if (relkind == RELKIND_RELATION && delrel != NULL && ISMLOG(delrel->rd_rel->relname.data)) { + /*If we can find a base table, it is mlog.*/ + if (get_matview_mlog_baserelid(relOid)!= InvalidOid) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Use 'Drop table' to drop mlog table %s is not allowed.",delrel->rd_rel->relname.data))); + } /* * Open up drop table command for table being redistributed right now. * @@ -6577,7 +6587,7 @@ ObjectAddress RenameRelation(RenameStmt* stmt) #endif /* Do the work */ - RenameRelationInternal(relid, stmt->newname); + RenameRelationInternal(relid, stmt->newname, stmt->newschema); /* * Record the changecsn of the table that defines the index */ @@ -6602,13 +6612,22 @@ ObjectAddress RenameRelation(RenameStmt* stmt) * the sequence name should probably be removed from the * sequence, AFAIK there's no need for it to be there. */ -void RenameRelationInternal(Oid myrelid, const char* newrelname) +void RenameRelationInternal(Oid myrelid, const char* newrelname, char* newschema) { Relation targetrelation; Relation relrelation; /* for RELATION relation */ HeapTuple reltup; Form_pg_class relform; Oid namespaceId; + Oid oldNspOid = InvalidOid; + bool needChangeNsp = false; + ObjectAddresses* objsMoved = NULL; + ObjectAddress thisobj; + bool is_present = false; + + thisobj.classId = RelationRelationId; + thisobj.objectId = myrelid; + thisobj.objectSubId = 0; /* * Grab an exclusive lock on the target table, index, sequence or view, @@ -6616,6 +6635,26 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) */ targetrelation = relation_open(myrelid, AccessExclusiveLock); + if (newschema != NULL) { + if (targetrelation->rd_mlogoid != InvalidOid) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Un-support feature"), + errdetail("table owning matview doesn't support this ALTER yet.")))); + } + + if (targetrelation->rd_rel->relkind 
== RELKIND_MATVIEW) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ALTER MATERIALIZED VIEW is not yet supported."))); + } + + /* Permission check */ + if (!pg_class_ownercheck(RelationGetRelid(targetrelation), GetUserId())) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, RelationGetRelationName(targetrelation)); + } + } + if (RelationIsSubPartitioned(targetrelation)) { ereport( ERROR, @@ -6647,6 +6686,24 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) relform = (Form_pg_class)GETSTRUCT(reltup); + + oldNspOid = namespaceId; + if (newschema != NULL) { + /* Get and lock schema OID and check its permissions. */ + RangeVar* newrv = makeRangeVar(newschema, (char*)newrelname, -1); + Oid newNspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL, '\0'); + + needChangeNsp = (newNspOid != namespaceId); + if (needChangeNsp) { + /* common checks on switching namespaces */ + CheckSetNamespace(namespaceId, newNspOid, RelationRelationId, myrelid); + ledger_check_switch_schema(namespaceId, newNspOid); + objsMoved = new_object_addresses(); + namespaceId = newNspOid; + is_present = object_address_present(&thisobj, objsMoved); + } + } + /* * Check relation name to ensure that it doesn't conflict with existing synonym. 
*/ @@ -6656,8 +6713,17 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) get_namespace_name(namespaceId)))); } - if (get_relname_relid(newrelname, namespaceId) != InvalidOid) - ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname))); + if (get_relname_relid(newrelname, namespaceId) != InvalidOid) { + if (newschema != NULL) { + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_TABLE), + errmsg("relation \"%s\" already exists in schema \"%s\"", + newrelname, + newschema))); + } else { + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists", newrelname))); + } + } #ifdef ENABLE_MULTIPLE_NODES if (RelationIsTsStore(targetrelation)) { @@ -6684,6 +6750,9 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) */ (void)namestrcpy(&(relform->relname), newrelname); + /* Update pg_class tuple with new nsp. */ + relform->relnamespace = namespaceId; + simple_heap_update(relrelation, &reltup->t_self, reltup); /* keep the system catalog indexes current */ @@ -6700,14 +6769,33 @@ void RenameRelationInternal(Oid myrelid, const char* newrelname) renamePartitionedTable(myrelid, newrelname); } + if (needChangeNsp && !is_present) { + if (changeDependencyFor(RelationRelationId, myrelid, NamespaceRelationId, oldNspOid, namespaceId) != 1) { + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("failed to change schema dependency for relation \"%s\"", NameStr(relform->relname)))); + } + + add_exact_object_address(&thisobj, objsMoved); + } + tableam_tops_free_tuple(reltup); heap_close(relrelation, RowExclusiveLock); - /* - * Also rename the associated type, if any. 
- */ - if (OidIsValid(targetrelation->rd_rel->reltype)) - RenameTypeInternal(targetrelation->rd_rel->reltype, newrelname, namespaceId); + if (needChangeNsp && !is_present) { + AlterTableNamespaceDependentProcess(relrelation, targetrelation, oldNspOid, namespaceId, objsMoved, + (char*)newrelname); + if (targetrelation->rd_isblockchain) { + rename_hist_by_newnsp(myrelid, newschema); + } + free_object_addresses(objsMoved); + } else { + /* + * Also rename the associated type, if any. + */ + if (OidIsValid(targetrelation->rd_rel->reltype)) + RenameTypeInternal(targetrelation->rd_rel->reltype, newrelname, oldNspOid); + } /* * Also rename the associated constraint, if any. @@ -21492,8 +21580,16 @@ void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object AlterRelationNamespaceInternal(classRel, RelationGetRelid(rel), oldNspOid, nspOid, true, objsMoved); + AlterTableNamespaceDependentProcess(classRel, rel, oldNspOid, nspOid, objsMoved, NULL); + + heap_close(classRel, RowExclusiveLock); +} + +static void AlterTableNamespaceDependentProcess(Relation classRel ,Relation rel, Oid oldNspOid, + Oid nspOid, ObjectAddresses* objsMoved, char* newrelname) +{ /* Fix the table's row type too */ - (void)AlterTypeNamespaceInternal(rel->rd_rel->reltype, nspOid, false, false, objsMoved); + (void)AlterTypeNamespaceInternal(rel->rd_rel->reltype, nspOid, false, false, objsMoved, newrelname); /* Change the table's set type too */ TupleDesc tupDesc = rel->rd_att; @@ -21510,8 +21606,6 @@ void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object AlterSeqNamespaces(classRel, rel, oldNspOid, nspOid, objsMoved, AccessExclusiveLock); AlterConstraintNamespaces(RelationGetRelid(rel), oldNspOid, nspOid, false, objsMoved); } - - heap_close(classRel, RowExclusiveLock); } /* @@ -21920,6 +22014,51 @@ void RangeVarCallbackOwnsTable(const RangeVar* relation, Oid relId, Oid oldRelId } } +/* + * This is intended as a callback for RangeVarGetRelidExtended(). 
It allows + * the relation to be locked only if (1) it's a materialized view and + * (2) the current user is the owner (or the superuser). + * This meets the permission-checking needs of and REFRESH MATERIALIZED VIEW; + * we expose it here so that it can be used by all. + */ +void RangeVarCallbackOwnsMatView(const RangeVar* relation, Oid relId, Oid oldRelId, bool target_is_partition, void* arg) +{ + char relkind; + + /* Nothing to do if the relation was not found. */ + if (!OidIsValid(relId)) { + return; + } + + /* + * If the relation does exist, check whether it's an index. But note that + * the relation might have been dropped between the time we did the name + * lookup and now. In that case, there's nothing to do. + */ + relkind = get_rel_relkind(relId); + if (!relkind) { + return; + } + if (relkind != RELKIND_RELATION && + relkind != RELKIND_TOASTVALUE && + relkind != RELKIND_MATVIEW) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is not a table or materialized view", relation->relname))); + } + + /* Check permissions */ + AclResult aclresult = pg_class_aclcheck(relId, GetUserId(), ACL_INSERT | ACL_DELETE); + if (aclresult != ACLCHECK_OK) { + aclcheck_error(aclresult, ACL_KIND_CLASS, relation->relname); + } + + bool is_owner = pg_class_ownercheck(relId, GetUserId()); + if (!is_owner) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, relation->relname); + } +} + /* * Callback to RangeVarGetRelidExtended(), similar to * RangeVarCallbackOwnsTable() but without checks on the type of the relation. 
@@ -28041,6 +28180,21 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) ATUnusableGlobalIndex(partTableRel); } } +#ifdef USE_SPQ +void spq_btbuild_update_pg_class(Relation heap, Relation index) +{ + List *options = NIL; + DefElem *opt; + opt = makeNode(DefElem); + opt->type = T_DefElem; + opt->defnamespace = NULL; + opt->defname = "spq_build"; + opt->defaction = DEFELEM_SET; + opt->arg = (Node *)makeString("finish"); + options = lappend(options, opt); + ATExecSetRelOptions(index, options, AT_SetRelOptions, ShareUpdateExclusiveLock); +} +#endif void CheckSrcListSubPartitionForSplit(Relation rel, Oid partOid, Oid subPartOid) { @@ -31270,12 +31424,6 @@ void CreateWeakPasswordDictionary(CreateWeakPasswordDictionaryStmt* stmt) } rel = heap_open(GsGlobalConfigRelationId, RowExclusiveLock); - if (!OidIsValid(rel)) { - ereport(ERROR, - (errcode(ERRCODE_SYSTEM_ERROR), - errmsg("could not open gs_global_config"))); - return; - } foreach (pwd_obj, stmt->weak_password_string_list) { Datum values[Natts_gs_global_config] = {0}; -- Gitee From 2049a2c9279c2ad90852b383f80cd12c22095a55 Mon Sep 17 00:00:00 2001 From: lukeman Date: Sat, 14 Oct 2023 09:27:35 +0800 Subject: [PATCH 020/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3time=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E5=90=8Ctimestamp=E7=B1=BB=E5=9E=8B=E7=9A=84=E4=BA=8C?= =?UTF-8?q?=E5=85=83=E5=92=8Cbetween=E6=AF=94=E8=BE=83=E8=BF=94=E5=9B=9E?= =?UTF-8?q?=E7=BB=93=E6=9E=9C=E5=92=8CMysql=E4=B8=8D=E4=B8=80=E8=87=B4?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/between.out | 49 +++++ .../time_operator_test.out | 169 ++++++++++++++++++ contrib/dolphin/plugin_utils/adt/date.cpp | 149 +++++++++++++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 150 ++++++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 50 ++++++ contrib/dolphin/sql/builtin_funcs/between.sql | 9 + .../time_operator_test.sql | 30 ++++ 
.../upgrade_script/dolphin--2.0--3.0.sql | 107 +++++++++++ 8 files changed, 713 insertions(+) diff --git a/contrib/dolphin/expected/builtin_funcs/between.out b/contrib/dolphin/expected/builtin_funcs/between.out index 6792123c0..3466d5d76 100644 --- a/contrib/dolphin/expected/builtin_funcs/between.out +++ b/contrib/dolphin/expected/builtin_funcs/between.out @@ -671,6 +671,55 @@ explain (costs off) select * from t_b1 where i between '1' and '2'; Index Cond: ((i >= '1'::bpchar) AND (i <= '2'::bpchar)) (3 rows) +--test time cmp of time and date +select date'2018-12-31' between time'23:56:59' and timestamp'2018-12-31 23:56:59'; + b_between_and +--------------- + f +(1 row) + +select date'2018-12-31' not between time'23:56:59' and timestamp'2018-12-31 23:56:59'; + b_not_between_and +------------------- + t +(1 row) + +select date'2018-12-31' between symmetric time'23:56:59' and timestamp'2018-12-31 23:56:59'; + b_sym_between_and +------------------- + f +(1 row) + +select date'2018-12-31' not between symmetric time'23:56:59' and timestamp'2018-12-31 23:56:59'; + b_not_sym_between_and +----------------------- + t +(1 row) + +select time'23:56:59' between date'2018-12-31' and timestamp'2018-12-31 23:56:59'; + b_between_and +--------------- + f +(1 row) + +select time'23:56:59' not between date'2018-12-31' and timestamp'2018-12-31 23:56:59'; + b_not_between_and +------------------- + t +(1 row) + +select time'23:56:59' between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; + b_between_and +--------------- + f +(1 row) + +select time'23:56:59' not between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; + b_not_between_and +------------------- + t +(1 row) + drop schema db_between cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table t_between_and_0007 diff --git a/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out index 
2a4de91a9..d18e18e3a 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out @@ -1875,3 +1875,172 @@ select pg_typeof(sysdate(3) / 2); double precision (1 row) +-- CURRENT_DATE (00:00:00) == (current date) 00:00:00 +select CURRENT_DATE = time'00:00:00'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') = time'11:11:11'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') = time'11:11:11'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') = time'11:11:11'; + ?column? +---------- + t +(1 row) + +select CURRENT_DATE <> time'00:00:00'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') <> time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') <> time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') <> time'11:11:11'; + ?column? +---------- + f +(1 row) + +select CURRENT_DATE < time'00:00:00'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') < time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') < time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') < time'11:11:11'; + ?column? +---------- + f +(1 row) + +select CURRENT_DATE <= time'00:00:00'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') <= time'11:11:11'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') <= time'11:11:11'; + ?column? 
+---------- + t +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') <= time'11:11:11'; + ?column? +---------- + t +(1 row) + +select CURRENT_DATE > time'00:00:00'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') > time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') > time'11:11:11'; + ?column? +---------- + f +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') > time'11:11:11'; + ?column? +---------- + f +(1 row) + +select CURRENT_DATE >= time'00:00:00'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') >= time'11:11:11'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') >= time'11:11:11'; + ?column? +---------- + t +(1 row) + +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') >= time'11:11:11'; + ?column? +---------- + t +(1 row) + +select time'11:11:11' < date'2023-01-01'; + ?column? +---------- + f +(1 row) + +select time'11:11:11' < datetime'2023-01-01 11:11:11'; + ?column? +---------- + f +(1 row) + +select time'11:11:11' < timestamp'2023-01-01 11:11:11'; + ?column? +---------- + f +(1 row) + +select time'11:11:11' < timestamptz'2023-01-01 11:11:11'; + ?column? 
+---------- + f +(1 row) + diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 420c8ed53..5536c9d87 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -6298,4 +6298,153 @@ Datum adddate_datetime_interval_text(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +#ifdef DOLPHIN +PG_FUNCTION_INFO_V1_PUBLIC(time_eq_timestamp); +extern "C" DLL_PUBLIC Datum time_eq_timestamp(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_ne_timestamp); +extern "C" DLL_PUBLIC Datum time_ne_timestamp(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_lt_timestamp); +extern "C" DLL_PUBLIC Datum time_lt_timestamp(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_le_timestamp); +extern "C" DLL_PUBLIC Datum time_le_timestamp(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_gt_timestamp); +extern "C" DLL_PUBLIC Datum time_gt_timestamp(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_ge_timestamp); +extern "C" DLL_PUBLIC Datum time_ge_timestamp(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_eq_time); +extern "C" DLL_PUBLIC Datum timestamp_eq_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_ne_time); +extern "C" DLL_PUBLIC Datum timestamp_ne_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_lt_time); +extern "C" DLL_PUBLIC Datum timestamp_lt_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_le_time); +extern "C" DLL_PUBLIC Datum timestamp_le_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_gt_time); +extern "C" DLL_PUBLIC Datum timestamp_gt_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_ge_time); +extern "C" DLL_PUBLIC Datum timestamp_ge_time(PG_FUNCTION_ARGS); +/* Convert the time to timestamp type. 
+ Fill in the missing date with the current date */ +static Timestamp time2timestamp(TimeADT timeVal) +{ + Timestamp result; + struct pg_tm tt; + struct pg_tm* tm = &tt; + fsec_t fsec = 0; + GetCurrentDateTime(tm); + time2tm(timeVal, tm, &fsec); + tm2timestamp(tm, fsec, NULL, &result); + return result; +} +/* time_timestamp */ +Datum time_eq_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); +} + +Datum time_ne_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); +} + +Datum time_lt_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); +} + +Datum time_le_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); +} + +Datum time_gt_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); +} + +Datum time_ge_timestamp(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + Timestamp dt1 = time2timestamp(time1); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); +} +/* timestamp_time */ +Datum timestamp_eq_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); +} + +Datum 
timestamp_ne_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); +} + +Datum timestamp_lt_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); +} + +Datum timestamp_le_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); +} + +Datum timestamp_gt_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); +} + +Datum timestamp_ge_time(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + Timestamp dt2 = time2timestamp(time1); + + PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); +} +#endif + #endif diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index d1eb03527..cd2ed7bd8 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -11253,4 +11253,154 @@ Datum timediff_date_text(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +#ifdef DOLPHIN +PG_FUNCTION_INFO_V1_PUBLIC(time_eq_timestamptz); +extern "C" DLL_PUBLIC Datum time_eq_timestamptz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_ne_timestamptz); +extern "C" DLL_PUBLIC Datum time_ne_timestamptz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_lt_timestamptz); +extern "C" DLL_PUBLIC Datum time_lt_timestamptz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_le_timestamptz); +extern "C" DLL_PUBLIC Datum time_le_timestamptz(PG_FUNCTION_ARGS); 
+PG_FUNCTION_INFO_V1_PUBLIC(time_gt_timestamptz); +extern "C" DLL_PUBLIC Datum time_gt_timestamptz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(time_ge_timestamptz); +extern "C" DLL_PUBLIC Datum time_ge_timestamptz(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_eq_time); +extern "C" DLL_PUBLIC Datum timestamptz_eq_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_ne_time); +extern "C" DLL_PUBLIC Datum timestamptz_ne_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_lt_time); +extern "C" DLL_PUBLIC Datum timestamptz_lt_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_le_time); +extern "C" DLL_PUBLIC Datum timestamptz_le_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_gt_time); +extern "C" DLL_PUBLIC Datum timestamptz_gt_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_ge_time); +extern "C" DLL_PUBLIC Datum timestamptz_ge_time(PG_FUNCTION_ARGS); + +static Timestamp time2timestamptz(TimeADT timeVal) +{ + TimestampTz result; + struct pg_tm tt; + struct pg_tm* tm = &tt; + fsec_t fsec = 0; + GetCurrentDateTime(tm); + time2tm(timeVal, tm, &fsec); + /* find the current session time zone offset. 
*/ + int tz = DetermineTimeZoneOffset(tm, session_timezone); + tm2timestamp(tm, fsec, &tz, &result); + return result; +} +/* time_timestamp */ +Datum time_eq_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); +} + +Datum time_ne_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); +} + +Datum time_lt_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); +} + +Datum time_le_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); +} + +Datum time_gt_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); +} + +Datum time_ge_timestamptz(PG_FUNCTION_ARGS) +{ + TimeADT time1 = PG_GETARG_TIMEADT(0); + TimestampTz dt1 = time2timestamptz(time1); + TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); +} +/* timestamptz_time */ +Datum timestamptz_eq_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); +} + +Datum timestamptz_ne_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = 
PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); +} + +Datum timestamptz_lt_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); +} + +Datum timestamptz_le_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); +} + +Datum timestamptz_gt_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); +} + +Datum timestamptz_ge_time(PG_FUNCTION_ARGS) +{ + TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); + TimeADT time1 = PG_GETARG_TIMEADT(1); + TimestampTz dt2 = time2timestamptz(time1); + + PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); +} +#endif + #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index c6572ace7..5fe9c2bdf 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -4,3 +4,53 @@ CREATE FUNCTION pg_catalog.dolphin_invoke() DROP FUNCTION IF EXISTS pg_catalog.date_cast(cstring, boolean); DROP FUNCTION IF EXISTS pg_catalog.timestamp_cast(cstring, oid, integer, boolean); + +DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); +DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); +DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); +DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp without time zone); +DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp without time zone); +DROP 
OPERATOR IF EXISTS pg_catalog.>(time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamp (time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_ne_timestamp (time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_le_timestamp (time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_lt_timestamp (time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_ge_timestamp (time, timestamp without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamp (time, timestamp without time zone); +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp without time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp without time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp without time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp without time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp without time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_eq_time (timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_ne_time (timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_le_time (timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_lt_time (timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_ge_time (timestamp without time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_gt_time (timestamp without time zone, time); + +DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp with time zone); +DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp with time zone); +DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp with time zone); +DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp with time zone); +DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp with time zone); +DROP OPERATOR IF EXISTS 
pg_catalog.>(time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamptz (time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_ne_timestamptz (time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_le_timestamptz (time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_lt_timestamptz (time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_ge_timestamptz (time, timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamptz (time, timestamp with time zone); +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp with time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp with time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp with time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp with time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp with time zone, time); +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_eq_time (timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ne_time (timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_le_time (timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_lt_time (timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ge_time (timestamp with time zone, time); +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_gt_time (timestamp with time zone, time); diff --git a/contrib/dolphin/sql/builtin_funcs/between.sql b/contrib/dolphin/sql/builtin_funcs/between.sql index 2b858ce75..42769244b 100644 --- a/contrib/dolphin/sql/builtin_funcs/between.sql +++ b/contrib/dolphin/sql/builtin_funcs/between.sql @@ -156,5 +156,14 @@ explain (costs off) select * from t_b1 where f between 1 and 2; explain (costs off) select * from t_b1 where g between '1' and '2'; explain (costs off) select * from t_b1 where h 
between '1' and '2'; explain (costs off) select * from t_b1 where i between '1' and '2'; +--test time cmp of time and date +select date'2018-12-31' between time'23:56:59' and timestamp'2018-12-31 23:56:59'; +select date'2018-12-31' not between time'23:56:59' and timestamp'2018-12-31 23:56:59'; +select date'2018-12-31' between symmetric time'23:56:59' and timestamp'2018-12-31 23:56:59'; +select date'2018-12-31' not between symmetric time'23:56:59' and timestamp'2018-12-31 23:56:59'; +select time'23:56:59' between date'2018-12-31' and timestamp'2018-12-31 23:56:59'; +select time'23:56:59' not between date'2018-12-31' and timestamp'2018-12-31 23:56:59'; +select time'23:56:59' between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; +select time'23:56:59' not between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; drop schema db_between cascade; reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql b/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql index 2affe4167..bad4e97ae 100644 --- a/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql +++ b/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql @@ -750,3 +750,33 @@ select sysdate(3) * 2; select pg_typeof(sysdate(3) * 2); select sysdate(3) / 2; select pg_typeof(sysdate(3) / 2); + +-- CURRENT_DATE (00:00:00) == (current date) 00:00:00 +select CURRENT_DATE = time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') = time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') = time'11:11:11'; +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') = time'11:11:11'; +select CURRENT_DATE <> time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') <> time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') <> time'11:11:11'; +select (CURRENT_DATE::timestamp + 
interval'11 hour 11 min 11 sec') <> time'11:11:11'; +select CURRENT_DATE < time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') < time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') < time'11:11:11'; +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') < time'11:11:11'; +select CURRENT_DATE <= time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') <= time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') <= time'11:11:11'; +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') <= time'11:11:11'; +select CURRENT_DATE > time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') > time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') > time'11:11:11'; +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') > time'11:11:11'; +select CURRENT_DATE >= time'00:00:00'; +select (CURRENT_DATE::date + interval'11 hour 11 min 11 sec') >= time'11:11:11'; +select (CURRENT_DATE::datetime + interval'11 hour 11 min 11 sec') >= time'11:11:11'; +select (CURRENT_DATE::timestamp + interval'11 hour 11 min 11 sec') >= time'11:11:11'; +select time'11:11:11' < date'2023-01-01'; +select time'11:11:11' < datetime'2023-01-01 11:11:11'; +select time'11:11:11' < timestamp'2023-01-01 11:11:11'; +select time'11:11:11' < timestamptz'2023-01-01 11:11:11'; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index d13546fbb..03b78c550 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -7,3 +7,110 @@ CREATE OR REPLACE FUNCTION pg_catalog.date_cast(cstring, boolean) RETURNS date L DROP FUNCTION IF EXISTS pg_catalog.timestamp_cast(cstring, oid, integer, boolean); CREATE OR REPLACE FUNCTION pg_catalog.timestamp_cast(cstring, oid, integer, 
boolean) RETURNS timestamp without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'timestamp_cast'; + +--CREATE TIME_TIMESTAMP'S COMPARATION FUNCTION +DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_eq_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_eq_timestamp'; +DROP FUNCTION IF EXISTS pg_catalog.time_ne_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_ne_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_ne_timestamp'; +DROP FUNCTION IF EXISTS pg_catalog.time_le_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_le_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_le_timestamp'; +DROP FUNCTION IF EXISTS pg_catalog.time_lt_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_lt_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_lt_timestamp'; +DROP FUNCTION IF EXISTS pg_catalog.time_ge_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_ge_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_ge_timestamp'; +DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamp (time, timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_gt_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_gt_timestamp'; +--CREATE TIME_TIMESTAMP CMP OPERATOR +DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.=(leftarg = time, rightarg = timestamp 
without time zone, procedure = time_eq_timestamp, restrict = eqsel, join = eqjoinsel, MERGES); +DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.<>(leftarg = time, rightarg = timestamp without time zone, procedure = time_ne_timestamp, restrict = neqsel, join = neqjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.<=(leftarg = time, rightarg = timestamp without time zone, procedure = time_le_timestamp, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.<(leftarg = time, rightarg = timestamp without time zone, procedure = time_lt_timestamp, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.>=(leftarg = time, rightarg = timestamp without time zone, procedure = time_ge_timestamp, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>(time, timestamp without time zone); +CREATE OPERATOR pg_catalog.>(leftarg = time, rightarg = timestamp without time zone, procedure = time_gt_timestamp, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); +--CREATE TIMESTAMP_TIME'S COMPARATION FUNCTION +DROP FUNCTION IF EXISTS pg_catalog.timestamp_eq_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_eq_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_eq_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamp_ne_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_ne_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 
'timestamp_ne_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamp_le_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_le_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_le_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamp_lt_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_lt_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_lt_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamp_ge_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_ge_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_ge_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamp_gt_time (timestamp without time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_gt_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_gt_time'; +--CREATE TIMESTAMP_TIME CMP OPERATOR +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp without time zone, time); +CREATE OPERATOR pg_catalog.=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_eq_time, restrict = eqsel, join = eqjoinsel, MERGES); +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp without time zone, time); +CREATE OPERATOR pg_catalog.<>(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_ne_time, restrict = neqsel, join = neqjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp without time zone, time); +CREATE OPERATOR pg_catalog.<=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_le_time, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp without time zone, time); +CREATE OPERATOR 
pg_catalog.<(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_lt_time, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp without time zone, time); +CREATE OPERATOR pg_catalog.>=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_ge_time, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp without time zone, time); +CREATE OPERATOR pg_catalog.>(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); + +--CREATE TIME_TIMESTAMPTZ'S COMPARATION FUNCTION +DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_eq_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_eq_timestamptz'; +DROP FUNCTION IF EXISTS pg_catalog.time_ne_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_ne_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_ne_timestamptz'; +DROP FUNCTION IF EXISTS pg_catalog.time_le_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_le_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_le_timestamptz'; +DROP FUNCTION IF EXISTS pg_catalog.time_lt_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_lt_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_lt_timestamptz'; +DROP FUNCTION IF EXISTS pg_catalog.time_ge_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE 
FUNCTION pg_catalog.time_ge_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_ge_timestamptz'; +DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamptz (time, timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.time_gt_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_gt_timestamptz'; +--CREATE TIME_TIMESTAMPTZ CMP OPERATOR +DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.=(leftarg = time, rightarg = timestamp with time zone, procedure = time_eq_timestamptz, restrict = eqsel, join = eqjoinsel, MERGES); +DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.<>(leftarg = time, rightarg = timestamp with time zone, procedure = time_ne_timestamptz, restrict = neqsel, join = neqjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.<=(leftarg = time, rightarg = timestamp with time zone, procedure = time_le_timestamptz, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.<(leftarg = time, rightarg = timestamp with time zone, procedure = time_lt_timestamptz, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.>=(leftarg = time, rightarg = timestamp with time zone, procedure = time_ge_timestamptz, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>(time, timestamp with time zone); +CREATE OPERATOR pg_catalog.>(leftarg = time, rightarg = timestamp with time zone, procedure = time_gt_timestamptz, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = 
scalarltjoinsel); +--CREATE TIMESTAMPTZ_TIME'S COMPARATION FUNCTION +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_eq_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_eq_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_eq_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ne_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_ne_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_ne_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_le_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_le_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_le_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_lt_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_lt_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_lt_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ge_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_ge_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_ge_time'; +DROP FUNCTION IF EXISTS pg_catalog.timestamptz_gt_time (timestamp with time zone, time) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_gt_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_gt_time'; +--CREATE TIMESTAMPTZ_TIME CMP OPERATOR +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_eq_time, restrict = eqsel, join = eqjoinsel, MERGES); 
+DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.<>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_ne_time, restrict = neqsel, join = neqjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.<=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_le_time, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.<(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_lt_time, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.>=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_ge_time, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp with time zone, time); +CREATE OPERATOR pg_catalog.>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); + -- Gitee From 3db6fcea5fbdb150beb4d2f0d08c56a27bc986af Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 18 Oct 2023 19:00:21 +0800 Subject: [PATCH 021/434] Fix sql script bug. 
--- contrib/dolphin/expected/test_shows_1.out | 3 +- .../upgrade_script/dolphin--2.0--3.0.sql | 48 +++++++++---------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/contrib/dolphin/expected/test_shows_1.out b/contrib/dolphin/expected/test_shows_1.out index 911453b7d..76e509eaa 100644 --- a/contrib/dolphin/expected/test_shows_1.out +++ b/contrib/dolphin/expected/test_shows_1.out @@ -91,7 +91,8 @@ show plugins; --?.* --?.* --?.* -(13 rows) +--?.* +(14 rows) -- show tables test show tables; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 03b78c550..b22cb3d3d 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -22,17 +22,17 @@ CREATE OR REPLACE FUNCTION pg_catalog.time_ge_timestamp (time, timestamp without DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamp (time, timestamp without time zone) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.time_gt_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_gt_timestamp'; --CREATE TIME_TIMESTAMP CMP OPERATOR -DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); + CREATE OPERATOR pg_catalog.=(leftarg = time, rightarg = timestamp without time zone, procedure = time_eq_timestamp, restrict = eqsel, join = eqjoinsel, MERGES); -DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); + CREATE OPERATOR pg_catalog.<>(leftarg = time, rightarg = timestamp without time zone, procedure = time_ne_timestamp, restrict = neqsel, join = neqjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); + CREATE OPERATOR pg_catalog.<=(leftarg = time, rightarg = timestamp without time zone, procedure = time_le_timestamp, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp 
without time zone); + CREATE OPERATOR pg_catalog.<(leftarg = time, rightarg = timestamp without time zone, procedure = time_lt_timestamp, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp without time zone); + CREATE OPERATOR pg_catalog.>=(leftarg = time, rightarg = timestamp without time zone, procedure = time_ge_timestamp, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>(time, timestamp without time zone); + CREATE OPERATOR pg_catalog.>(leftarg = time, rightarg = timestamp without time zone, procedure = time_gt_timestamp, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); --CREATE TIMESTAMP_TIME'S COMPARATION FUNCTION DROP FUNCTION IF EXISTS pg_catalog.timestamp_eq_time (timestamp without time zone, time) CASCADE; @@ -48,17 +48,17 @@ CREATE OR REPLACE FUNCTION pg_catalog.timestamp_ge_time (timestamp without time DROP FUNCTION IF EXISTS pg_catalog.timestamp_gt_time (timestamp without time zone, time) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.timestamp_gt_time (timestamp without time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamp_gt_time'; --CREATE TIMESTAMP_TIME CMP OPERATOR -DROP OPERATOR IF EXISTS pg_catalog.=(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_eq_time, restrict = eqsel, join = eqjoinsel, MERGES); -DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.<>(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_ne_time, restrict = neqsel, join = neqjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.<=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_le_time, COMMUTATOR = >=, 
NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.<(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_lt_time, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.>=(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_ge_time, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>(timestamp without time zone, time); + CREATE OPERATOR pg_catalog.>(leftarg = timestamp without time zone, rightarg = time, procedure = timestamp_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); --CREATE TIME_TIMESTAMPTZ'S COMPARATION FUNCTION @@ -75,17 +75,17 @@ CREATE OR REPLACE FUNCTION pg_catalog.time_ge_timestamptz (time, timestamp with DROP FUNCTION IF EXISTS pg_catalog.time_gt_timestamptz (time, timestamp with time zone) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.time_gt_timestamptz (time, timestamp with time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_gt_timestamptz'; --CREATE TIME_TIMESTAMPTZ CMP OPERATOR -DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.=(leftarg = time, rightarg = timestamp with time zone, procedure = time_eq_timestamptz, restrict = eqsel, join = eqjoinsel, MERGES); -DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.<>(leftarg = time, rightarg = timestamp with time zone, procedure = time_ne_timestamptz, restrict = neqsel, join = neqjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.<=(leftarg = time, rightarg = timestamp with time zone, procedure = time_le_timestamptz, 
COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.<(leftarg = time, rightarg = timestamp with time zone, procedure = time_lt_timestamptz, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>=(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.>=(leftarg = time, rightarg = timestamp with time zone, procedure = time_ge_timestamptz, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>(time, timestamp with time zone); + CREATE OPERATOR pg_catalog.>(leftarg = time, rightarg = timestamp with time zone, procedure = time_gt_timestamptz, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); --CREATE TIMESTAMPTZ_TIME'S COMPARATION FUNCTION DROP FUNCTION IF EXISTS pg_catalog.timestamptz_eq_time (timestamp with time zone, time) CASCADE; @@ -101,16 +101,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_ge_time (timestamp with time z DROP FUNCTION IF EXISTS pg_catalog.timestamptz_gt_time (timestamp with time zone, time) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_gt_time (timestamp with time zone, time) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'timestamptz_gt_time'; --CREATE TIMESTAMPTZ_TIME CMP OPERATOR -DROP OPERATOR IF EXISTS pg_catalog.=(timestamp with time zone, time); + CREATE OPERATOR pg_catalog.=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_eq_time, restrict = eqsel, join = eqjoinsel, MERGES); -DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp with time zone, time); + CREATE OPERATOR pg_catalog.<>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_ne_time, restrict = neqsel, join = neqjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp with time zone, time); + CREATE OPERATOR 
pg_catalog.<=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_le_time, COMMUTATOR = >=, NEGATOR = >, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.<(timestamp with time zone, time); + CREATE OPERATOR pg_catalog.<(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_lt_time, COMMUTATOR = >, NEGATOR = >=, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp with time zone, time); + CREATE OPERATOR pg_catalog.>=(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_ge_time, COMMUTATOR = <=, NEGATOR = <, restrict = scalarltsel, join = scalarltjoinsel); -DROP OPERATOR IF EXISTS pg_catalog.>(timestamp with time zone, time); + CREATE OPERATOR pg_catalog.>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); -- Gitee From 7cfacb294cdfb7984aff0983b22c7cf3bc4c095e Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Wed, 18 Oct 2023 15:58:27 +0800 Subject: [PATCH 022/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9blob=E7=AD=89?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E5=AD=97=E7=AC=A6=E4=B8=B2=E8=BE=93=E5=87=BA?= =?UTF-8?q?=E4=BE=9D=E8=B5=96bytea=5Foutput=E7=9A=84=E7=89=B9=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_cmp_operator_test.out | 24 ++-- .../dolphin/expected/test_charset_collate.out | 12 +- contrib/dolphin/expected/test_condition.out | 108 +++++++++--------- contrib/dolphin/output/b_proto_jdbc.source | 6 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 29 ++++- .../rollback_script/dolphin--3.0--2.0.sql | 11 ++ .../upgrade_script/dolphin--2.0--3.0.sql | 12 ++ 7 files changed, 126 insertions(+), 76 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out 
b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out index e3289233e..c1bf4a38a 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out @@ -589,9 +589,9 @@ select `tinyblob`, `json`, `json` < `tinyblob` as `json `tinyblob` as `json<>tinyblob`, `json` = `tinyblob` as `json=tinyblob`, `json` <=> `tinyblob` as `json<=>tinyblob` from test_json_table; - tinyblob | json | json>tinyblob | json>=tinyblob | jsontinyblob | json=tinyblob | json<=>tinyblob ---------------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- - \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + tinyblob | json | json>tinyblob | json>=tinyblob | jsontinyblob | json=tinyblob | json<=>tinyblob +----------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- + 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `blob`, `json`, @@ -609,9 +609,9 @@ select `blob`, `json`, `json` < `blob` as `json `blob` as `json<>blob`, `json` = `blob` as `json=blob`, `json` <=> `blob` as `json<=>blob` from test_json_table; - blob | json | json>blob | json>=blob | jsonblob | json=blob | json<=>blob ---------------+------------------+-----------+------------+-----------+------------+------------+------------+-----------+------------- - \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + blob | json | json>blob | json>=blob | jsonblob | json=blob | json<=>blob +-------+------------------+-----------+------------+-----------+------------+------------+------------+-----------+------------- + 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `mediumblob`, `json`, @@ -629,9 +629,9 @@ select `mediumblob`, 
`json`, `json` < `mediumblob` as `json `mediumblob` as `json<>mediumblob`, `json` = `mediumblob` as `json=mediumblob`, `json` <=> `mediumblob` as `json<=>mediumblob` from test_json_table; - mediumblob | json | json>mediumblob | json>=mediumblob | jsonmediumblob | json=mediumblob | json<=>mediumblob ---------------+------------------+-----------------+------------------+-----------------+------------------+------------------+------------------+-----------------+------------------- - \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + mediumblob | json | json>mediumblob | json>=mediumblob | jsonmediumblob | json=mediumblob | json<=>mediumblob +------------+------------------+-----------------+------------------+-----------------+------------------+------------------+------------------+-----------------+------------------- + 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `longblob`, `json`, @@ -649,9 +649,9 @@ select `longblob`, `json`, `json` < `longblob` as `json `longblob` as `json<>longblob`, `json` = `longblob` as `json=longblob`, `json` <=> `longblob` as `json<=>longblob` from test_json_table; - longblob | json | json>longblob | json>=longblob | jsonlongblob | json=longblob | json<=>longblob ---------------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- - \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + longblob | json | json>longblob | json>=longblob | jsonlongblob | json=longblob | json<=>longblob +----------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- + 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `text`, `json`, diff --git a/contrib/dolphin/expected/test_charset_collate.out b/contrib/dolphin/expected/test_charset_collate.out index 25da489a9..d62e8770e 100644 --- 
a/contrib/dolphin/expected/test_charset_collate.out +++ b/contrib/dolphin/expected/test_charset_collate.out @@ -153,8 +153,8 @@ create table test_binary1( ); insert into test_binary1 values('ppp'),('PpP'),('PPP'),('AAA'),('Aaa'),('aaa'),('Å '),('S'); select distinct f1 from test_binary1 order by f1; - f1 ----------- + f1 +----- AAA Aaa PPP @@ -162,7 +162,7 @@ select distinct f1 from test_binary1 order by f1; S aaa ppp - \305\240 + Å  (8 rows) select f1 from test_binary1 where f1 = 'ppp'::blob collate 'binary'; @@ -178,8 +178,8 @@ LINE 1: ...elect f1 from test_binary1 where f1 = 'ppp'::blob collate 'u... alter table test_binary1 add column f2 tinyblob collate 'binary'; insert into test_binary1 (f2) values('ppp'),('PpP'),('PPP'),('AAA'),('Aaa'),('aaa'),('Å '),('S'); select distinct f2 from test_binary1 order by f2; - f2 ----------- + f2 +----- AAA Aaa PPP @@ -187,7 +187,7 @@ select distinct f2 from test_binary1 order by f2; S aaa ppp - \305\240 + Å  (9 rows) diff --git a/contrib/dolphin/expected/test_condition.out b/contrib/dolphin/expected/test_condition.out index 01f01466f..f54c4cd18 100644 --- a/contrib/dolphin/expected/test_condition.out +++ b/contrib/dolphin/expected/test_condition.out @@ -5753,9 +5753,9 @@ select ifnull(tyint, vch) from typeset; (1 row) select ifnull(tyint, blb) from typeset; - ifnull ----------- - \x313237 + ifnull +-------- + 127 (1 row) select ifnull(tyint, txt) from typeset; @@ -5831,9 +5831,9 @@ select ifnull(smint, vch) from typeset; (1 row) select ifnull(smint, blb) from typeset; - ifnull ----------- - \x313237 + ifnull +-------- + 127 (1 row) select ifnull(smint, txt) from typeset; @@ -5903,9 +5903,9 @@ select ifnull(anint, vch) from typeset; (1 row) select ifnull(anint, blb) from typeset; - ifnull ----------- - \x313237 + ifnull +-------- + 127 (1 row) select ifnull(anint, txt) from typeset; @@ -5969,9 +5969,9 @@ select ifnull(bgint, vch) from typeset; (1 row) select ifnull(bgint, blb) from typeset; - ifnull ----------- - \x313237 + 
ifnull +-------- + 127 (1 row) select ifnull(bgint, txt) from typeset; @@ -6029,9 +6029,9 @@ select ifnull(dcmal, vch) from typeset; (1 row) select ifnull(dcmal, blb) from typeset; - ifnull ----------- - \x313237 + ifnull +-------- + 127 (1 row) select ifnull(dcmal, txt) from typeset; @@ -6083,9 +6083,9 @@ select ifnull(nmric, vch) from typeset; (1 row) select ifnull(nmric, blb) from typeset; - ifnull ----------- - \x313237 + ifnull +-------- + 127 (1 row) select ifnull(nmric, txt) from typeset; @@ -6131,9 +6131,9 @@ select ifnull(flt, vch) from typeset; (1 row) select ifnull(flt, blb) from typeset; - ifnull ------------------- - \x3132372e323133 + ifnull +--------- + 127.213 (1 row) select ifnull(flt, txt) from typeset; @@ -6145,43 +6145,43 @@ select ifnull(flt, txt) from typeset; select ifnull(bt, dt) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, tmstp) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, tm) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, ch) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, vch) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, blb) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(bt, txt) from typeset; ifnull -------- - \x7f + \x7F (1 row) select ifnull(dt, tmstp) from typeset; @@ -6209,9 +6209,9 @@ select ifnull(dt, vch) from typeset; (1 row) select ifnull(dt, blb) from typeset; - ifnull ------------------------- - \x323030312d30342d3139 + ifnull +------------ + 2001-04-19 (1 row) select ifnull(dt, txt) from typeset; @@ -6239,9 +6239,9 @@ select ifnull(tmstp, vch) from typeset; (1 row) select ifnull(tmstp, blb) from typeset; - ifnull ------------------------------------------------- - \x323030312d30342d31392030303a30303a30302d3037 + ifnull +------------------------ + 2001-04-19 00:00:00-07 (1 row) select ifnull(tmstp, txt) from typeset; @@ -6263,9 +6263,9 @@ select ifnull(tm, vch) from typeset; (1 row) 
select ifnull(tm, blb) from typeset; - ifnull --------------------- - \x32323a32333a3434 + ifnull +---------- + 22:23:44 (1 row) select ifnull(tm, txt) from typeset; @@ -6281,9 +6281,9 @@ select ifnull(ch, vch) from typeset; (1 row) select ifnull(ch, blb) from typeset; - ifnull ----------------------------------------------------------------- - \x323030312d30342d31392032323a32333a34342020202020202020202020 + ifnull +-------------------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(ch, txt) from typeset; @@ -6293,9 +6293,9 @@ select ifnull(ch, txt) from typeset; (1 row) select ifnull(vch, blb) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vch, txt) from typeset; @@ -6305,9 +6305,9 @@ select ifnull(vch, txt) from typeset; (1 row) select ifnull(blb, txt) from typeset; - ifnull --------------- - \x1233454212 + ifnull +------------- + \x123EB\x12 (1 row) select ifnull(bin, smint) from typeset; @@ -6385,13 +6385,13 @@ select ifnull(bin, vch) from typeset; select ifnull(bin, blb) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, txt) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(vbin, smint) from typeset; @@ -6467,15 +6467,15 @@ select ifnull(vbin, vch) from typeset; (1 row) select ifnull(vbin, blb) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, txt) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select pg_typeof(ifnull(tyint, smint)) from typeset; diff --git a/contrib/dolphin/output/b_proto_jdbc.source b/contrib/dolphin/output/b_proto_jdbc.source index ca5bd8b85..65bc44164 100644 --- 
a/contrib/dolphin/output/b_proto_jdbc.source +++ b/contrib/dolphin/output/b_proto_jdbc.source @@ -170,9 +170,9 @@ c33:CHAR:null update failed:invalid input syntax for tinyint: "abc" delete failed:invalid input syntax for tinyint: "abc" select * from t3; - c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | c11 | c12 | c13 | c15 | c16 | c18 | c19 | c20 | c21 | c22 | c23 | c24 | c25 | c26 | c27 | c28 | c29 | c30 | c31 | c32 | c33 -----+------+----+----+----+----+----+----+-----+-----+------------+-----+------------+---------+----------+------+------------+----------+----------------------------+-------------------------+------------+----------+------------+------------+----------+--------------------+------+--------+------------+-----+----- - 1 | 2000 | 1 | 4 | 5 | 6 | 7 | 8 | 9.9 | 10 | 0001100011 | t | char | varchar | nvarchar | 2023 | 2023-02-27 | 14:46:30 | 2023-03-07 00:16:16.666-08 | 2023-03-07 16:16:16.666 | \x626c6f62 | 626C6F62 | \x626c6f62 | \x626c6f62 | (1000,0) | ((1000,0),(200,3)) | text | 20.000 | {"k": "v"} | a | a + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | c11 | c12 | c13 | c15 | c16 | c18 | c19 | c20 | c21 | c22 | c23 | c24 | c25 | c26 | c27 | c28 | c29 | c30 | c31 | c32 | c33 +----+------+----+----+----+----+----+----+-----+-----+------------+-----+------------+---------+----------+------+------------+----------+----------------------------+-------------------------+------+----------+------+------+----------+--------------------+------+--------+------------+-----+----- + 1 | 2000 | 1 | 4 | 5 | 6 | 7 | 8 | 9.9 | 10 | 0001100011 | t | char | varchar | nvarchar | 2023 | 2023-02-27 | 14:46:30 | 2023-03-07 00:16:16.666-08 | 2023-03-07 16:16:16.666 | blob | 626C6F62 | blob | blob | (1000,0) | ((1000,0),(200,3)) | text | 20.000 | {"k": "v"} | a | a (1 row) \! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/proto_jdbc/class/mysql-connector-java-5.1.47.jar:@abs_builddir@/proto_jdbc/class/. 
MySQLJdbcAutoCommitTest localhost $dp db_jdbc proto_user Gauss@123 diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 06cc8727d..25df037f0 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -203,6 +203,9 @@ extern "C" DLL_PUBLIC Datum bytea2var(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(tinyblob_rawin); extern "C" DLL_PUBLIC Datum tinyblob_rawin(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_blob_rawout); +extern "C" DLL_PUBLIC Datum dolphin_blob_rawout(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1_PUBLIC(mediumblob_rawin); extern "C" DLL_PUBLIC Datum mediumblob_rawin(PG_FUNCTION_ARGS); @@ -923,7 +926,7 @@ Datum rawout(PG_FUNCTION_ARGS) pfree_ext(ans); PG_RETURN_CSTRING(out_string); } else { - return byteaout(fcinfo); + return dolphin_blob_rawout(fcinfo); } } @@ -8578,6 +8581,30 @@ Datum tinyblob_rawin(PG_FUNCTION_ARGS) return result; } +Datum dolphin_blob_rawout(PG_FUNCTION_ARGS) +{ + bytea* vlena = PG_GETARG_BYTEA_PP(0); + char* result = NULL; + char* rp = NULL; + + char* vp = NULL; + int len; + int i; + + len = 1 + VARSIZE_ANY_EXHDR(vlena); /* empty string has 1 char */ + rp = result = (char*)palloc(len); + vp = VARDATA_ANY(vlena); + for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { + *rp++ = *vp; + } + *rp = '\0'; + + /* free memory if allocated by the toaster */ + PG_FREE_IF_COPY(vlena, 0); + + PG_RETURN_CSTRING(result); +} + Datum mediumblob_rawin(PG_FUNCTION_ARGS) { #ifdef DOLPHIN diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 5fe9c2bdf..208a06d11 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -54,3 +54,14 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamptz_le_time (timestamp with time zone DROP FUNCTION IF EXISTS pg_catalog.timestamptz_lt_time (timestamp with time 
zone, time); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ge_time (timestamp with time zone, time); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_gt_time (timestamp with time zone, time); +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( +tinyblob +) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; + +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_rawout ( +mediumblob +) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; + +CREATE OR REPLACE FUNCTION pg_catalog.longblob_rawout ( +longblob +) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 03b78c550..b3204705a 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -114,3 +114,15 @@ CREATE OPERATOR pg_catalog.>=(leftarg = timestamp with time zone, rightarg = tim DROP OPERATOR IF EXISTS pg_catalog.>(timestamp with time zone, time); CREATE OPERATOR pg_catalog.>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); +-- The reason for using replace is because we don't want to change the OID +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( +tinyblob +) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_blob_rawout'; + +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_rawout ( +mediumblob +) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_blob_rawout'; + +CREATE OR REPLACE FUNCTION pg_catalog.longblob_rawout ( +longblob +) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_blob_rawout'; -- Gitee From 4eea57e72c393ffbee5cf4da10f1f6909b60a775 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Thu, 19 Oct 2023 13:06:32 +0800 Subject: [PATCH 023/434] 
=?UTF-8?q?1.xerces-c=E4=BE=9D=E8=B5=96=E7=A7=BB?= =?UTF-8?q?=E5=8A=A8=E5=88=B0spq=E4=B8=AD=202.spqplugin=E8=A7=A3=E5=86=B3?= =?UTF-8?q?=E5=86=85=E5=AD=98=E6=B3=84=E9=9C=B2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/CMakeLists.txt | 1 + contrib/spq_plugin/spqplugin.mk | 4 +- contrib/spq_plugin/src/CMakeLists.txt | 3 +- contrib/spq_plugin/src/Makefile | 5 +- contrib/spq_plugin/src/spq_opt.cpp | 56 ++++++++++++- .../naucrates/dxl/xml/CDXLMemoryManager.h | 10 +-- .../naucrates/dxl/xml/SPQCDXLMemoryManager.h | 34 ++++++++ .../libnaucrates/include/naucrates/init.h | 6 ++ .../spq_optimizer/libnaucrates/src/init.cpp | 82 +++++++++++++++---- .../src/parser/CParseHandlerFactory.cpp | 16 ++-- .../libnaucrates/src/xml/Makefile | 5 +- .../src/xml/SPQCDXLMemoryManager.cpp | 36 ++++++++ .../include/spqopt/xforms/CXformFactory.h | 10 +-- .../src/spq_optimizer/libspqopt/src/init.cpp | 11 +-- .../libspqopt/src/xforms/CXformFactory.cpp | 21 +++-- .../include/spqos/error/CMessageRepository.h | 3 - .../include/spqos/memory/CCacheFactory.h | 14 +--- .../include/spqos/memory/CMemoryPool.h | 41 ++++++++++ .../include/spqos/memory/CMemoryPoolManager.h | 40 +++++++++ .../src/spq_optimizer/libspqos/src/_api.cpp | 10 +-- .../libspqos/src/error/CMessageRepository.cpp | 16 ++-- .../libspqos/src/memory/CCacheFactory.cpp | 40 +++++++-- .../libspqos/src/memory/CMemoryPool.cpp | 16 ++++ .../src/memory/CMemoryPoolManager.cpp | 42 ++++++++++ .../src/spq_optimizer_util/SPQOptimizer.cpp | 12 +++ contrib/spq_plugin/src/spqplugin.cpp | 7 +- 26 files changed, 434 insertions(+), 107 deletions(-) create mode 100644 contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/SPQCDXLMemoryManager.h create mode 100644 contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/SPQCDXLMemoryManager.cpp diff --git a/contrib/spq_plugin/CMakeLists.txt b/contrib/spq_plugin/CMakeLists.txt index 7779a4203..4a96ab97b 100644 --- 
a/contrib/spq_plugin/CMakeLists.txt +++ b/contrib/spq_plugin/CMakeLists.txt @@ -29,6 +29,7 @@ add_subdirectory(src) set(spqplugin_DEF_OPTIONS ${MACRO_OPTIONS}) set(spqplugin_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(spqplugin_LINK_OPTIONS ${LIB_LINK_OPTIONS}) +list(APPEND spqplugin_LINK_OPTIONS -lxerces-c) add_shared_libtarget(spqplugin TGT_spqplugin_SRC TGT_spqplugin_INC "${spqplugin_DEF_OPTIONS}" "${spqplugin_COMPILE_OPTIONS}" "${spqplugin_LINK_OPTIONS}") diff --git a/contrib/spq_plugin/spqplugin.mk b/contrib/spq_plugin/spqplugin.mk index 81e8163f0..0dadc20d7 100644 --- a/contrib/spq_plugin/spqplugin.mk +++ b/contrib/spq_plugin/spqplugin.mk @@ -1,9 +1,9 @@ ifeq (,$(findstring -DUSE_SPQ,$(CPPFLAGS))) override CPPFLAGS := -DUSE_SPQ $(CPPFLAGS) endif -override CPPFLAGS := -I$(abs_top_srcdir)/contrib/spq_plugin/include -lxerces-c $(CPPFLAGS) +override CPPFLAGS := -I$(abs_top_srcdir)/contrib/spq_plugin/include $(CPPFLAGS) override CPPFLAGS :=$(filter-out -fPIE, $(CPPFLAGS)) -fPIC -override LIBS := -lxerces-c $(LIBS) +SHLIB_LINK += -lxerces-c ifneq "$(MAKECMDGOALS)" "clean" ifneq "$(MAKECMDGOALS)" "distclean" diff --git a/contrib/spq_plugin/src/CMakeLists.txt b/contrib/spq_plugin/src/CMakeLists.txt index 0b700fa6c..be9d73253 100644 --- a/contrib/spq_plugin/src/CMakeLists.txt +++ b/contrib/spq_plugin/src/CMakeLists.txt @@ -1,6 +1,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_spq_plugin_SRC) -SET(TGT_spq_plugin_INC ../include) +SET(TGT_spq_plugin_INC ../include + ./spq_optimizer/libnaucrates/include) set(spq_plugin_DEF_OPTIONS ${MACRO_OPTIONS}) set(spq_plugin_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS}) diff --git a/contrib/spq_plugin/src/Makefile b/contrib/spq_plugin/src/Makefile index 0b7b8d1f8..5b8367344 100644 --- a/contrib/spq_plugin/src/Makefile +++ b/contrib/spq_plugin/src/Makefile 
@@ -5,7 +5,8 @@ include $(top_builddir)/src/Makefile.global include $(top_builddir)/contrib/spq_plugin/spqplugin.mk override CPPFLAGS := -I$(abs_top_srcdir)/contrib/spq_plugin/include \ - $(CPPFLAGS) + -I$(abs_top_srcdir)/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include \ + $(CPPFLAGS) OBJS = guc_spq.o spq_opt.o spqplugin.o @@ -14,4 +15,4 @@ SUBDIRS = executor \ spq_optimizer \ spq_optimizer_util -include $(top_srcdir)/src/gausskernel/common.mk \ No newline at end of file +include $(top_srcdir)/src/gausskernel/common.mk diff --git a/contrib/spq_plugin/src/spq_opt.cpp b/contrib/spq_plugin/src/spq_opt.cpp index b304e8765..7491b0f4c 100644 --- a/contrib/spq_plugin/src/spq_opt.cpp +++ b/contrib/spq_plugin/src/spq_opt.cpp @@ -85,6 +85,40 @@ static void log_optimizer(PlannedStmt *plan, bool fUnexpectedFailure) // } } +static void init_spq_optimizer_context(PlannerGlobal* glob) +{ + glob->plannerContext = (PlannerContext*)palloc0(sizeof(PlannerContext)); + + glob->plannerContext->plannerMemContext = AllocSetContextCreate(CurrentMemoryContext, + "PlannerContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + if (u_sess->opt_cxt.skew_strategy_opt != SKEW_OPT_OFF) { + glob->plannerContext->dataSkewMemContext = AllocSetContextCreate(glob->plannerContext->plannerMemContext, + "DataSkewContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + } + + glob->plannerContext->tempMemCxt = AllocSetContextCreate(glob->plannerContext->plannerMemContext, + "Planner Temp MemoryContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + glob->plannerContext->refCounter = 0; +} + +static void deinit_spq_optimizer_context(PlannerGlobal* glob) +{ + MemoryContextDelete(glob->plannerContext->plannerMemContext); + glob->plannerContext->plannerMemContext = NULL; + glob->plannerContext->dataSkewMemContext = NULL; + glob->plannerContext->tempMemCxt = NULL; + 
glob->plannerContext->refCounter = 0; +} /* * spq_planner * Plan the query using the SPQOPT planner @@ -122,6 +156,8 @@ PlannedStmt *spq_planner(Query *parse, ParamListInfo boundParams) glob->subplans = NIL; glob->relationOids = NIL; glob->invalItems = NIL; + init_spq_optimizer_context(glob); + MemoryContext old_context = MemoryContextSwitchTo(glob->plannerContext->plannerMemContext); root = makeNode(PlannerInfo); root->parse = parse; @@ -164,8 +200,10 @@ PlannedStmt *spq_planner(Query *parse, ParamListInfo boundParams) * If SPQOPT didn't produce a plan, bail out and fall back to the Postgres * planner. */ - if (!result) + if (!result) { + MemoryContextSwitchTo(old_context); return NULL; + } /* * Post-process the plan. @@ -269,8 +307,20 @@ PlannedStmt *spq_planner(Query *parse, ParamListInfo boundParams) //result->oneoffPlan = glob->oneoffPlan; result->transientPlan = glob->transientPlan; - make_spq_remote_query(root, result, glob); - result->is_spq_optmized = true; + PG_TRY(); + { + make_spq_remote_query(root, result, glob); + result->is_spq_optmized = true; + } + PG_CATCH(); + { + ereport(WARNING, (errmsg("make_spq_remote_query failed."))); + deinit_spq_optimizer_context(glob); + result = nullptr; + } + PG_END_TRY(); + + MemoryContextSwitchTo(old_context); return result; } diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/CDXLMemoryManager.h b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/CDXLMemoryManager.h index 93b487d2f..91c3cbae2 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/CDXLMemoryManager.h +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/CDXLMemoryManager.h @@ -10,8 +10,8 @@ // Provides a wrapper around the SPQOS CMemoryPool interface. 
//--------------------------------------------------------------------------- -#ifndef SPQDXL_CDXLMemoryManager_H -#define SPQDXL_CDXLMemoryManager_H +#ifndef DXL_CDXLMemoryManager_H +#define DXL_CDXLMemoryManager_H #include #include @@ -50,11 +50,11 @@ public: // MemoryManager interface functions // allocates memory - void *allocate(XMLSize_t // size + virtual void *allocate(XMLSize_t // size ); // deallocates memory - void deallocate(void *pv); + virtual void deallocate(void *pv); // accessor to the underlying memory pool CMemoryPool * @@ -73,6 +73,6 @@ public: }; } // namespace spqdxl -#endif // SPQDXL_CDXLMemoryManager_H +#endif // DXL_CDXLMemoryManager_H // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/SPQCDXLMemoryManager.h b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/SPQCDXLMemoryManager.h new file mode 100644 index 000000000..78c72b249 --- /dev/null +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/SPQCDXLMemoryManager.h @@ -0,0 +1,34 @@ +#ifndef SPQDXL_CDXLMemoryManager_H +#define SPQDXL_CDXLMemoryManager_H + +#include "CDXLMemoryManager.h" + +namespace spqdxl +{ +using namespace spqos; + +XERCES_CPP_NAMESPACE_USE + +//--------------------------------------------------------------------------- +// @class: +// SPQCDXLMemoryManager +// +// @doc: +// Memory manager for parsing DXL documents used in the Xerces XML parser. +// Provides a wrapper around the SPQOS CMemoryPool interface. 
+// +//--------------------------------------------------------------------------- +class SPQCDXLMemoryManager : public CDXLMemoryManager +{ +public: + SPQCDXLMemoryManager(CMemoryPool *mp):CDXLMemoryManager(mp) { + + } + virtual void *allocate(XMLSize_t xmlsize); + + virtual void deallocate(void *pv); + + }; +} + +#endif \ No newline at end of file diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/init.h b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/init.h index 31462bf73..77c2cacdd 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/init.h +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/init.h @@ -22,6 +22,12 @@ void InitDXL(); // shutdown DXL library support void ShutdownDXL(); +// initialize DXL Memory Manager support +void InitDXLManager(); + +// shutdown DXL Memory Manager support +void ShutdownDXLManager(int code, void* args); + // initialize Xerces parser utils void spqdxl_init(void); diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/init.cpp b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/init.cpp index bf71061fa..0169ab606 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/init.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/init.cpp @@ -11,7 +11,7 @@ //--------------------------------------------------------------------------- #include "knl/knl_session.h" - +#include "knl/knl_instance.h" #include "naucrates/init.h" #include @@ -23,10 +23,13 @@ #include "naucrates/dxl/xml/CDXLMemoryManager.h" #include "naucrates/dxl/xml/dxltokens.h" #include "naucrates/exception.h" +#include "naucrates/dxl/xml/SPQCDXLMemoryManager.h" using namespace spqos; using namespace spqdxl; - +spqos::CMemoryPool* global_memory_pool = nullptr; +pthread_mutex_t dxl_init_lock = PTHREAD_MUTEX_INITIALIZER; +bool dxl_global_init_status = false; //--------------------------------------------------------------------------- // 
@function: // InitDXL @@ -49,16 +52,6 @@ InitDXL() SPQOS_ASSERT(NULL != u_sess->spq_cxt.pmpXerces); SPQOS_ASSERT(NULL != u_sess->spq_cxt.pmpDXL); - // setup own memory manager - u_sess->spq_cxt.dxl_memory_manager = SPQOS_NEW(u_sess->spq_cxt.pmpXerces) CDXLMemoryManager(u_sess->spq_cxt.pmpXerces); - - // initialize Xerces, if this fails library initialization should crash here - XMLPlatformUtils::Initialize(XMLUni::fgXercescDefaultLocale, // locale - NULL, // nlsHome: location for message files - NULL, // panicHandler - u_sess->spq_cxt.dxl_memory_manager // memoryManager - ); - // initialize DXL tokens CDXLTokens::Init(u_sess->spq_cxt.pmpDXL); @@ -69,6 +62,48 @@ InitDXL() } +//--------------------------------------------------------------------------- +// @function: +// InitDXLManager +// +// @doc: +// Initialize DXL Memory Manager; must be called before any library +// function is called +// +// +//--------------------------------------------------------------------------- +void +InitDXLManager() +{ + if (dxl_global_init_status) { + return ; + } + + pthread_mutex_lock(&dxl_init_lock); + if (!dxl_global_init_status) { + if (SPQOS_OK != spqos::CMemoryPoolManager::DXLInit()) { + ereport(ERROR, (errmsg("SPQ InitDXLManager error, dxl memory manager init failed."))); + pthread_mutex_unlock(&dxl_init_lock); + return; + } + global_memory_pool = CMemoryPoolManager::GetDXLMemoryPoolMgr()->CreateMemoryPool(); + + // setup own memory manager + spqdxl::CDXLMemoryManager *dxl_memory_manager = + SPQOS_NEW(global_memory_pool) + SPQCDXLMemoryManager(global_memory_pool); + // initialize Xerces, if this fails library initialization should crash here + XMLPlatformUtils::Initialize(XMLUni::fgXercescDefaultLocale, // locale + NULL, // nlsHome: location for message files + NULL, // panicHandler + dxl_memory_manager // memoryManager + ); + (void) on_exit(ShutdownDXLManager, NULL); + dxl_global_init_status = true; + } + pthread_mutex_unlock(&dxl_init_lock); +} + 
//--------------------------------------------------------------------------- // @function: // ShutdownDXL @@ -90,12 +125,25 @@ ShutdownDXL() u_sess->spq_cxt.m_ulpShutdownDXL++; - XMLPlatformUtils::Terminate(); - CDXLTokens::Terminate(); +} - SPQOS_DELETE(u_sess->spq_cxt.dxl_memory_manager); - u_sess->spq_cxt.dxl_memory_manager = NULL; +//--------------------------------------------------------------------------- +// @function: +// ShutdownDXLManager +// +// @doc: +// +// +// +//--------------------------------------------------------------------------- +void +ShutdownDXLManager(int code, void* args) +{ + XMLPlatformUtils::Terminate(); + CMemoryPoolManager::GetDXLMemoryPoolMgr()->ShutdownDXLMgr(); + ::delete global_memory_pool; + global_memory_pool = nullptr; } @@ -142,7 +190,6 @@ spqdxl_init() void spqdxl_terminate() { -#ifdef SPQOS_DEBUG ShutdownDXL(); if (NULL != u_sess->spq_cxt.pmpDXL) @@ -156,7 +203,6 @@ spqdxl_terminate() (CMemoryPoolManager::GetMemoryPoolMgr())->Destroy(u_sess->spq_cxt.pmpXerces); u_sess->spq_cxt.pmpXerces = NULL; } -#endif // SPQOS_DEBUG } // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerFactory.cpp b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerFactory.cpp index 0b155b180..f0b345e7e 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerFactory.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerFactory.cpp @@ -18,28 +18,26 @@ #include "naucrates/dxl/parser/parsehandlers.h" #include "naucrates/dxl/xml/CDXLMemoryManager.h" #include "naucrates/dxl/xml/dxltokens.h" - +#include "knl/knl_session.h" using namespace spqdxl; XERCES_CPP_NAMESPACE_USE -CParseHandlerFactory::TokenParseHandlerFuncMap - *CParseHandlerFactory::m_token_parse_handler_func_map = NULL; // adds a new mapping of token to corresponding parse handler void CParseHandlerFactory::AddMapping( Edxltoken token_type, ParseHandlerOpCreatorFunc 
*parse_handler_op_func) { - SPQOS_ASSERT(NULL != m_token_parse_handler_func_map); + SPQOS_ASSERT(NULL != u_sess->spq_cxt.m_token_parse_handler_func_map); const XMLCh *token_identifier_str = CDXLTokens::XmlstrToken(token_type); SPQOS_ASSERT(NULL != token_identifier_str); #ifdef SPQOS_DEBUG BOOL success = #endif - m_token_parse_handler_func_map->Insert(token_identifier_str, - parse_handler_op_func); + ((CParseHandlerFactory::TokenParseHandlerFuncMap*)u_sess->spq_cxt.m_token_parse_handler_func_map) + ->Insert(token_identifier_str, parse_handler_op_func); SPQOS_ASSERT(success); } @@ -48,7 +46,7 @@ CParseHandlerFactory::AddMapping( void CParseHandlerFactory::Init(CMemoryPool *mp) { - m_token_parse_handler_func_map = + u_sess->spq_cxt.m_token_parse_handler_func_map = SPQOS_NEW(mp) TokenParseHandlerFuncMap(mp, HASH_MAP_SIZE); // array mapping XML Token -> Parse Handler Creator mappings to hashmap @@ -313,10 +311,10 @@ CParseHandlerFactory::GetParseHandler(CMemoryPool *mp, CParseHandlerManager *parse_handler_mgr, CParseHandlerBase *parse_handler_root) { - SPQOS_ASSERT(NULL != m_token_parse_handler_func_map); + SPQOS_ASSERT(NULL != u_sess->spq_cxt.m_token_parse_handler_func_map); ParseHandlerOpCreatorFunc *create_parse_handler_func = - m_token_parse_handler_func_map->Find(token_identifier_str); + ((CParseHandlerFactory::TokenParseHandlerFuncMap*)u_sess->spq_cxt.m_token_parse_handler_func_map)->Find(token_identifier_str); if (create_parse_handler_func != NULL) { diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/Makefile b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/Makefile index 84ecb9457..955f03253 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/Makefile +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/Makefile @@ -11,6 +11,7 @@ include ../../../spqorca.mk OBJS = CDXLMemoryManager.o \ CDXLSections.o \ CXMLSerializer.o \ - dxltokens.o + dxltokens.o \ + SPQCDXLMemoryManager.o -include 
$(top_srcdir)/src/gausskernel/common.mk \ No newline at end of file +include $(top_srcdir)/src/gausskernel/common.mk diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/SPQCDXLMemoryManager.cpp b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/SPQCDXLMemoryManager.cpp new file mode 100644 index 000000000..df7554d76 --- /dev/null +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/SPQCDXLMemoryManager.cpp @@ -0,0 +1,36 @@ +#include "knl/knl_session.h" +#include "naucrates/dxl/xml/SPQCDXLMemoryManager.h" +using namespace spqdxl; + +//--------------------------------------------------------------------------- +// @function: +// SPQCDXLMemoryManager::allocate +// +// @doc: +// Memory allocation. +// +//--------------------------------------------------------------------------- +void* +SPQCDXLMemoryManager::allocate(XMLSize_t xmlsize) { + + if (u_sess->spq_cxt.pmpXerces == NULL) { + return SPQOS_NEW_ARRAY(Pmp(), BYTE, xmlsize); + } else { + return SPQOS_NEW_ARRAY(u_sess->spq_cxt.pmpXerces, BYTE, xmlsize); + } + +} + +//--------------------------------------------------------------------------- +// @function: +// SPQCDXLMemoryManager::deallocate +// +// @doc: +// Memory deallocation. 
+// +//--------------------------------------------------------------------------- +void +SPQCDXLMemoryManager::deallocate(void *pv) +{ + SPQOS_DELETE_DXL_ARRAY(reinterpret_cast(pv)); +} diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/xforms/CXformFactory.h b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/xforms/CXformFactory.h index a79839bd4..e282f6569 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/xforms/CXformFactory.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/xforms/CXformFactory.h @@ -53,9 +53,6 @@ private: // ensure that xforms are inserted in order ULONG m_lastAddedOrSkippedXformId; - // global instance - static CXformFactory *m_pxff; - // private ctor explicit CXformFactory(CMemoryPool *mp); @@ -104,12 +101,7 @@ public: BOOL IsXformIdUsed(CXform::EXformId exfid); // global accessor - static CXformFactory * - Pxff() - { - return m_pxff; - } - + static CXformFactory *Pxff(); // initialize global factory instance static SPQOS_RESULT Init(); diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/init.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/init.cpp index 7887e7589..efcc575c8 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/init.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/init.cpp @@ -20,12 +20,11 @@ #include "spqopt/mdcache/CMDCache.h" #include "spqopt/xforms/CXformFactory.h" #include "naucrates/init.h" +#include "knl/knl_session.h" using namespace spqos; using namespace spqopt; -static CMemoryPool *mp = NULL; - //--------------------------------------------------------------------------- // @function: @@ -43,10 +42,10 @@ spqopt_init() { { CAutoMemoryPool amp; - mp = amp.Pmp(); + u_sess->spq_cxt.m_xform_mp = amp.Pmp(); // add standard exception messages - (void) spqopt::EresExceptionInit(mp); + (void) spqopt::EresExceptionInit(u_sess->spq_cxt.m_xform_mp); // detach safety (void) amp.Detach(); @@ -69,13 +68,11 
@@ spqopt_init() void spqopt_terminate() { -#ifdef SPQOS_DEBUG CMDCache::Shutdown(); - CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(mp); + CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(u_sess->spq_cxt.m_xform_mp); CXformFactory::Pxff()->Shutdown(); -#endif // SPQOS_DEBUG } // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/xforms/CXformFactory.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/xforms/CXformFactory.cpp index f18796c06..f5f54c19f 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/xforms/CXformFactory.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/xforms/CXformFactory.cpp @@ -11,14 +11,11 @@ #include "spqos/base.h" #include "spqos/memory/CMemoryPoolManager.h" - +#include "knl/knl_session.h" #include "spqopt/xforms/xforms.h" using namespace spqopt; -// global instance of xform factory -CXformFactory *CXformFactory::m_pxff = NULL; - //--------------------------------------------------------------------------- // @function: @@ -58,7 +55,7 @@ CXformFactory::CXformFactory(CMemoryPool *mp) //--------------------------------------------------------------------------- CXformFactory::~CXformFactory() { - SPQOS_ASSERT(NULL == m_pxff && "Xform factory has not been shut down"); + SPQOS_ASSERT(NULL == u_sess->spq_cxt.m_pxff && "Xform factory has not been shut down"); // delete all xforms in the array for (ULONG i = 0; i < CXform::ExfSentinel; i++) @@ -369,13 +366,13 @@ CXformFactory::Init() SPQOS_TRY { // create xform factory instance - m_pxff = SPQOS_NEW(mp) CXformFactory(mp); + u_sess->spq_cxt.m_pxff = SPQOS_NEW(mp) CXformFactory(mp); } SPQOS_CATCH_EX(ex) { // destroy memory pool if global instance was not created CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(mp); - m_pxff = NULL; + u_sess->spq_cxt.m_pxff = NULL; if (SPQOS_MATCH_EX(ex, CException::ExmaSystem, CException::ExmiOOM)) { @@ -391,7 +388,7 @@ CXformFactory::Init() SPQOS_CATCH_END; // instantiating the factory - m_pxff->Instantiate(); + 
u_sess->spq_cxt.m_pxff->Instantiate(); return eres; } @@ -415,12 +412,18 @@ CXformFactory::Shutdown() CMemoryPool *mp = pxff->m_mp; // destroy xform factory - CXformFactory::m_pxff = NULL; + u_sess->spq_cxt.m_pxff = NULL; SPQOS_DELETE(pxff); // release allocated memory pool CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(mp); } +CXformFactory* +CXformFactory::Pxff() +{ + return u_sess->spq_cxt.m_pxff; +} + // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/error/CMessageRepository.h b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/error/CMessageRepository.h index 7519f1eb3..8f4d1cb4c 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/error/CMessageRepository.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/error/CMessageRepository.h @@ -26,9 +26,6 @@ namespace spqos class CMessageRepository { private: - // global singleton - static CMessageRepository *m_repository; - // memory pool CMemoryPool *m_mp; diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CCacheFactory.h b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CCacheFactory.h index 8664a05ba..44fc2bb1d 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CCacheFactory.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CCacheFactory.h @@ -42,8 +42,6 @@ namespace spqos class CCacheFactory { private: - // global instance - static CCacheFactory *m_factory; // memory pool allocated to caches CMemoryPool *m_mp; @@ -58,11 +56,7 @@ private: public: // private dtor - ~CCacheFactory() - { - SPQOS_ASSERT(NULL == m_factory && - "Cache factory has not been shut down"); - } + ~CCacheFactory(); // initialize global memory pool static SPQOS_RESULT Init(); @@ -71,11 +65,7 @@ public: void Shutdown(); // global accessor - inline static CCacheFactory * - GetFactory() - { - return m_factory; - } + static CCacheFactory *GetFactory(); // create a cache 
instance template diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPool.h b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPool.h index f86b6e5a0..321356e88 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPool.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPool.h @@ -160,6 +160,12 @@ public: // free allocation static void DeleteImpl(void *ptr, EAllocationType eat); + // requested size of allocation + static ULONG UserDXLSizeOfAlloc(const void *ptr); + + // free allocation + static void DeleteDXLImpl(void *ptr, EAllocationType eat); + #ifdef SPQOS_DEBUG // check if the memory pool keeps track of live objects @@ -247,6 +253,27 @@ public: // Free memory. CMemoryPool::DeleteImpl(object_array, CMemoryPool::EatArray); } + + static void + DeleteDXLArray(T *object_array) + { + if (NULL == object_array) + { + return; + } + + // Invoke destructor on each array element in reverse + // order from construction. + const SIZE_T num_elements = + CMemoryPool::UserDXLSizeOfAlloc(object_array) / sizeof(T); + for (SIZE_T idx = num_elements - 1; idx < num_elements; --idx) + { + object_array[idx].~T(); + } + + // Free memory. + CMemoryPool::DeleteDXLImpl(object_array, CMemoryPool::EatArray); + } }; // Specialization for const-qualified types. @@ -265,6 +292,12 @@ public: { CDeleter::DeleteArray(const_cast(object_array)); } + + static void + DeleteDXLArray(const T *object_array) + { + CDeleter::DeleteDXLArray(const_cast(object_array)); + } }; } // namespace delete_detail } // namespace spqos @@ -324,6 +357,14 @@ SPQOS_DELETE_ARRAY(T *object_array) { ::spqos::delete_detail::CDeleter::DeleteArray(object_array); } + +// Delete an array allocated by SPQOS_NEW_DXL_ARRAY(). 
+template +void + SPQOS_DELETE_DXL_ARRAY(T *object_array) +{ + ::spqos::delete_detail::CDeleter::DeleteDXLArray(object_array); +} #endif // !SPQOS_CMemoryPool_H // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPoolManager.h b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPoolManager.h index a6931d386..4c16ec998 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPoolManager.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/include/spqos/memory/CMemoryPoolManager.h @@ -59,6 +59,8 @@ private: // hash table to maintain created pools CSyncHashtable *m_ht_all_pools; + static CMemoryPoolManager *m_dxl_memory_pool_mgr; + // create new pool of given type virtual CMemoryPool *NewMemoryPool(); @@ -125,6 +127,35 @@ protected: return SPQOS_OK; } + + // Initialize dxl memory pool manager using given types + template + static SPQOS_RESULT + SetupDXLMemoryPoolManager() + { + // raw allocation of memory for internal memory pools + void *alloc_internal = spqos::clib::Malloc(sizeof(PoolType)); + + SPQOS_OOM_CHECK(alloc_internal); + + SPQOS_TRY + { + // create internal memory pool + CMemoryPool *internal = ::new (alloc_internal) PoolType(); + + // instantiate manager + *(GetDXLMemoryPoolMgrPtr()) = ::new ManagerType(internal, EMemoryPoolTracker); + GetDXLMemoryPoolMgr()->Setup(); + } + SPQOS_CATCH_EX(ex) + { + spqos::clib::Free(alloc_internal); + SPQOS_RETHROW(ex); + } + SPQOS_CATCH_END; + return SPQOS_OK; + } + public: // create new memory pool CMemoryPool *CreateMemoryPool(); @@ -143,6 +174,8 @@ public: // delete memory pools and release manager void Shutdown(); + void ShutdownDXLMgr(); + // accessor of memory pool used in global new allocations CMemoryPool * GetGlobalMemoryPool() @@ -187,10 +220,17 @@ public: // initialize global instance static SPQOS_RESULT Init(); + // initialize dxl instance + static SPQOS_RESULT DXLInit(); + // global accessor static 
CMemoryPoolManager *GetMemoryPoolMgr(); static CMemoryPoolManager **GetMemoryPoolMgrPtr(); + // dxl accessor + static CMemoryPoolManager *GetDXLMemoryPoolMgr(); + static CMemoryPoolManager **GetDXLMemoryPoolMgrPtr(); + }; // class CMemoryPoolManager } // namespace spqos diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/_api.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/_api.cpp index 48179d652..8afaddeb3 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/_api.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/_api.cpp @@ -285,15 +285,13 @@ spqos_terminate() #ifdef SPQOS_DEBUG_COUNTERS CDebugCounter::Shutdown(); #endif -#ifdef SPQOS_DEBUG #ifdef SPQOS_FPSIMULATOR CFSimulator::FSim()->Shutdown(); #endif // SPQOS_FPSIMULATOR - CMessageRepository::GetMessageRepository()->Shutdown(); - CWorkerPoolManager::WorkerPoolManager()->Shutdown(); - CCacheFactory::GetFactory()->Shutdown(); - CMemoryPoolManager::GetMemoryPoolMgr()->Shutdown(); -#endif // SPQOS_DEBUG + CMessageRepository::GetMessageRepository()->Shutdown(); + CWorkerPoolManager::WorkerPoolManager()->Shutdown(); + CCacheFactory::GetFactory()->Shutdown(); + CMemoryPoolManager::GetMemoryPoolMgr()->Shutdown(); } // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CMessageRepository.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CMessageRepository.cpp index 03b2b3a54..8511f778d 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CMessageRepository.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CMessageRepository.cpp @@ -9,6 +9,8 @@ // Singleton to keep error messages; //--------------------------------------------------------------------------- +#include "knl/knl_session.h" + #include "spqos/error/CMessageRepository.h" #include "spqos/common/CSyncHashtableAccessByKey.h" @@ -17,10 +19,6 @@ using namespace spqos; -//--------------------------------------------------------------------------- -// static 
singleton -//--------------------------------------------------------------------------- -CMessageRepository *CMessageRepository::m_repository = NULL; //--------------------------------------------------------------------------- // @function: @@ -106,7 +104,7 @@ CMessageRepository::LookupMessage(CException exc, ELocale locale) SPQOS_RESULT CMessageRepository::Init() { - SPQOS_ASSERT(NULL == m_repository); + SPQOS_ASSERT(NULL == u_sess->spq_cxt.m_repository); CAutoMemoryPool amp; CMemoryPool *mp = amp.Pmp(); @@ -115,7 +113,7 @@ CMessageRepository::Init() repository->InitDirectory(mp); repository->LoadStandardMessages(); - CMessageRepository::m_repository = repository; + u_sess->spq_cxt.m_repository = repository; // detach safety (void) amp.Detach(); @@ -135,8 +133,8 @@ CMessageRepository::Init() CMessageRepository * CMessageRepository::GetMessageRepository() { - SPQOS_ASSERT(NULL != m_repository); - return m_repository; + SPQOS_ASSERT(NULL != u_sess->spq_cxt.m_repository); + return u_sess->spq_cxt.m_repository; } @@ -154,7 +152,7 @@ void CMessageRepository::Shutdown() { CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(m_mp); - CMessageRepository::m_repository = NULL; + u_sess->spq_cxt.m_repository = NULL; } diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CCacheFactory.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CCacheFactory.cpp index 9070990e3..d8a7335c5 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CCacheFactory.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CCacheFactory.cpp @@ -9,6 +9,7 @@ // Function implementation of CCacheFactory //--------------------------------------------------------------------------- +#include "knl/knl_session.h" #include "spqos/memory/CCacheFactory.h" @@ -18,9 +19,6 @@ using namespace spqos; -// global instance of cache factory -CCacheFactory *CCacheFactory::m_factory = NULL; - 
//--------------------------------------------------------------------------- // @function: // CCacheFactory::CCacheFactory @@ -33,6 +31,20 @@ CCacheFactory::CCacheFactory(CMemoryPool *mp) : m_mp(mp) { } +//--------------------------------------------------------------------------- +// @function: +// CCacheFactory::~CCacheFactory +// +// @doc: +// Ctor; +// +//--------------------------------------------------------------------------- +CCacheFactory::~CCacheFactory() +{ + SPQOS_ASSERT(NULL == u_sess->spq_cxt.m_factory && + "Cache factory has not been shut down"); +} + //--------------------------------------------------------------------------- // @function: @@ -71,14 +83,14 @@ CCacheFactory::Init() SPQOS_TRY { // create cache factory instance - CCacheFactory::m_factory = SPQOS_NEW(mp) CCacheFactory(mp); + u_sess->spq_cxt.m_factory = SPQOS_NEW(mp) CCacheFactory(mp); } SPQOS_CATCH_EX(ex) { // destroy memory pool if global instance was not created CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(mp); - CCacheFactory::m_factory = NULL; + u_sess->spq_cxt.m_factory = NULL; if (SPQOS_MATCH_EX(ex, CException::ExmaSystem, CException::ExmiOOM)) { @@ -112,10 +124,26 @@ CCacheFactory::Shutdown() CMemoryPool *mp = factory->m_mp; // destroy cache factory - CCacheFactory::m_factory = NULL; + u_sess->spq_cxt.m_factory = NULL; SPQOS_DELETE(factory); // release allocated memory pool CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(mp); } + + +//--------------------------------------------------------------------------- +// @function: +// CCacheFactory::GetFactory +// +// @doc: +// get factory from context +// +//--------------------------------------------------------------------------- +CCacheFactory * +CCacheFactory::GetFactory() +{ + return u_sess->spq_cxt.m_factory; +} + // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPool.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPool.cpp index 15a902bad..9a2fb42cc 100644 --- 
a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPool.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPool.cpp @@ -36,6 +36,15 @@ CMemoryPool::UserSizeOfAlloc(const void *ptr) return CMemoryPoolManager::GetMemoryPoolMgr()->UserSizeOfAlloc(ptr); } +// get user requested size of allocation +ULONG +CMemoryPool::UserDXLSizeOfAlloc(const void *ptr) +{ + SPQOS_ASSERT(NULL != ptr); + + return CMemoryPoolManager::GetDXLMemoryPoolMgr()->UserSizeOfAlloc(ptr); +} + void CMemoryPool::DeleteImpl(void *ptr, EAllocationType eat) @@ -43,6 +52,13 @@ CMemoryPool::DeleteImpl(void *ptr, EAllocationType eat) CMemoryPoolManager::GetMemoryPoolMgr()->DeleteImpl(ptr, eat); } +void + CMemoryPool::DeleteDXLImpl(void *ptr, EAllocationType eat) +{ + CMemoryPoolManager::GetDXLMemoryPoolMgr()->DeleteImpl(ptr, eat); +} + + #ifdef SPQOS_DEBUG //--------------------------------------------------------------------------- diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPoolManager.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPoolManager.cpp index bd4fe984d..076fb3fcc 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPoolManager.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/memory/CMemoryPoolManager.cpp @@ -30,6 +30,7 @@ using namespace spqos; using namespace spqos::clib; +CMemoryPoolManager *CMemoryPoolManager::m_dxl_memory_pool_mgr = nullptr; // ctor CMemoryPoolManager::CMemoryPoolManager(CMemoryPool *internal, EMemoryPoolType memory_pool_type) @@ -88,6 +89,27 @@ CMemoryPoolManager::GetMemoryPoolMgrPtr() return &(u_sess->spq_cxt.m_memory_pool_mgr); } +// Initialize dxl memory pool manager using CMemoryPoolTracker +SPQOS_RESULT +CMemoryPoolManager::DXLInit() +{ + return SetupDXLMemoryPoolManager(); +} + + +CMemoryPoolManager * + CMemoryPoolManager::GetDXLMemoryPoolMgr() +{ + return m_dxl_memory_pool_mgr; +} + +CMemoryPoolManager ** + 
CMemoryPoolManager::GetDXLMemoryPoolMgrPtr() +{ + return &(m_dxl_memory_pool_mgr); +} + + CMemoryPool * CMemoryPoolManager::CreateMemoryPool() { @@ -289,4 +311,24 @@ CMemoryPoolManager::Shutdown() Free(internal); } +// Delete dxl memory pools and release manager +void + CMemoryPoolManager::ShutdownDXLMgr() +{ + // cleanup remaining memory pools + Cleanup(); + + // save off pointers for explicit deletion + CMemoryPool *internal = m_internal_memory_pool; + + ::delete m_dxl_memory_pool_mgr; + m_dxl_memory_pool_mgr = nullptr; + +#ifdef SPQOS_DEBUG + internal->AssertEmpty(oswcerr); +#endif // SPQOS_DEBUG + + Free(internal); +} + // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp index 8e08965a6..6155ba2fc 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp @@ -29,6 +29,7 @@ #include "naucrates/init.h" #include "utils/guc.h" #include "utils/memutils.h" +#include "storage/ipc.h" bool optimizer_trace_fallback = false; @@ -172,6 +173,11 @@ SPQOptimizer::SerializeDXLPlan(Query *query) void SPQOptimizer::InitSPQOPT() { + + if (u_sess->spq_cxt.spq_opt_initialized) { + return; + } + if (u_sess->attr.attr_spq.spq_optimizer_use_gauss_allocators) { CMemoryPoolPallocManager::Init(); @@ -182,6 +188,9 @@ SPQOptimizer::InitSPQOPT() spqos_init(¶ms); spqdxl_init(); spqopt_init(); + u_sess->spq_cxt.spq_opt_initialized = true; + on_proc_exit(UnInitSPQOPT, PointerGetDatum(u_sess)); + } //--------------------------------------------------------------------------- @@ -280,7 +289,10 @@ TerminateSPQOPT() void UnInitSPQOPT(int status, Datum arg) { + knl_session_context* session_back = u_sess; + u_sess = (knl_session_context*) DatumGetPointer(arg); TerminateSPQOPT(); + u_sess = session_back; } // EOF diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index 306d16f38..c7eb5d6f3 100644 --- 
a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -9,7 +9,6 @@ * ------------------------------------------------------------------------- */ #include "postgres.h" -#include #include #include "nodes/nodeFuncs.h" #include "catalog/pg_inherits_fn.h" @@ -23,6 +22,7 @@ #include "guc_spq.h" #include "spqplugin.h" #include "storage/ipc.h" +#include "naucrates/init.h" PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(spqplugin_invoke); @@ -136,8 +136,6 @@ static void spq_guc_init(knl_u_spq_context* spq_cxt) MemoryContext oldContext = MemoryContextSwitchTo(spq_cxt->spq_worker_context); InitSpqConfigureNames(); MemoryContextSwitchTo(oldContext); - InitSPQOPT(); - on_proc_exit(UnInitSPQOPT, 0); } static bool should_spq_planner(Query *parse) @@ -171,6 +169,7 @@ static bool should_spq_planner(Query *parse) } if (!check_disable_spq_planner_walker((Node *)parse, NULL)) { + InitSPQOPT(); return true; } @@ -222,12 +221,12 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b if (u_sess->attr.attr_common.max_datanode_for_plan > 0 && IS_PGXC_COORDINATOR && !IsConnFromCoord()) { GetRemoteQuery(result, NULL); } - return result; } void _PG_init(void) { + InitDXLManager(); if (!HOOK_INIT) { backup_spq_planner_hook = spq_planner_hook; spq_planner_hook = spq_optimize_query; -- Gitee From 898b5bfcc90b32ce3a28a7ff7d1802e67585b2c2 Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 19 Oct 2023 20:28:17 +0800 Subject: [PATCH 024/434] Support ref outer relation's alias. 
--- .../dolphin/expected/db_b_new_gram_test.out | 75 +++++++++++++++ contrib/dolphin/plugin_parser/parse_expr.cpp | 96 ++++++++++++------- contrib/dolphin/sql/db_b_new_gram_test.sql | 41 ++++++++ 3 files changed, 177 insertions(+), 35 deletions(-) diff --git a/contrib/dolphin/expected/db_b_new_gram_test.out b/contrib/dolphin/expected/db_b_new_gram_test.out index 83e5fb962..85f7f8193 100644 --- a/contrib/dolphin/expected/db_b_new_gram_test.out +++ b/contrib/dolphin/expected/db_b_new_gram_test.out @@ -1233,6 +1233,81 @@ create table body(body int); ERROR: syntax error at or near "body" LINE 1: create table body(body int); ^ +CREATE TABLE `Student` ( + `Sno` varchar(3) NOT NULL, + `Sname` varchar(8) NOT NULL, + `Ssex` varchar(2) NOT NULL, + PRIMARY KEY (`Sno`) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "student_pkey" for table "student" +CREATE TABLE `Score` ( + `S_no` varchar(3) NOT NULL, + `Cno` varchar(5) NOT NULL, + `Degree` decimal(4, 1) DEFAULT NULL, + PRIMARY KEY (`S_no`, `Cno`), + CONSTRAINT `Score_ibfk_1` FOREIGN KEY (`S_no`) REFERENCES `Student` (`Sno`) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "score_pkey" for table "score" +INSERT INTO `Student` (`Sno`, `Sname`, `Ssex`) +VALUES ('101', 'æŽå†›', 'ç”·'), + ('103', '陆å›', 'ç”·'), + ('105', '匡明', 'ç”·'); +INSERT INTO `Score` (`S_no`, `Cno`, `Degree`) +VALUES ('101', '3-105', '64.0'), + ('101', '6-166', '85.0'), + ('103', '3-105', '92.0'), + ('103', '3-245', '86.0'); +select a.Sno as no,a.Sname as name , + (select Degree from Score + where Cno= '3-105' and S_no=no) +from Student as a; + no | name | Degree +-----+------+-------- + 101 | æŽå†› | 64.0 + 103 | é™†å› | 92.0 + 105 | 匡明 | +(3 rows) + +--expr +select (a.Sno+1) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; + no | name | Degree +-----+------+-------- + 102 | æŽå†› | + 104 | é™†å› | + 106 | 匡明 | +(3 rows) + +--multi var +select (a.Sno+a.Sname) as 
no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; + no | name | Degree +-----+------+-------- + 101 | æŽå†› | 64.0 + 103 | é™†å› | 92.0 + 105 | 匡明 | +(3 rows) + +--func +select (a.Sno is null) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; + no | name | Degree +----+------+-------- + f | æŽå†› | + f | é™†å› | + f | 匡明 | +(3 rows) + +select hex(a.Sno) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; + no | name | Degree +--------+------+-------- + 313031 | æŽå†› | + 313033 | é™†å› | + 313035 | 匡明 | +(3 rows) + +-- agg not support +select sum(a.Sno) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +ERROR: Aggref found where not expected +drop table Score; +drop table Student; drop schema test_m cascade; NOTICE: drop cascades to table test_unique drop schema db_b_new_gram_test cascade; diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 483fe3d85..bb168089b 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -723,54 +723,80 @@ static Node* replaceExprAliasIfNecessary(ParseState* pstate, char* colname, Colu bool isFind = false; Expr* matchExpr = NULL; TargetEntry* tle = NULL; - foreach (lc, pstate->p_target_list) { - tle = (TargetEntry*)lfirst(lc); - /* - * 1. in a select stmt in stored procudre, a columnref may be a param(e.g. a declared var or the stored - * procedure's arg), which is not a alias, so can not be matched here. - * 2. in a select stmt in stored procudre such like a[1],a[2],a[3], they have same name, - * so, we should pass this target. 
- */ - bool isArrayParam = IsA(tle->expr, ArrayRef) && ((ArrayRef*)tle->expr)->refexpr != NULL && - IsA(((ArrayRef*)tle->expr)->refexpr, Param); - if (tle->resname != NULL && !IsA(tle->expr, Param) && !isArrayParam && #ifdef DOLPHIN - strncasecmp(tle->resname, colname, strlen(colname) + 1) == 0) { + int varlevelsup = 0; + int matchVarlevel = 0; + while (pstate != NULL) { +#endif + foreach (lc, pstate->p_target_list) { + tle = (TargetEntry*)lfirst(lc); + /* + * 1. in a select stmt in stored procudre, a columnref may be a param(e.g. a declared var or the stored + * procedure's arg), which is not a alias, so can not be matched here. + * 2. in a select stmt in stored procudre such like a[1],a[2],a[3], they have same name, + * so, we should pass this target. + */ + bool isArrayParam = IsA(tle->expr, ArrayRef) && ((ArrayRef*)tle->expr)->refexpr != NULL && + IsA(((ArrayRef*)tle->expr)->refexpr, Param); + if (tle->resname != NULL && !IsA(tle->expr, Param) && !isArrayParam && +#ifdef DOLPHIN + strncasecmp(tle->resname, colname, strlen(colname) + 1) == 0) { #else - strncmp(tle->resname, colname, strlen(colname) + 1) == 0) { + strncmp(tle->resname, colname, strlen(colname) + 1) == 0) { #endif - if (checkExprHasWindowFuncs((Node*)tle->expr)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("Alias \"%s\" reference with window function included is not supported.", colname), - parser_errposition(pstate, cref->location))); + if (checkExprHasWindowFuncs((Node*)tle->expr)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("Alias \"%s\" reference with window function included is not supported.", colname), + parser_errposition(pstate, cref->location))); #ifndef ENABLE_MULTIPLE_NODES - } else if (ContainRownumExpr((Node*)tle->expr)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("Alias \"%s\" reference with ROWNUM included is invalid.", colname), - parser_errposition(pstate, cref->location))); + } else if 
(ContainRownumExpr((Node*)tle->expr)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("Alias \"%s\" reference with ROWNUM included is invalid.", colname), + parser_errposition(pstate, cref->location))); #endif - } else if (contain_volatile_functions((Node*)tle->expr)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("Alias \"%s\" reference with volatile function included is not supported.", colname), - parser_errposition(pstate, cref->location))); - } else { - if (!isFind) { - matchExpr = tle->expr; - isFind = true; - } else { + } else if (contain_volatile_functions((Node*)tle->expr)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("Alias \"%s\" is ambiguous.", colname), + errmsg("Alias \"%s\" reference with volatile function included is not supported.", colname), parser_errposition(pstate, cref->location))); - return NULL; + } else { + if (!isFind) { + matchExpr = tle->expr; + isFind = true; +#ifdef DOLPHIN + matchVarlevel = varlevelsup; +#endif + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("Alias \"%s\" is ambiguous.", colname), + parser_errposition(pstate, cref->location))); + return NULL; + } } } } +#ifdef DOLPHIN + pstate = pstate->parentParseState; + varlevelsup++; + } + Node *res = (Node*)copyObject(matchExpr); + if (matchVarlevel == 0) { + return res; + } + List *varList = pull_var_clause(res, PVC_REJECT_AGGREGATES, PVC_RECURSE_PLACEHOLDERS, + PVC_RECURSE_SPECIAL_EXPR, true); + foreach (lc, varList) { + Var *var = (Var*)lfirst(lc); + var->varlevelsup += matchVarlevel; } + list_free(varList); + return res; +#else return (Node*)copyObject(matchExpr); +#endif } static Node* ParseColumnRef(ParseState* pstate, RangeTblEntry* rte, char* colname, ColumnRef* cref) diff --git a/contrib/dolphin/sql/db_b_new_gram_test.sql b/contrib/dolphin/sql/db_b_new_gram_test.sql index 009081c5d..137fbf957 100644 --- a/contrib/dolphin/sql/db_b_new_gram_test.sql +++ 
b/contrib/dolphin/sql/db_b_new_gram_test.sql @@ -483,6 +483,47 @@ drop table authid; drop table authid_t1; create table body(body int); +CREATE TABLE `Student` ( + `Sno` varchar(3) NOT NULL, + `Sname` varchar(8) NOT NULL, + `Ssex` varchar(2) NOT NULL, + PRIMARY KEY (`Sno`) +); + +CREATE TABLE `Score` ( + `S_no` varchar(3) NOT NULL, + `Cno` varchar(5) NOT NULL, + `Degree` decimal(4, 1) DEFAULT NULL, + PRIMARY KEY (`S_no`, `Cno`), + CONSTRAINT `Score_ibfk_1` FOREIGN KEY (`S_no`) REFERENCES `Student` (`Sno`) +); + +INSERT INTO `Student` (`Sno`, `Sname`, `Ssex`) +VALUES ('101', 'æŽå†›', 'ç”·'), + ('103', '陆å›', 'ç”·'), + ('105', '匡明', 'ç”·'); + +INSERT INTO `Score` (`S_no`, `Cno`, `Degree`) +VALUES ('101', '3-105', '64.0'), + ('101', '6-166', '85.0'), + ('103', '3-105', '92.0'), + ('103', '3-245', '86.0'); +select a.Sno as no,a.Sname as name , + (select Degree from Score + where Cno= '3-105' and S_no=no) +from Student as a; + +--expr +select (a.Sno+1) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +--multi var +select (a.Sno+a.Sname) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +--func +select (a.Sno is null) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +select hex(a.Sno) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +-- agg not support +select sum(a.Sno) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +drop table Score; +drop table Student; drop schema test_m cascade; drop schema db_b_new_gram_test cascade; reset current_schema; -- Gitee From e99f95315874c3ad84c6d071aabafc33e16215be Mon Sep 17 00:00:00 2001 From: totaj Date: Sat, 21 Oct 2023 14:59:56 +0800 Subject: [PATCH 025/434] Fix call bug. 
--- contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp index 109d9f724..cc1c6153b 100755 --- a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp @@ -3391,7 +3391,7 @@ void ExecuteCallStmt(DolphinCallStmt *stmt, ParamListInfo params, bool atomic) Oid definer = GetUserId(); Form_pg_proc procStruct; - bool topCall = false; + volatile bool topCall = false; /* use volatile to let PG_CATCH get the right value */ /* Get function's pg_proc entry */ tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(fexpr->funcid)); if (!HeapTupleIsValid(tp)) { -- Gitee From 62576517506f591143d8273cf2fb38001c9a4c49 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Mon, 23 Oct 2023 17:49:13 +0800 Subject: [PATCH 026/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9cast=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs.out | 10 +- .../dolphin/expected/builtin_funcs/conv.out | 12 +- .../expected/builtin_funcs/make_set.out | 2 +- .../dolphin/expected/charset_utf8mb4_b_db.out | 8 +- .../dolphin/expected/column_quote_alias.out | 12 +- contrib/dolphin/expected/db_b_parser3.out | 20 +-- contrib/dolphin/expected/insert_set.out | 2 +- .../ignore_invalid_input.out | 80 +++++------ contrib/dolphin/expected/mysqlmode_strict.out | 72 +++++----- .../dolphin/expected/mysqlmode_strict2.out | 40 +++--- .../json_operator_test.out | 48 +++---- .../string_func_test/db_b_insert_test.out | 8 +- .../string_func_test/db_b_nameconst_test.out | 4 +- contrib/dolphin/expected/test_condition.out | 28 ++-- contrib/dolphin/expected/tinyint_cast.out | 2 +- contrib/dolphin/expected/uint_in.out | 8 +- .../include/plugin_commands/mysqlmode.h | 2 +- 
contrib/dolphin/output/b_proto_jdbc.source | 4 +- .../dolphin/output/dump_dumpall_test.source | 2 +- contrib/dolphin/output/load.source | 8 +- contrib/dolphin/output/load2.source | 16 +-- contrib/dolphin/output/sqlmode_strict.source | 130 +++++++++--------- 22 files changed, 259 insertions(+), 259 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 7c4128a5e..b5805e741 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -9,7 +9,7 @@ insert into func_test(functionName, result) values('makedate(''2003'',''61'')', insert into func_test(functionName, result) values('makedate(''12.4'',''12.5'')', makedate('12.4','12.5')); insert into func_test(functionName, result) values('makedate(12.4,12.5)', makedate(12.4,12.5)); insert into func_test(functionName, result) values('makedate(''abcd'', ''61'')', makedate('abcd', '61')); -ERROR: invalid input syntax for bigint: "abcd" +ERROR: invalid input syntax for type bigint: "abcd" LINE 1: ...lt) values('makedate(''abcd'', ''61'')', makedate('abcd', '6... ^ insert into func_test(functionName, result) values('makedate(false, true)', makedate(false, true)); @@ -37,7 +37,7 @@ insert into func_test(functionName, result) values('makedate(2001, 9223372036854 insert into func_test(functionName, result) values ('maketime(''25.5'', ''30.4'', ''30'')', maketime('25.5', '30.4', '30')); insert into func_test(functionName, result) values ('maketime(25.5, 30.4, 30)', maketime(25.5, 30.4, 30)); insert into func_test(functionName, result) values ('maketime(''ABCD'', ''30.4'', ''30.123'')', maketime('ABCD', '30.4', '30.123')); -ERROR: invalid input syntax for bigint: "ABCD" +ERROR: invalid input syntax for type bigint: "ABCD" LINE 1: ...ketime(''ABCD'', ''30.4'', ''30.123'')', maketime('ABCD', '3... 
^ insert into func_test(functionName, result) values ('maketime(false, true, true)', maketime(false, true, true)); @@ -87,7 +87,7 @@ insert into func_test(functionName, result) values ('period_add(''202101'', ''10 insert into func_test(functionName, result) values ('period_add(''202101.4'', ''10.5'')', period_add('202101.4', '10.5')); insert into func_test(functionName, result) values ('period_add(202101.4, 10.5)', period_add(202101.4, 10.5)); insert into func_test(functionName, result) values ('period_add(''abcd'', 1)', period_add('abcd', 1)); -ERROR: invalid input syntax for bigint: "abcd" +ERROR: invalid input syntax for type bigint: "abcd" LINE 1: ...lt) values ('period_add(''abcd'', 1)', period_add('abcd', 1)... ^ insert into func_test(functionName, result) values ('period_add(true, false)', period_add(true, false)); @@ -121,7 +121,7 @@ insert into func_test(functionName, result) values ('period_diff(''202101'', ''2 insert into func_test(functionName, result) values ('period_diff(''202101.4'', ''202102.5'')', period_diff('202101.4', '202102.5')); insert into func_test(functionName, result) values ('period_diff(202101.4, 202102.5)', period_diff(202101.4, 202102.5)); insert into func_test(functionName, result) values ('period_diff(''abcd'', ''abcd'')', period_diff('abcd', 'abcd')); -ERROR: invalid input syntax for bigint: "abcd" +ERROR: invalid input syntax for type bigint: "abcd" LINE 1: ...s ('period_diff(''abcd'', ''abcd'')', period_diff('abcd', 'a... 
^ insert into func_test(functionName, result) values ('period_diff(true, false)', period_diff(true, false)); @@ -196,7 +196,7 @@ insert into func_test(functionName, result) values ('subdate(''2022-1-1'', 20)', insert into func_test(functionName, result) values ('subdate(''2022-1-1'', ''20.5'')',subdate('2022-1-1', '20.5')); insert into func_test(functionName, result) values ('subdate(''2022-1-1'', 20.5)',subdate('2022-1-1', 20.5)); insert into func_test(functionName, result) values ('subdate(''abcd'', ''abcd'')',subdate('abcd', 'abcd')); -ERROR: invalid input syntax for bigint: "abcd" +ERROR: invalid input syntax for type bigint: "abcd" CONTEXT: SQL function "subdate" during inlining referenced column: result insert into func_test(functionName, result) values ('subdate(''2022-1-1'', true)',subdate('2022-1-1', true)); diff --git a/contrib/dolphin/expected/builtin_funcs/conv.out b/contrib/dolphin/expected/builtin_funcs/conv.out index aa13e74db..3bf6ef76e 100644 --- a/contrib/dolphin/expected/builtin_funcs/conv.out +++ b/contrib/dolphin/expected/builtin_funcs/conv.out @@ -421,15 +421,15 @@ select conv(10,1,27),conv('zzzz',0,5),conv('xy57',37,8),conv(10,-1,27),conv('xy5 (1 row) select conv(10,1/2,23),conv(10,1.333,23),conv('xy57',pi(),8),conv(10,'Asc',27),conv(321,'',8),conv(321,'张三',8); -WARNING: invalid input syntax for integer: "Asc" +WARNING: invalid input syntax for type integer: "Asc" LINE 1: ...23),conv(10,1.333,23),conv('xy57',pi(),8),conv(10,'Asc',27),... ^ CONTEXT: referenced column: conv -WARNING: invalid input syntax for integer: "" +WARNING: invalid input syntax for type integer: "" LINE 1: ...3),conv('xy57',pi(),8),conv(10,'Asc',27),conv(321,'',8),conv... 
^ CONTEXT: referenced column: conv -WARNING: invalid input syntax for integer: "张三" +WARNING: invalid input syntax for type integer: "张三" LINE 1: ...pi(),8),conv(10,'Asc',27),conv(321,'',8),conv(321,'张三',8); ^ CONTEXT: referenced column: conv @@ -439,15 +439,15 @@ CONTEXT: referenced column: conv (1 row) select conv(10,27,1/2),conv(10,16,'AscY'),conv(321,7,''),conv(321,10,'张三'); -WARNING: invalid input syntax for integer: "AscY" +WARNING: invalid input syntax for type integer: "AscY" LINE 1: select conv(10,27,1/2),conv(10,16,'AscY'),conv(321,7,''),con... ^ CONTEXT: referenced column: conv -WARNING: invalid input syntax for integer: "" +WARNING: invalid input syntax for type integer: "" LINE 1: ...ect conv(10,27,1/2),conv(10,16,'AscY'),conv(321,7,''),conv(3... ^ CONTEXT: referenced column: conv -WARNING: invalid input syntax for integer: "张三" +WARNING: invalid input syntax for type integer: "张三" LINE 1: ...,1/2),conv(10,16,'AscY'),conv(321,7,''),conv(321,10,'张三'); ^ CONTEXT: referenced column: conv diff --git a/contrib/dolphin/expected/builtin_funcs/make_set.out b/contrib/dolphin/expected/builtin_funcs/make_set.out index ba4d2467d..1734f9358 100644 --- a/contrib/dolphin/expected/builtin_funcs/make_set.out +++ b/contrib/dolphin/expected/builtin_funcs/make_set.out @@ -32,7 +32,7 @@ select make_set(0, 'a', 'b', 'c'); (1 row) select make_set('', 'a', 'b', 'c'); -WARNING: invalid input syntax for bigint: "" +WARNING: invalid input syntax for type bigint: "" LINE 1: select make_set('', 'a', 'b', 'c'); ^ CONTEXT: referenced column: make_set diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index 64b89030c..f379726e2 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -408,25 +408,25 @@ INSERT INTO t_collate_expr(fint) VALUES('01100001' collate gbk_bin collate utf8m INSERT INTO t_collate_expr(fint) VALUES('01100001' collate 
utf8mb4_general_ci collate gbk_bin); -- do not check collate -- -- test limit select 1 from t_collate_expr limit(to_hex('11') collate "binary"); -WARNING: invalid input syntax for bigint: "b" +WARNING: invalid input syntax for type bigint: "b" ?column? ---------- (0 rows) select 1 from t_collate_expr limit(to_hex('11') collate gbk_bin); -WARNING: invalid input syntax for bigint: "b" +WARNING: invalid input syntax for type bigint: "b" ?column? ---------- (0 rows) select 1 from t_collate_expr limit(to_hex('11') collate utf8mb4_unicode_ci); -WARNING: invalid input syntax for bigint: "b" +WARNING: invalid input syntax for type bigint: "b" ?column? ---------- (0 rows) select 1 from t_collate_expr limit(to_hex('11') collate gbk_bin collate utf8mb4_unicode_ci); -- do not check collate -WARNING: invalid input syntax for bigint: "b" +WARNING: invalid input syntax for type bigint: "b" ?column? ---------- (0 rows) diff --git a/contrib/dolphin/expected/column_quote_alias.out b/contrib/dolphin/expected/column_quote_alias.out index 835ed88f8..e7f0e991b 100644 --- a/contrib/dolphin/expected/column_quote_alias.out +++ b/contrib/dolphin/expected/column_quote_alias.out @@ -143,7 +143,7 @@ CONTEXT: referenced column: smalldatetime (1 row) select int 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for integer: "keyword_test" +WARNING: invalid input syntax for type integer: "keyword_test" LINE 1: select int 'keyword_test' from t_alias_case0001_1; ^ CONTEXT: referenced column: int4 @@ -153,7 +153,7 @@ CONTEXT: referenced column: int4 (1 row) select tinyint 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for tinyint: "keyword_test" +WARNING: invalid input syntax for type tinyint: "keyword_test" LINE 1: select tinyint 'keyword_test' from t_alias_case0001_1; ^ CONTEXT: referenced column: int1 @@ -163,7 +163,7 @@ CONTEXT: referenced column: int1 (1 row) select smallint 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for 
smallint: "keyword_test" +WARNING: invalid input syntax for type smallint: "keyword_test" LINE 1: select smallint 'keyword_test' from t_alias_case0001_1; ^ CONTEXT: referenced column: int2 @@ -173,7 +173,7 @@ CONTEXT: referenced column: int2 (1 row) select mediumint 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for integer: "keyword_test" +WARNING: invalid input syntax for type integer: "keyword_test" LINE 1: select mediumint 'keyword_test' from t_alias_case0001_1; ^ CONTEXT: referenced column: int4 @@ -183,7 +183,7 @@ CONTEXT: referenced column: int4 (1 row) select bigint 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for bigint: "keyword_test" +WARNING: invalid input syntax for type bigint: "keyword_test" LINE 1: select bigint 'keyword_test' from t_alias_case0001_1; ^ CONTEXT: referenced column: int8 @@ -215,7 +215,7 @@ CONTEXT: referenced column: float8 (1 row) select BINARY_INTEGER 'keyword_test' from t_alias_case0001_1; -WARNING: invalid input syntax for integer: "keyword_test" +WARNING: invalid input syntax for type integer: "keyword_test" LINE 1: select BINARY_INTEGER 'keyword_test' from t_alias_case0001_1... 
^ CONTEXT: referenced column: int4 diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index 5aa3ca3c1..9b3eac74f 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -2136,7 +2136,7 @@ select 1 xor 0; (1 row) select '-12.3abc' xor null; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor null; ^ CONTEXT: referenced column: xor @@ -2146,7 +2146,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor -100.1; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor -100.1; ^ CONTEXT: referenced column: xor @@ -2156,7 +2156,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor 0; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor 0; ^ CONTEXT: referenced column: xor @@ -2166,7 +2166,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor 5; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor 5; ^ CONTEXT: referenced column: xor @@ -2176,7 +2176,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor 158.3; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor 158.3; ^ CONTEXT: referenced column: xor @@ -2186,7 +2186,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor -8.222e4; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor -8.222e4; ^ CONTEXT: referenced column: xor @@ -2196,7 +2196,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor true; -WARNING: 
invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor true; ^ CONTEXT: referenced column: xor @@ -2206,7 +2206,7 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor false; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor false; ^ CONTEXT: referenced column: xor @@ -2216,11 +2216,11 @@ CONTEXT: referenced column: xor (1 row) select '-12.3abc' xor 'null'; -WARNING: invalid input syntax for integer: "-12.3abc" +WARNING: invalid input syntax for type integer: "-12.3abc" LINE 1: select '-12.3abc' xor 'null'; ^ CONTEXT: referenced column: xor -WARNING: invalid input syntax for integer: "null" +WARNING: invalid input syntax for type integer: "null" LINE 1: select '-12.3abc' xor 'null'; ^ CONTEXT: referenced column: xor diff --git a/contrib/dolphin/expected/insert_set.out b/contrib/dolphin/expected/insert_set.out index e391cab24..9cfd12dee 100644 --- a/contrib/dolphin/expected/insert_set.out +++ b/contrib/dolphin/expected/insert_set.out @@ -77,7 +77,7 @@ select * from test_error; (0 rows) insert into test_error set name = 1, id='@'; -ERROR: invalid input syntax for integer: "@" +ERROR: invalid input syntax for type integer: "@" LINE 1: insert into test_error set name = 1, id='@'; ^ CONTEXT: referenced column: id diff --git a/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out b/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out index 56455e9b5..629d79e64 100644 --- a/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out +++ b/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out @@ -7,7 +7,7 @@ drop table if exists t_tinyint; NOTICE: table "t_tinyint" does not exist, skipping create table t_tinyint(c tinyint); insert ignore into t_tinyint values('12a34'); -WARNING: invalid input syntax for tinyint: "12a34" +WARNING: 
invalid input syntax for type tinyint: "12a34" LINE 1: insert ignore into t_tinyint values('12a34'); ^ CONTEXT: referenced column: c @@ -22,17 +22,17 @@ LINE 1: insert ignore into t_tinyint values('-12555a34'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint values('aaa123a34'); -WARNING: invalid input syntax for tinyint: "aaa123a34" +WARNING: invalid input syntax for type tinyint: "aaa123a34" LINE 1: insert ignore into t_tinyint values('aaa123a34'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint values('abcde'); -WARNING: invalid input syntax for tinyint: "abcde" +WARNING: invalid input syntax for type tinyint: "abcde" LINE 1: insert ignore into t_tinyint values('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint values(''); -WARNING: invalid input syntax for tinyint: "" +WARNING: invalid input syntax for type tinyint: "" LINE 1: insert ignore into t_tinyint values(''); ^ CONTEXT: referenced column: c @@ -48,7 +48,7 @@ select * from t_tinyint; (6 rows) update ignore t_tinyint set c = '12a34'; -WARNING: invalid input syntax for tinyint: "12a34" +WARNING: invalid input syntax for type tinyint: "12a34" LINE 1: update ignore t_tinyint set c = '12a34'; ^ CONTEXT: referenced column: c @@ -57,7 +57,7 @@ drop table if exists t_tinyint_unsigned; NOTICE: table "t_tinyint_unsigned" does not exist, skipping create table t_tinyint_unsigned(c tinyint unsigned); insert ignore into t_tinyint_unsigned values('12a34'); -WARNING: invalid input syntax for tinyint unsigned: "12a34" +WARNING: invalid input syntax for type tinyint unsigned: "12a34" LINE 1: insert ignore into t_tinyint_unsigned values('12a34'); ^ CONTEXT: referenced column: c @@ -72,17 +72,17 @@ LINE 1: insert ignore into t_tinyint_unsigned values('-12555a34'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint_unsigned values('aaa123a34'); -WARNING: invalid input syntax for tinyint unsigned: "aaa123a34" +WARNING: invalid input syntax for type tinyint unsigned: 
"aaa123a34" LINE 1: insert ignore into t_tinyint_unsigned values('aaa123a34'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint_unsigned values('abcde'); -WARNING: invalid input syntax for tinyint unsigned: "abcde" +WARNING: invalid input syntax for type tinyint unsigned: "abcde" LINE 1: insert ignore into t_tinyint_unsigned values('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_tinyint_unsigned values(''); -WARNING: invalid input syntax for tinyint unsigned: "" +WARNING: invalid input syntax for type tinyint unsigned: "" LINE 1: insert ignore into t_tinyint_unsigned values(''); ^ CONTEXT: referenced column: c @@ -98,7 +98,7 @@ select * from t_tinyint_unsigned; (6 rows) update ignore t_tinyint_unsigned set c = '12a34'; -WARNING: invalid input syntax for tinyint unsigned: "12a34" +WARNING: invalid input syntax for type tinyint unsigned: "12a34" LINE 1: update ignore t_tinyint_unsigned set c = '12a34'; ^ CONTEXT: referenced column: c @@ -107,7 +107,7 @@ drop table if exists t_smallint; NOTICE: table "t_smallint" does not exist, skipping create table t_smallint(c smallint); insert ignore into t_smallint values ('12a34'); -WARNING: invalid input syntax for smallint: "12a34" +WARNING: invalid input syntax for type smallint: "12a34" LINE 1: insert ignore into t_smallint values ('12a34'); ^ CONTEXT: referenced column: c @@ -122,17 +122,17 @@ LINE 1: insert ignore into t_smallint values ('-123333333333333a34')... 
^ CONTEXT: referenced column: c insert ignore into t_smallint values ('aaa1234a5'); -WARNING: invalid input syntax for smallint: "aaa1234a5" +WARNING: invalid input syntax for type smallint: "aaa1234a5" LINE 1: insert ignore into t_smallint values ('aaa1234a5'); ^ CONTEXT: referenced column: c insert ignore into t_smallint values ('abcde'); -WARNING: invalid input syntax for smallint: "abcde" +WARNING: invalid input syntax for type smallint: "abcde" LINE 1: insert ignore into t_smallint values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_smallint values (''); -WARNING: invalid input syntax for smallint: "" +WARNING: invalid input syntax for type smallint: "" LINE 1: insert ignore into t_smallint values (''); ^ CONTEXT: referenced column: c @@ -148,7 +148,7 @@ select * from t_smallint; (6 rows) update ignore t_smallint set c = '12a34'; -WARNING: invalid input syntax for smallint: "12a34" +WARNING: invalid input syntax for type smallint: "12a34" LINE 1: update ignore t_smallint set c = '12a34'; ^ CONTEXT: referenced column: c @@ -157,7 +157,7 @@ drop table if exists t_smallint_unsigned; NOTICE: table "t_smallint_unsigned" does not exist, skipping create table t_smallint_unsigned(c smallint unsigned); insert ignore into t_smallint_unsigned values ('12a34'); -WARNING: invalid input syntax for smallint unsigned: "12a34" +WARNING: invalid input syntax for type smallint unsigned: "12a34" LINE 1: insert ignore into t_smallint_unsigned values ('12a34'); ^ CONTEXT: referenced column: c @@ -172,17 +172,17 @@ LINE 1: insert ignore into t_smallint_unsigned values ('-12333333333... 
^ CONTEXT: referenced column: c insert ignore into t_smallint_unsigned values ('aaa1234a5'); -WARNING: invalid input syntax for smallint unsigned: "aaa1234a5" +WARNING: invalid input syntax for type smallint unsigned: "aaa1234a5" LINE 1: insert ignore into t_smallint_unsigned values ('aaa1234a5'); ^ CONTEXT: referenced column: c insert ignore into t_smallint_unsigned values ('abcde'); -WARNING: invalid input syntax for smallint unsigned: "abcde" +WARNING: invalid input syntax for type smallint unsigned: "abcde" LINE 1: insert ignore into t_smallint_unsigned values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_smallint_unsigned values (''); -WARNING: invalid input syntax for smallint unsigned: "" +WARNING: invalid input syntax for type smallint unsigned: "" LINE 1: insert ignore into t_smallint_unsigned values (''); ^ CONTEXT: referenced column: c @@ -198,7 +198,7 @@ select * from t_smallint_unsigned; (6 rows) update ignore t_smallint_unsigned set c = '12a34'; -WARNING: invalid input syntax for smallint unsigned: "12a34" +WARNING: invalid input syntax for type smallint unsigned: "12a34" LINE 1: update ignore t_smallint_unsigned set c = '12a34'; ^ CONTEXT: referenced column: c @@ -207,7 +207,7 @@ drop table if exists t_int; NOTICE: table "t_int" does not exist, skipping create table t_int(c int); insert ignore into t_int values ('12a34'); -WARNING: invalid input syntax for integer: "12a34" +WARNING: invalid input syntax for type integer: "12a34" LINE 1: insert ignore into t_int values ('12a34'); ^ CONTEXT: referenced column: c @@ -222,17 +222,17 @@ LINE 1: insert ignore into t_int values ('-1233333333333333333333333... 
^ CONTEXT: referenced column: c insert ignore into t_int values ('aaa123a45'); -WARNING: invalid input syntax for integer: "aaa123a45" +WARNING: invalid input syntax for type integer: "aaa123a45" LINE 1: insert ignore into t_int values ('aaa123a45'); ^ CONTEXT: referenced column: c insert ignore into t_int values ('abcde'); -WARNING: invalid input syntax for integer: "abcde" +WARNING: invalid input syntax for type integer: "abcde" LINE 1: insert ignore into t_int values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_int values (''); -WARNING: invalid input syntax for integer: "" +WARNING: invalid input syntax for type integer: "" LINE 1: insert ignore into t_int values (''); ^ CONTEXT: referenced column: c @@ -248,7 +248,7 @@ select * from t_int; (6 rows) update ignore t_int set c = '12a34'; -WARNING: invalid input syntax for integer: "12a34" +WARNING: invalid input syntax for type integer: "12a34" LINE 1: update ignore t_int set c = '12a34'; ^ CONTEXT: referenced column: c @@ -257,7 +257,7 @@ drop table if exists t_int_unsigned; NOTICE: table "t_int_unsigned" does not exist, skipping create table t_int_unsigned(c int unsigned); insert ignore into t_int_unsigned values ('12a34'); -WARNING: invalid input syntax for int unsigned: "12a34" +WARNING: invalid input syntax for type int unsigned: "12a34" LINE 1: insert ignore into t_int_unsigned values ('12a34'); ^ CONTEXT: referenced column: c @@ -272,17 +272,17 @@ LINE 1: insert ignore into t_int_unsigned values ('-1233333333333333... 
^ CONTEXT: referenced column: c insert ignore into t_int_unsigned values ('aaa123a45'); -WARNING: invalid input syntax for int unsigned: "aaa123a45" +WARNING: invalid input syntax for type int unsigned: "aaa123a45" LINE 1: insert ignore into t_int_unsigned values ('aaa123a45'); ^ CONTEXT: referenced column: c insert ignore into t_int_unsigned values ('abcde'); -WARNING: invalid input syntax for int unsigned: "abcde" +WARNING: invalid input syntax for type int unsigned: "abcde" LINE 1: insert ignore into t_int_unsigned values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_int_unsigned values (''); -WARNING: invalid input syntax for int unsigned: "" +WARNING: invalid input syntax for type int unsigned: "" LINE 1: insert ignore into t_int_unsigned values (''); ^ CONTEXT: referenced column: c @@ -298,7 +298,7 @@ select * from t_int_unsigned; (6 rows) update ignore t_int_unsigned set c = '12a34'; -WARNING: invalid input syntax for int unsigned: "12a34" +WARNING: invalid input syntax for type int unsigned: "12a34" LINE 1: update ignore t_int_unsigned set c = '12a34'; ^ CONTEXT: referenced column: c @@ -307,7 +307,7 @@ drop table if exists t_bigint; NOTICE: table "t_bigint" does not exist, skipping create table t_bigint(c bigint); insert ignore into t_bigint values ('12a34'); -WARNING: invalid input syntax for bigint: "12a34" +WARNING: invalid input syntax for type bigint: "12a34" LINE 1: insert ignore into t_bigint values ('12a34'); ^ CONTEXT: referenced column: c @@ -322,17 +322,17 @@ LINE 1: insert ignore into t_bigint values ('-1233333333333333333333... 
^ CONTEXT: referenced column: c insert ignore into t_bigint values ('aaa123a45'); -WARNING: invalid input syntax for bigint: "aaa123a45" +WARNING: invalid input syntax for type bigint: "aaa123a45" LINE 1: insert ignore into t_bigint values ('aaa123a45'); ^ CONTEXT: referenced column: c insert ignore into t_bigint values ('abcde'); -WARNING: invalid input syntax for bigint: "abcde" +WARNING: invalid input syntax for type bigint: "abcde" LINE 1: insert ignore into t_bigint values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_bigint values (''); -WARNING: invalid input syntax for bigint: "" +WARNING: invalid input syntax for type bigint: "" LINE 1: insert ignore into t_bigint values (''); ^ CONTEXT: referenced column: c @@ -348,7 +348,7 @@ select * from t_bigint; (6 rows) update ignore t_bigint set c = '12a34'; -WARNING: invalid input syntax for bigint: "12a34" +WARNING: invalid input syntax for type bigint: "12a34" LINE 1: update ignore t_bigint set c = '12a34'; ^ CONTEXT: referenced column: c @@ -357,7 +357,7 @@ drop table if exists t_bigint_unsigned; NOTICE: table "t_bigint_unsigned" does not exist, skipping create table t_bigint_unsigned(c bigint unsigned); insert ignore into t_bigint_unsigned values ('12a34'); -WARNING: invalid input syntax for bigint unsigned: "12a34" +WARNING: invalid input syntax for type bigint unsigned: "12a34" LINE 1: insert ignore into t_bigint_unsigned values ('12a34'); ^ CONTEXT: referenced column: c @@ -372,17 +372,17 @@ LINE 1: insert ignore into t_bigint_unsigned values ('-1233333333333... 
^ CONTEXT: referenced column: c insert ignore into t_bigint_unsigned values ('aaa123a45'); -WARNING: invalid input syntax for bigint unsigned: "aaa123a45" +WARNING: invalid input syntax for type bigint unsigned: "aaa123a45" LINE 1: insert ignore into t_bigint_unsigned values ('aaa123a45'); ^ CONTEXT: referenced column: c insert ignore into t_bigint_unsigned values ('abcde'); -WARNING: invalid input syntax for bigint unsigned: "abcde" +WARNING: invalid input syntax for type bigint unsigned: "abcde" LINE 1: insert ignore into t_bigint_unsigned values ('abcde'); ^ CONTEXT: referenced column: c insert ignore into t_bigint_unsigned values (''); -WARNING: invalid input syntax for bigint unsigned: "" +WARNING: invalid input syntax for type bigint unsigned: "" LINE 1: insert ignore into t_bigint_unsigned values (''); ^ CONTEXT: referenced column: c @@ -398,7 +398,7 @@ select * from t_bigint_unsigned; (6 rows) update ignore t_bigint_unsigned set c = '12a34'; -WARNING: invalid input syntax for bigint unsigned: "12a34" +WARNING: invalid input syntax for type bigint unsigned: "12a34" LINE 1: update ignore t_bigint_unsigned set c = '12a34'; ^ CONTEXT: referenced column: c diff --git a/contrib/dolphin/expected/mysqlmode_strict.out b/contrib/dolphin/expected/mysqlmode_strict.out index 202640fe1..083eb967f 100644 --- a/contrib/dolphin/expected/mysqlmode_strict.out +++ b/contrib/dolphin/expected/mysqlmode_strict.out @@ -19,7 +19,7 @@ insert into test_tint(a) values(-355); WARNING: tinyint out of range CONTEXT: referenced column: a insert into test_tint(a) values('a888'); -WARNING: invalid input syntax for tinyint: "a888" +WARNING: invalid input syntax for type tinyint: "a888" LINE 1: insert into test_tint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -29,7 +29,7 @@ LINE 1: insert into test_tint(a) values('888aa'); ^ CONTEXT: referenced column: a insert into test_tint(a) values('-88aa'); -WARNING: invalid input syntax for tinyint: "-88aa" +WARNING: invalid input syntax for 
type tinyint: "-88aa" LINE 1: insert into test_tint(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -74,7 +74,7 @@ insert into test_sint(a) values(-3553434343434343432434); WARNING: smallint out of range CONTEXT: referenced column: a insert into test_sint(a) values('a888'); -WARNING: invalid input syntax for smallint: "a888" +WARNING: invalid input syntax for type smallint: "a888" LINE 1: insert into test_sint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -84,7 +84,7 @@ LINE 1: insert into test_sint(a) values('88123423433834343423434aa')... ^ CONTEXT: referenced column: a insert into test_sint(a) values('-88aa'); -WARNING: invalid input syntax for smallint: "-88aa" +WARNING: invalid input syntax for type smallint: "-88aa" LINE 1: insert into test_sint(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -129,7 +129,7 @@ insert into test_int(a) values(-3553434343434343432434); WARNING: integer out of range CONTEXT: referenced column: a insert into test_int(a) values('a888'); -WARNING: invalid input syntax for integer: "a888" +WARNING: invalid input syntax for type integer: "a888" LINE 1: insert into test_int(a) values('a888'); ^ CONTEXT: referenced column: a @@ -139,7 +139,7 @@ LINE 1: insert into test_int(a) values('88123423433834343423434aa'); ^ CONTEXT: referenced column: a insert into test_int(a) values('-88aa'); -WARNING: invalid input syntax for integer: "-88aa" +WARNING: invalid input syntax for type integer: "-88aa" LINE 1: insert into test_int(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -184,7 +184,7 @@ insert into test_bint(a) values(-3553434343434343432434); WARNING: bigint out of range CONTEXT: referenced column: a insert into test_bint(a) values('a888'); -WARNING: invalid input syntax for bigint: "a888" +WARNING: invalid input syntax for type bigint: "a888" LINE 1: insert into test_bint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -194,7 +194,7 @@ LINE 1: insert into test_bint(a) 
values('88123423433834343423434aa')... ^ CONTEXT: referenced column: a insert into test_bint(a) values('-88aa'); -WARNING: invalid input syntax for bigint: "-88aa" +WARNING: invalid input syntax for type bigint: "-88aa" LINE 1: insert into test_bint(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -410,7 +410,7 @@ select * from test_tint1; delete from test_tint1; insert into test_tint1 select * from test_varchar7; -WARNING: invalid input syntax for tinyint: "adsfsda" +WARNING: invalid input syntax for type tinyint: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type tinyint CONTEXT: referenced column: a @@ -429,9 +429,9 @@ select * from test_tint1; delete from test_tint1; insert into test_tint1 select * from test_char; -WARNING: invalid input syntax for tinyint: "a" +WARNING: invalid input syntax for type tinyint: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "-" +WARNING: invalid input syntax for type tinyint: "-" CONTEXT: referenced column: a select * from test_tint1; a @@ -557,7 +557,7 @@ select * from test_sint1; delete from test_sint1; insert into test_sint1 select * from test_varchar7; -WARNING: invalid input syntax for smallint: "adsfsda" +WARNING: invalid input syntax for type smallint: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type smallint CONTEXT: referenced column: a @@ -572,9 +572,9 @@ select * from test_sint1; delete from test_sint1; insert into test_sint1 select * from test_char; -WARNING: invalid input syntax for smallint: "a" +WARNING: invalid input syntax for type smallint: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for smallint: "-" +WARNING: invalid input syntax for type smallint: "-" CONTEXT: referenced column: a select * from test_sint1; a @@ -686,7 +686,7 @@ select * from test_int1; delete from test_int1; insert into test_int1 select * from test_varchar7; -WARNING: invalid input syntax for integer: 
"adsfsda" +WARNING: invalid input syntax for type integer: "adsfsda" CONTEXT: referenced column: a select * from test_int1; a @@ -699,9 +699,9 @@ select * from test_int1; delete from test_int1; insert into test_int1 select * from test_char; -WARNING: invalid input syntax for integer: "a" +WARNING: invalid input syntax for type integer: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for integer: "-" +WARNING: invalid input syntax for type integer: "-" CONTEXT: referenced column: a select * from test_int1; a @@ -799,7 +799,7 @@ select * from test_bint1; delete from test_bint1; insert into test_bint1 select * from test_varchar7; -WARNING: invalid input syntax for bigint: "adsfsda" +WARNING: invalid input syntax for type bigint: "adsfsda" CONTEXT: referenced column: a select * from test_bint1; a @@ -812,9 +812,9 @@ select * from test_bint1; delete from test_bint1; insert into test_bint1 select * from test_char; -WARNING: invalid input syntax for bigint: "a" +WARNING: invalid input syntax for type bigint: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for bigint: "-" +WARNING: invalid input syntax for type bigint: "-" CONTEXT: referenced column: a select * from test_bint1; a @@ -1448,7 +1448,7 @@ insert into test_tint_strict(a) values(-355); ERROR: tinyint out of range CONTEXT: referenced column: a insert into test_tint_strict(a) values('a888'); -ERROR: invalid input syntax for tinyint: "a888" +ERROR: invalid input syntax for type tinyint: "a888" LINE 1: insert into test_tint_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1458,7 +1458,7 @@ LINE 1: insert into test_tint_strict(a) values('888aa'); ^ CONTEXT: referenced column: a insert into test_tint_strict(a) values('-88aa'); -ERROR: invalid input syntax for tinyint: "-88aa" +ERROR: invalid input syntax for type tinyint: "-88aa" LINE 1: insert into test_tint_strict(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -1495,7 +1495,7 @@ insert into test_sint_strict(a) 
values(-3553434343434343432434); ERROR: smallint out of range CONTEXT: referenced column: a insert into test_sint_strict(a) values('a888'); -ERROR: invalid input syntax for smallint: "a888" +ERROR: invalid input syntax for type smallint: "a888" LINE 1: insert into test_sint_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1505,7 +1505,7 @@ LINE 1: insert into test_sint_strict(a) values('88123423433834343423... ^ CONTEXT: referenced column: a insert into test_sint_strict(a) values('-88aa'); -ERROR: invalid input syntax for smallint: "-88aa" +ERROR: invalid input syntax for type smallint: "-88aa" LINE 1: insert into test_sint_strict(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -1542,7 +1542,7 @@ insert into test_int_strict(a) values(-3553434343434343432434); ERROR: integer out of range CONTEXT: referenced column: a insert into test_int_strict(a) values('a888'); -ERROR: invalid input syntax for integer: "a888" +ERROR: invalid input syntax for type integer: "a888" LINE 1: insert into test_int_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1552,7 +1552,7 @@ LINE 1: insert into test_int_strict(a) values('881234234338343434234... ^ CONTEXT: referenced column: a insert into test_int_strict(a) values('-88aa'); -ERROR: invalid input syntax for integer: "-88aa" +ERROR: invalid input syntax for type integer: "-88aa" LINE 1: insert into test_int_strict(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -1589,7 +1589,7 @@ insert into test_bint_strict(a) values(-3553434343434343432434); ERROR: bigint out of range CONTEXT: referenced column: a insert into test_bint_strict(a) values('a888'); -ERROR: invalid input syntax for bigint: "a888" +ERROR: invalid input syntax for type bigint: "a888" LINE 1: insert into test_bint_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1599,7 +1599,7 @@ LINE 1: insert into test_bint_strict(a) values('88123423433834343423... 
^ CONTEXT: referenced column: a insert into test_bint_strict(a) values('-88aa'); -ERROR: invalid input syntax for bigint: "-88aa" +ERROR: invalid input syntax for type bigint: "-88aa" LINE 1: insert into test_bint_strict(a) values('-88aa'); ^ CONTEXT: referenced column: a @@ -2251,46 +2251,46 @@ DETAIL: Failing row contains (null, null). --test some bug fix create table test_space_to_int1(a tinyint); insert into test_space_to_int1 values('34 55'); -ERROR: invalid input syntax for tinyint: "34 55" +ERROR: invalid input syntax for type tinyint: "34 55" LINE 1: insert into test_space_to_int1 values('34 55'); ^ CONTEXT: referenced column: a create table test_space_to_int2(a smallint); insert into test_space_to_int2 values('34 55'); -ERROR: invalid input syntax for smallint: "34 55" +ERROR: invalid input syntax for type smallint: "34 55" LINE 1: insert into test_space_to_int2 values('34 55'); ^ CONTEXT: referenced column: a create table test_space_to_int4(a int); insert into test_space_to_int4 values('34 55'); -ERROR: invalid input syntax for integer: "34 55" +ERROR: invalid input syntax for type integer: "34 55" LINE 1: insert into test_space_to_int4 values('34 55'); ^ CONTEXT: referenced column: a create table test_space_to_int8(a bigint); insert into test_space_to_int8 values('34 55'); -ERROR: invalid input syntax for bigint: "34 55" +ERROR: invalid input syntax for type bigint: "34 55" LINE 1: insert into test_space_to_int8 values('34 55'); ^ CONTEXT: referenced column: a set dolphin.sql_mode = ''; insert into test_space_to_int1 values('34 55'); -WARNING: invalid input syntax for tinyint: "34 55" +WARNING: invalid input syntax for type tinyint: "34 55" LINE 1: insert into test_space_to_int1 values('34 55'); ^ CONTEXT: referenced column: a insert into test_space_to_int2 values('34 55'); -WARNING: invalid input syntax for smallint: "34 55" +WARNING: invalid input syntax for type smallint: "34 55" LINE 1: insert into test_space_to_int2 values('34 55'); ^ CONTEXT: 
referenced column: a insert into test_space_to_int4 values('34 55'); -WARNING: invalid input syntax for integer: "34 55" +WARNING: invalid input syntax for type integer: "34 55" LINE 1: insert into test_space_to_int4 values('34 55'); ^ CONTEXT: referenced column: a insert into test_space_to_int8 values('34 55'); -WARNING: invalid input syntax for bigint: "34 55" +WARNING: invalid input syntax for type bigint: "34 55" LINE 1: insert into test_space_to_int8 values('34 55'); ^ CONTEXT: referenced column: a diff --git a/contrib/dolphin/expected/mysqlmode_strict2.out b/contrib/dolphin/expected/mysqlmode_strict2.out index 661a0c28d..125bdf01a 100644 --- a/contrib/dolphin/expected/mysqlmode_strict2.out +++ b/contrib/dolphin/expected/mysqlmode_strict2.out @@ -21,7 +21,7 @@ insert into test_tint(a) values(-355); WARNING: tinyint unsigned out of range CONTEXT: referenced column: a insert into test_tint(a) values('a888'); -WARNING: invalid input syntax for tinyint unsigned: "a888" +WARNING: invalid input syntax for type tinyint unsigned: "a888" LINE 1: insert into test_tint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -80,7 +80,7 @@ insert into test_sint(a) values(-3553434343434343432434); WARNING: smallint unsigned out of range CONTEXT: referenced column: a insert into test_sint(a) values('a888'); -WARNING: invalid input syntax for smallint unsigned: "a888" +WARNING: invalid input syntax for type smallint unsigned: "a888" LINE 1: insert into test_sint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -139,7 +139,7 @@ insert into test_int(a) values(-3553434343434343432434); WARNING: unsigned int out of range CONTEXT: referenced column: a insert into test_int(a) values('a888'); -WARNING: invalid input syntax for int unsigned: "a888" +WARNING: invalid input syntax for type int unsigned: "a888" LINE 1: insert into test_int(a) values('a888'); ^ CONTEXT: referenced column: a @@ -198,7 +198,7 @@ insert into test_bint(a) values(-3553434343434343432434); WARNING: bigint 
unsigned out of range CONTEXT: referenced column: a insert into test_bint(a) values('a888'); -WARNING: invalid input syntax for bigint unsigned: "a888" +WARNING: invalid input syntax for type bigint unsigned: "a888" LINE 1: insert into test_bint(a) values('a888'); ^ CONTEXT: referenced column: a @@ -408,7 +408,7 @@ select * from test_tint1; delete from test_tint1; insert into test_tint1 select * from test_varchar7; -WARNING: invalid input syntax for tinyint unsigned: "adsfsda" +WARNING: invalid input syntax for type tinyint unsigned: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type tinyint unsigned CONTEXT: referenced column: a @@ -427,9 +427,9 @@ select * from test_tint1; delete from test_tint1; insert into test_tint1 select * from test_char; -WARNING: invalid input syntax for tinyint unsigned: "a" +WARNING: invalid input syntax for type tinyint unsigned: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint unsigned: "-" +WARNING: invalid input syntax for type tinyint unsigned: "-" CONTEXT: referenced column: a select * from test_tint1; a @@ -543,7 +543,7 @@ select * from test_sint1; delete from test_sint1; insert into test_sint1 select * from test_varchar7; -WARNING: invalid input syntax for smallint unsigned: "adsfsda" +WARNING: invalid input syntax for type smallint unsigned: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type smallint unsigned CONTEXT: referenced column: a @@ -558,9 +558,9 @@ select * from test_sint1; delete from test_sint1; insert into test_sint1 select * from test_char; -WARNING: invalid input syntax for smallint unsigned: "a" +WARNING: invalid input syntax for type smallint unsigned: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for smallint unsigned: "-" +WARNING: invalid input syntax for type smallint unsigned: "-" CONTEXT: referenced column: a select * from test_sint1; a @@ -666,7 +666,7 @@ select * from test_int1; 
delete from test_int1; insert into test_int1 select * from test_varchar7; -WARNING: invalid input syntax for int unsigned: "adsfsda" +WARNING: invalid input syntax for type int unsigned: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type int unsigned CONTEXT: referenced column: a @@ -681,9 +681,9 @@ select * from test_int1; delete from test_int1; insert into test_int1 select * from test_char; -WARNING: invalid input syntax for int unsigned: "a" +WARNING: invalid input syntax for type int unsigned: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for int unsigned: "-" +WARNING: invalid input syntax for type int unsigned: "-" CONTEXT: referenced column: a select * from test_int1; a @@ -781,7 +781,7 @@ select * from test_bint1; delete from test_bint1; insert into test_bint1 select * from test_varchar7; -WARNING: invalid input syntax for bigint unsigned: "adsfsda" +WARNING: invalid input syntax for type bigint unsigned: "adsfsda" CONTEXT: referenced column: a WARNING: value "-123499" is out of range for type bigint unsigned CONTEXT: referenced column: a @@ -796,9 +796,9 @@ select * from test_bint1; delete from test_bint1; insert into test_bint1 select * from test_char; -WARNING: invalid input syntax for bigint unsigned: "a" +WARNING: invalid input syntax for type bigint unsigned: "a" CONTEXT: referenced column: a -WARNING: invalid input syntax for bigint unsigned: "-" +WARNING: invalid input syntax for type bigint unsigned: "-" CONTEXT: referenced column: a select * from test_bint1; a @@ -1434,7 +1434,7 @@ insert into test_tint_strict(a) values(-355); ERROR: tinyint unsigned out of range CONTEXT: referenced column: a insert into test_tint_strict(a) values('a888'); -ERROR: invalid input syntax for tinyint unsigned: "a888" +ERROR: invalid input syntax for type tinyint unsigned: "a888" LINE 1: insert into test_tint_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1483,7 +1483,7 @@ insert into 
test_sint_strict(a) values(-3553434343434343432434); ERROR: smallint unsigned out of range CONTEXT: referenced column: a insert into test_sint_strict(a) values('a888'); -ERROR: invalid input syntax for smallint unsigned: "a888" +ERROR: invalid input syntax for type smallint unsigned: "a888" LINE 1: insert into test_sint_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1532,7 +1532,7 @@ insert into test_int_strict(a) values(-3553434343434343432434); ERROR: unsigned int out of range CONTEXT: referenced column: a insert into test_int_strict(a) values('a888'); -ERROR: invalid input syntax for int unsigned: "a888" +ERROR: invalid input syntax for type int unsigned: "a888" LINE 1: insert into test_int_strict(a) values('a888'); ^ CONTEXT: referenced column: a @@ -1581,7 +1581,7 @@ insert into test_bint_strict(a) values(-3553434343434343432434); ERROR: bigint unsigned out of range CONTEXT: referenced column: a insert into test_bint_strict(a) values('a888'); -ERROR: invalid input syntax for bigint unsigned: "a888" +ERROR: invalid input syntax for type bigint unsigned: "a888" LINE 1: insert into test_bint_strict(a) values('a888'); ^ CONTEXT: referenced column: a diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 8d099b98e..2709528ce 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -3329,22 +3329,22 @@ CONTEXT: referenced column: char^json WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: char^json UPDATE test_json_type, test_json_table SET test_json_type.`char>>json` = test_json_table.`char` >> test_json_table.`json`; -WARNING: invalid input syntax for bigint: "1.23a" +WARNING: invalid input syntax for type bigint: "1.23a" CONTEXT: referenced column: char>>json 
WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: char>>json UPDATE test_json_type, test_json_table SET test_json_type.`char<>json` = test_json_table.`varchar` >> test_json_table.`json`; -WARNING: invalid input syntax for bigint: "1.23a" +WARNING: invalid input syntax for type bigint: "1.23a" CONTEXT: referenced column: varchar>>json WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: varchar>>json UPDATE test_json_type, test_json_table SET test_json_type.`varchar<>json` = test_json_table.`text` >> test_json_table.`json`; -WARNING: invalid input syntax for bigint: "1.23a" +WARNING: invalid input syntax for type bigint: "1.23a" CONTEXT: referenced column: text>>json WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: text>>json UPDATE test_json_type, test_json_table SET test_json_type.`text<>char` = test_json_table.`json` >> test_json_table.`char`; WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' CONTEXT: referenced column: json>>char -WARNING: invalid input syntax for integer: "1.23a" +WARNING: invalid input syntax for type integer: "1.23a" CONTEXT: referenced column: json>>char UPDATE test_json_type, test_json_table SET test_json_type.`json<>varchar` = test_json_table.`json` >> test_json_table.`varchar`; WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' CONTEXT: referenced column: json>>varchar -WARNING: invalid input syntax for integer: "1.23a" +WARNING: invalid input syntax for type integer: "1.23a" CONTEXT: referenced column: json>>varchar UPDATE test_json_type, test_json_table SET test_json_type.`json<>text` = test_json_table.`json` >> test_json_table.`text`; WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' CONTEXT: referenced column: json>>text -WARNING: invalid input syntax for integer: "1.23a" +WARNING: invalid input syntax for type integer: "1.23a" CONTEXT: 
referenced column: json>>text UPDATE test_json_type, test_json_table SET test_json_type.`json< 'abc' ^ QUERY: SELECT b > 'abc' @@ -340,7 +340,7 @@ referenced column: test1 insert into t_text values(test1(1))// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -WARNING: invalid input syntax for integer: "abc" +WARNING: invalid input syntax for type integer: "abc" LINE 1: SELECT b > 'abc' ^ QUERY: SELECT b > 'abc' @@ -371,7 +371,7 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT __case__variable_11__ IN ('abc') ^ QUERY: SELECT __case__variable_11__ IN ('abc') @@ -385,7 +385,7 @@ referenced column: test1 insert into t_text values(test1(1))// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT __case__variable_11__ IN ('abc') ^ QUERY: SELECT __case__variable_11__ IN ('abc') @@ -416,7 +416,7 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT a >= 'abc' ^ QUERY: SELECT a >= 'abc' @@ -430,7 +430,7 @@ referenced column: test1 insert into t_text values(test1(1))// WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT a >= 'abc' ^ QUERY: SELECT a >= 'abc' @@ -461,10 +461,10 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for integer: "abc" +WARNING: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 4 at FOR with integer loop variable referenced column: test1 -WARNING: invalid input syntax for integer: "1abc" +WARNING: invalid input syntax for type integer: "1abc" CONTEXT: PL/pgSQL function test1(tinyint) line 4 at FOR with integer loop variable referenced column: test1 test1 @@ -475,7 +475,7 @@ referenced column: test1 insert into t_text values(test1(1))// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 4 at FOR with integer loop variable referenced column: a select * from t_text order by 1// @@ -505,7 +505,7 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_int1 where a='abc' ^ QUERY: select a from t_int1 where a='abc' @@ -519,7 +519,7 @@ referenced column: test1 insert into t_text values(test1(1))// -- warning WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_int1 where a='abc' ^ QUERY: select a from t_int1 where a='abc' @@ -549,10 +549,10 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for integer: "abc" +WARNING: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 3 at FOR with integer loop variable referenced column: test1 -WARNING: invalid input syntax for integer: "1abc" +WARNING: invalid input syntax for type integer: "1abc" CONTEXT: PL/pgSQL function test1(tinyint) line 3 at FOR with integer loop variable referenced column: test1 test1 @@ -563,7 +563,7 @@ referenced column: test1 insert into t_text values(test1(1))// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 3 at FOR with integer loop variable referenced column: a select * from t_text order by 1// @@ -592,7 +592,7 @@ return c; end; // select test1(1)// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -606,7 +606,7 @@ referenced column: test1 insert into t_text values(test1(1))// -- warning WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -642,10 +642,10 @@ return c; end; // select test1()// -WARNING: invalid input syntax for integer: "a" +WARNING: invalid input syntax for type integer: "a" CONTEXT: PL/pgSQL function test1() line 6 at FOREACH over array referenced column: test1 -WARNING: invalid input syntax for integer: "b" +WARNING: invalid input syntax for type integer: "b" CONTEXT: PL/pgSQL function test1() line 6 at FOREACH over array referenced column: test1 test1 @@ -656,7 +656,7 @@ referenced column: test1 insert into t_text values(test1())// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "a" +ERROR: invalid input syntax for type integer: "a" CONTEXT: PL/pgSQL function test1() line 6 at FOREACH over array referenced column: a select * from t_text order by 1// @@ -682,13 +682,13 @@ return 'abc'; -- wrong value in return case, report error end; // select test1()// -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() while casting return value to function's return type referenced column: test1 insert into t_text values(test1())// WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() while casting return value to function's return type referenced column: a select * from t_text order by 1// @@ -716,13 +716,13 @@ return; end; // select test1()// -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() line 2 at RETURN NEXT referenced column: test1 insert into t_text values(test1())// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() line 2 at RETURN NEXT referenced column: a select * from t_text order by 1// @@ -748,7 +748,7 @@ return; end; // select test1()// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: (select a from t_uint1 where a='abc') ^ QUERY: (select a from t_uint1 where a='abc') @@ -762,7 +762,7 @@ referenced column: test1 insert into t_text values(test1())// -- warning WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: (select a from t_uint1 where a='abc') ^ QUERY: (select a from t_uint1 where a='abc') @@ -793,7 +793,7 @@ return res; end; // select test1()// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -807,7 +807,7 @@ referenced column: test1 insert into t_text values(test1())// -- warning WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -839,7 +839,7 @@ return res; end; // select test1()// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -853,7 +853,7 @@ referenced column: test1 insert into t_text values(test1())// -- warning WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc' ^ QUERY: select a from t_uint1 where a='abc' @@ -890,7 +890,7 @@ return res; end; // select test1()// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc'; ^ QUERY: select a from t_uint1 where a='abc'; @@ -904,7 +904,7 @@ referenced column: test1 insert into t_text values(test1())// -- warning WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a from t_uint1 where a='abc'; ^ QUERY: select a from t_uint1 where a='abc'; @@ -942,7 +942,7 @@ return res; end; // select test1()// -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT a FROM t_uint1 where a = 'abc' ^ QUERY: SELECT a FROM t_uint1 where a = 'abc' @@ -956,7 +956,7 @@ referenced column: test1 insert into t_text values(test1())// -- warning WARNING: re-compile function 'test1' due to strict mode. 
CONTEXT: referenced column: a -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: SELECT a FROM t_uint1 where a = 'abc' ^ QUERY: SELECT a FROM t_uint1 where a = 'abc' @@ -995,13 +995,13 @@ return res; end; // select test1()// -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() line 5 at FETCH referenced column: test1 insert into t_text values(test1())// WARNING: re-compile function 'test1' due to strict mode. CONTEXT: referenced column: a -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1() line 5 at FETCH referenced column: a select * from t_text order by 1// @@ -1027,15 +1027,15 @@ select * from t_text order by 1// drop procedure test1// delimiter ; select a into @a from t_uint1 where a='abc'; -- warning -WARNING: invalid input syntax for tinyint: "abc" +WARNING: invalid input syntax for type tinyint: "abc" LINE 1: select a into @a from t_uint1 where a='abc'; ^ select a into outfile '@abs_srcdir@/data/strict_mode_test.data' from t_uint1 where a='abc'; -- warning -WARNING: invalid input syntax for tinyint: "abc" ---?.* ---?.* +WARNING: invalid input syntax for type tinyint: "abc" +LINE 1: ...phin/data/strict_mode_test.data' from t_uint1 where a='abc'; + ^ select * into new_table from t_uint1 where a = 'abc'; -- ctas, error -ERROR: invalid input syntax for tinyint: "abc" +ERROR: invalid input syntax for type tinyint: "abc" LINE 1: select * into new_table from t_uint1 where a = 'abc'; ^ -- language sql @@ -1095,7 +1095,7 @@ as $$ select 1 + 1; update t_int1 set a = 1 where a = 'abc'; $$; -ERROR: invalid input syntax for tinyint: "abc" +ERROR: invalid input syntax for type tinyint: "abc" LINE 4: update t_int1 set a = 1 where a = 'abc'; ^ --remove auto_recompile_function @@ -1109,11 +1109,11 @@ return b; end; // select test1(1)// -ERROR: 
invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 3 at assignment referenced column: test1 insert into t_text values(test1(1))// -- no recompile warning -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: PL/pgSQL function test1(tinyint) line 3 at assignment referenced column: a drop procedure test1// -- Gitee From 5dc4259b44db8d7991d7cfb8a5b59e5c4bf19b8b Mon Sep 17 00:00:00 2001 From: Mijamind Date: Mon, 23 Oct 2023 11:33:29 +0800 Subject: [PATCH 027/434] =?UTF-8?q?1.=E6=96=B0=E5=A2=9E=E5=8D=8F=E8=B0=83?= =?UTF-8?q?=E7=BA=BF=E7=A8=8B=E6=94=AF=E6=8C=81=E8=87=AA=E9=80=82=E5=BA=94?= =?UTF-8?q?=E6=89=AB=E6=8F=8F=202.string=5Fagg=20bugfix=203.add=20calc=20S?= =?UTF-8?q?pqSeqScan=20dop=20func?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../spq_plugin/src/executor/spq_seqscan.cpp | 147 ++++++++++++++++++ contrib/spq_plugin/src/guc_spq.cpp | 20 +++ contrib/spq_plugin/src/spq/spq_mutate.cpp | 77 +++++++-- .../translate/CTranslatorDXLToPlStmt.cpp | 9 +- .../translate/CTranslatorDXLToScalar.cpp | 21 +-- 5 files changed, 251 insertions(+), 23 deletions(-) diff --git a/contrib/spq_plugin/src/executor/spq_seqscan.cpp b/contrib/spq_plugin/src/executor/spq_seqscan.cpp index 7caab8d63..f69ca0063 100644 --- a/contrib/spq_plugin/src/executor/spq_seqscan.cpp +++ b/contrib/spq_plugin/src/executor/spq_seqscan.cpp @@ -27,6 +27,9 @@ #include "mpmcqueue.h" #include "executor/executor.h" #include "executor/node/nodeSeqscan.h" +#include "pgxc/execRemote.h" +#include "libpq/pqformat.h" +#include "libpq/libpq.h" #include "executor/spq_seqscan.h" #define DECOMPRESS_HEAP_TUPLE(_isCompressed, _heapTuple, _destTupleData, _rd_att, _heapPage) \ @@ -332,6 +335,142 @@ public: } }; +class SpqAdaptiveBlockManager : public SpqBlockManager { +public: + uint32 maxBlockNum; + int plan_node_id; + int64_t 
iter_no; + bool isBlockEnd; + uint32 end; + bool connected; + gsocket forward_conn; + gsocket backward_conn; +public: + SpqAdaptiveBlockManager(uint32 maxBlockNum, ScanDirection direction, int plan_node_id, uint32 step) + : SpqBlockManager(direction, step), maxBlockNum(maxBlockNum), plan_node_id(plan_node_id) + { + isBlockEnd = false; + iter_no = 0; + end = InvalidBlockNumber; + connected = false; + } + + void BuildConnect() + { + QCConnKey key = { + .query_id = u_sess->debug_query_id, + .plan_node_id = plan_node_id, + .node_id = 0, + .type = SPQ_QC_CONNECTION, + }; + + constexpr int MAX_RETRY_TIME = 100000; + bool found = false; + QCConnEntry* entry; + int retry = 0; + while (!found && retry < MAX_RETRY_TIME) { + pthread_rwlock_wrlock(&g_instance.spq_cxt.adp_connects_lock); + entry = (QCConnEntry*)hash_search(g_instance.spq_cxt.adp_connects, (void*)&key, HASH_FIND, &found); + if (!found) { + pthread_rwlock_unlock(&g_instance.spq_cxt.adp_connects_lock); + pg_usleep(100); + ++retry; + continue; + } + backward_conn = entry->backward; + BackConnInfo fcmsg; + if (entry->forward.idx == 0) { + fcmsg.node_idx = backward_conn.idx; + fcmsg.version = backward_conn.ver; + fcmsg.streamcap = entry->streamcap; + fcmsg.query_id = u_sess->debug_query_id; + fcmsg.stream_key = { + .queryId = entry->key.query_id, + .planNodeId = entry->key.plan_node_id, + .producerSmpId = 0, + .consumerSmpId = 0, + }; + fcmsg.backward = &backward_conn; + int error = gs_r_build_reply_connection(&fcmsg, backward_conn.ver, &entry->forward.sid); + if (error != 0) { + gs_close_gsocket(&entry->forward); + ereport(ERROR, ((errmsg("spq try build dual channel backward direction failed")))); + } + entry->forward.idx = backward_conn.idx; + entry->forward.ver = backward_conn.ver; + entry->forward.type = GSOCK_PRODUCER; + } + forward_conn = entry->forward; + pthread_rwlock_unlock(&g_instance.spq_cxt.adp_connects_lock); + break; + } + if (backward_conn.idx == 0) { + gs_close_gsocket(&backward_conn); + 
ereport(ERROR, ((errmsg("spq try build dual channel forward direction failed")))); + } + } + + SpqAdpScanPagesRes adps_get_adps_response(uint32 nblocks, int64_t iter_no) + { + if (!connected) { + BuildConnect(); + connected = true; + } + SpqAdpScanPagesRes seqRes; + SpqAdpScanPagesReq req = { + .plan_node_id = plan_node_id, + .direction = SpqBlockManager::direction, + .nblocks = nblocks, + .cur_scan_iter_no = iter_no, + }; + + int rc = gs_send(&forward_conn, (char*)&req, sizeof(SpqAdpScanPagesReq), -1, true); + if (rc <= 0) { + ereport(ERROR, (errmsg("spq seq scan: try send adaptive request failed"))); + } + + do { + rc = gs_recv(&backward_conn, (char*)&seqRes, sizeof(SpqAdpScanPagesRes)); + } while (rc == 0 || errno == ECOMMTCPNODATA); + + if (rc < 0) { + ereport(ERROR, (errmsg("spq seq scan: try recv adaptive request failed"))); + } + + return seqRes; + } + + SpqState GetBlockIDs(uint32 &start, uint32 &end) + { + SpqAdpScanPagesRes response = adps_get_adps_response(maxBlockNum, iter_no); + if (response.success == false) { + isBlockEnd = true; + return SPQ_QUERY_END; + } + start = response.page_start; + end = response.page_end; + this->end = end; + + return SPQ_SUCCESS; + } + + bool IsBlockEnd() + { + if (ScanDirectionIsNoMovement(direction)) { + // has no direction, means will not get new page for scanning. 
+ return true; + } else { + return isBlockEnd; + } + } + + void Rescan() + { + ++iter_no; + isBlockEnd = false; + } +}; + TupleTableSlot* SpqScanNext(ScanState* node) { if (node->ps.type != T_SpqSeqScanState) { @@ -361,6 +500,9 @@ TupleTableSlot* SpqScanNext(ScanState* node) CHECK_FOR_INTERRUPTS(); uint32 start, end; state = blockManager->GetBlockIDs(start, end); + if (state == SpqState::SPQ_QUERY_END) { + return NULL; + } if (state != SpqState::SPQ_SUCCESS) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("block manager get block ids error, code: %d", state))); @@ -450,6 +592,11 @@ SpqSeqScanState* ExecInitSpqSeqScan(SpqSeqScan* node, EState* estate, int eflags seqScan->ss_currentScanDesc->rs_nblocks, estate->es_direction, FETCH_BLOCK_NUM); + } else if (node->isAdaptiveScan) { + blockManager = New(CurrentMemoryContext) SpqAdaptiveBlockManager(seqScan->ss_currentScanDesc->rs_nblocks, + estate->es_direction, + node->scan.plan.plan_node_id, + FETCH_BLOCK_NUM); } else { int sliceNumber; int instanceID; diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 60bda85eb..90cfd606c 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1100,6 +1100,26 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_adaptive_scan", + "Enable spq adaptive scan for dynamic table scan range.", + NULL, + &u_sess->attr.attr_spq.spq_enable_adaptive_scan, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_optimizer_calc_multiple_dop", + "Enable spq select 1 or n dop.", + NULL, + &u_sess->attr.attr_spq.spq_optimizer_calc_multiple_dop, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesInt() diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index e2e24d91d..e8cbe9f30 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ 
b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -23,6 +23,11 @@ #include "nodes/pg_list.h" #include "optimizer/var.h" #include "optimizer/tlist.h" +#include "utils/spccache.h" +#include "optimizer/cost.h" +#include "spq/spq_util.h" +#include "parser/parsetree.h" + /* * Is the node a "subclass" of Plan? */ @@ -579,6 +584,56 @@ List* make_distributed_key_by_groupingset(PlannerInfo* root, Plan *subplan, List } return distributed; } +int exec_by_multiple_dop(PlannerInfo* root, Plan *spqplan) +{ + if (u_sess->attr.attr_spq.spq_optimizer_calc_multiple_dop == false) { + return spqplan->dop; + } + if (!IsA(spqplan, SpqSeqScan)) { + return spqplan->dop; + } + int cur_dop = u_sess->opt_cxt.query_dop > 1 ? u_sess->opt_cxt.query_dop : 1; + if (cur_dop == 1) { + return cur_dop; + } + /* cost_seqscan use for reference + 1. QualCost(startup_cost) is not used because baserestrictinfo is not initialized. + 2. cpu_run_cost is not used because reltarget is not initialized. + baserestrictinfo & reltarget need to call a series of set_baserel_size_estimates function + */ + Cost run_cost_single = 0; + Cost run_cost_mul = 0; + Cost cpu_per_tuple = 0.0; + SeqScan *seqscan = (SeqScan *)spqplan; + Oid reloid = getrelid(seqscan->scanrelid, root->glob->finalrtable); + Assert(reloid != InvalidOid); + Relation relation = heap_open(reloid, NoLock); + Oid reltablespace = RelationGetForm(relation)->reltablespace; + RelPageType curpages = RelationGetNumberOfBlocks(relation); + double rows = (double)relation->rd_rel->reltuples; + double spc_seq_page_cost; + + get_tablespace_page_costs(reltablespace, NULL, &spc_seq_page_cost); + // cant init qpqual_cost + + run_cost_mul += u_sess->opt_cxt.smp_thread_cost * (cur_dop - 1); + + run_cost_single += spc_seq_page_cost * curpages; + if (u_sess->attr.attr_sql.enable_seqscan_dopcost) { + run_cost_mul += spc_seq_page_cost * curpages / cur_dop; + } else { + run_cost_mul += spc_seq_page_cost * curpages; + } + cpu_per_tuple = u_sess->attr.attr_sql.cpu_tuple_cost; + 
run_cost_single += cpu_per_tuple * clamp_row_est(rows); + run_cost_mul += cpu_per_tuple * clamp_row_est(rows / getSpqsegmentCount()); + if (run_cost_mul > run_cost_single ) { + spqplan->dop = 1; + spqplan->parallel_enabled = false; + } + heap_close(relation, NoLock); + return spqplan->dop; +} Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) { @@ -598,7 +653,7 @@ Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) plan->multiple = 1.0; // set by redistribute_keys? - stream->smpDesc.producerDop = subplan->dop; + stream->smpDesc.producerDop = exec_by_multiple_dop(root, subplan); stream->smpDesc.consumerDop = u_sess->opt_cxt.query_dop; plan->dop = stream->smpDesc.consumerDop; @@ -641,7 +696,7 @@ Plan* make_sort(Motion *motion, Plan *subplan) node->nullsFirst = motion->nullsFirst; return (Plan*)node; } -Plan* create_spq_local_gather(Plan* plan, Motion *motion) +Plan* create_spq_local_gather(PlannerInfo* root, Plan* plan, Motion *motion) { if (IsA(plan, Stream)) { Stream* st = (Stream*)plan; @@ -658,7 +713,7 @@ Plan* create_spq_local_gather(Plan* plan, Motion *motion) stream_node->is_dummy = false; stream_node->sort = NULL; stream_node->smpDesc.consumerDop = 1; - stream_node->smpDesc.producerDop = plan->dop; + stream_node->smpDesc.producerDop = exec_by_multiple_dop(root, plan); stream_node->smpDesc.distriType = LOCAL_ROUNDROBIN; stream_node->distribute_keys = NIL; @@ -678,10 +733,10 @@ Plan* create_spq_local_gather(Plan* plan, Motion *motion) stream_node->streamID = motion->motionID; return stream_plan; } -Plan* make_gather_Remote(Plan *lefttree, Motion *motion) { +Plan* make_gather_Remote(PlannerInfo* root, Plan *lefttree, Motion *motion) { if (lefttree->dop > 1) { - lefttree = create_spq_local_gather(lefttree, motion); + lefttree = create_spq_local_gather(root, lefttree, motion); } RemoteQuery* remote_query = makeNode(RemoteQuery); remote_query->combine_type = COMBINE_TYPE_NONE; @@ -710,7 +765,7 @@ Plan* make_gather_Remote(Plan 
*lefttree, Motion *motion) { return (Plan*)remote_query; } -Plan* make_gather_stream(Plan *subplan, Motion *motion) { +Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion) { /* Set stream struct parameter. */ //double size = (PLAN_LOCAL_ROWS(subplan)) * (subplan->plan_width) / 8192.0; Stream *stream_node = makeNode(Stream); @@ -720,7 +775,7 @@ Plan* make_gather_stream(Plan *subplan, Motion *motion) { stream_node->is_dummy = false; stream_node->sort = NULL; stream_node->smpDesc.consumerDop = u_sess->opt_cxt.query_dop; - stream_node->smpDesc.producerDop = subplan->dop; /* plan->dop */ + stream_node->smpDesc.producerDop = exec_by_multiple_dop(root, subplan); /* plan->dop */ stream_node->smpDesc.distriType = REMOTE_DIRECT_DISTRIBUTE; stream_node->distribute_keys = NIL; /* Set plan struct parameter. */ @@ -800,9 +855,9 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) if (motion->motionType == MOTIONTYPE_GATHER) { if (backtop) { top = backtop; - return make_gather_Remote(subplan, motion); + return make_gather_Remote(root, subplan, motion); } - Plan *gather_stream = make_gather_stream(subplan, motion); + Plan *gather_stream = make_gather_stream(root, subplan, motion); top = backtop; return gather_stream; } else { @@ -816,7 +871,9 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) if (plan->righttree) { plan->righttree = replace_motion_stream_recurse(root, plan->righttree, top); } - plan->dop = u_sess->opt_cxt.query_dop; + if (plan->lefttree == nullptr && plan->righttree == nullptr) { + plan->dop = u_sess->opt_cxt.query_dop; + } plan->parallel_enabled = (plan->dop > 1); return plan; } diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index ced7f3416..006bcae0d 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ 
b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -328,9 +328,9 @@ CTranslatorDXLToPlStmt::GetPlannedStmtFromDXL(const CDXLNode *dxlnode, //m_dxl_to_plstmt_context->GetCurrentMotionId() - 1; planned_stmt->commandType = m_cmd_type; - /* SPQ: for get param type list */ - List* paramList = m_dxl_to_plstmt_context->GetParamTypes(); - planned_stmt->nParamExec = spqdb::ListLength(paramList); + /* SPQ: for get param type list */ + List* paramList = m_dxl_to_plstmt_context->GetParamTypes(); + planned_stmt->nParamExec = spqdb::ListLength(paramList); /*SPQOS_ASSERT(plan->nMotionNodes >= 0); if (0 == plan->nMotionNodes && !m_is_tgt_tbl_distributed) @@ -376,6 +376,7 @@ CTranslatorDXLToPlStmt::GetPlannedStmtFromDXL(const CDXLNode *dxlnode, } } }*/ + planned_stmt->enable_adaptive_scan = u_sess->attr.attr_spq.spq_enable_adaptive_scan; return planned_stmt; } @@ -609,7 +610,7 @@ CTranslatorDXLToPlStmt::TranslateDXLTblScan( SpqSeqScan *spq_scan = MakeNode(SpqSeqScan); spq_scan->scan.scanrelid = index; spq_scan->isFullTableScan = false; - spq_scan->isAdaptiveScan = false; + spq_scan->isAdaptiveScan = u_sess->attr.attr_spq.spq_enable_adaptive_scan; spq_scan->isDirectRead = false; plan = &(spq_scan->scan.plan); plan_return = (Plan *) spq_scan; diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp index 3cdf93077..9a63b7cad 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp @@ -15,6 +15,7 @@ #include "postgres.h" #include "catalog/pg_collation.h" +#include "catalog/pg_proc.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" @@ -519,22 +520,24 @@ CTranslatorDXLToScalar::TranslateDXLScalarAggrefToScalar( switch (dxlop->GetDXLAggStage()) { case EdxlaggstageNormal: - 
aggref->aggsplittype = AGGSTAGE_NORMAL; + aggref->aggsplittype = AGGSTAGE_NORMAL; break; case EdxlaggstagePartial: - aggref->aggsplittype = AGGSTAGE_PARTIAL; + aggref->aggsplittype = AGGSTAGE_PARTIAL; aggref->agghas_collectfn = false; break; case EdxlaggstageIntermediate: - aggref->aggsplittype = AGGSTAGE_PARTIAL; - aggref->agghas_collectfn = true; - aggref->aggstage = aggref->aggstage + 1; - break; - case EdxlaggstageFinal: - aggref->aggsplittype = AGGSTAGE_FINAL; + aggref->aggsplittype = AGGSTAGE_PARTIAL; aggref->agghas_collectfn = true; + aggref->aggstage = aggref->aggstage + 1; + break; + case EdxlaggstageFinal: + aggref->aggsplittype = AGGSTAGE_FINAL; + if (aggref->aggfnoid != STRINGAGGFUNCOID) { + aggref->agghas_collectfn = true; + } SPQOS_ASSERT(aggref->aggstage == 0); - aggref->aggstage = aggref->aggstage + 1; + aggref->aggstage = aggref->aggstage + 1; break; default: SPQOS_RAISE( -- Gitee From 1c2780b515ac1441bcb5db6162d41c2059ca8907 Mon Sep 17 00:00:00 2001 From: suncan <1006949218@qq.com> Date: Tue, 24 Oct 2023 15:31:24 +0800 Subject: [PATCH 028/434] =?UTF-8?q?set=20=E7=B1=BB=E5=9E=8B=E6=94=AF?= =?UTF-8?q?=E6=8C=81=E4=B8=BA=E7=A9=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_optimizer/commands/copy.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/contrib/dolphin/plugin_optimizer/commands/copy.cpp b/contrib/dolphin/plugin_optimizer/commands/copy.cpp index 23b23a928..cbcc28a5e 100644 --- a/contrib/dolphin/plugin_optimizer/commands/copy.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/copy.cpp @@ -5859,6 +5859,10 @@ static int CopyFromCompressAndInsertBatch(PageCompress* pcState, EState* estate, // bool IsTypeAcceptEmptyStr(Oid typeOid) { + if (type_is_set(typeOid)) { + return true; + } + switch (typeOid) { case VARCHAROID: case NVARCHAR2OID: -- Gitee From f5878392389b73dcb39b4499471ca89d0f7d8f2b Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Tue, 24 Oct 2023 11:43:33 
+0800 Subject: [PATCH 029/434] fixed ad4a77e from https://gitee.com/luo_zihao5524/Plugin/pulls/1097 sync addc19a2e0a2aace4afebac7e083322b37e58e3f --- contrib/dolphin/include/builtin_funcs.ini | 16 + contrib/dolphin/include/plugin_catalog/heap.h | 3 +- .../dolphin/include/plugin_commands/defrem.h | 3 + .../dolphin/include/plugin_nodes/parsenodes.h | 15 + .../include/plugin_nodes/parsenodes_common.h | 12 + .../dolphin/include/plugin_parser/kwlist.h | 2 + .../include/plugin_parser/parse_type.h | 25 +- .../dolphin/include/plugin_parser/scanner.h | 1 + .../dolphin/include/plugin_utils/plpgsql.h | 111 +++- contrib/dolphin/plugin_catalog/heap.cpp | 124 ++++- .../commands/functioncmds.cpp | 506 +++++++++++++++--- .../plugin_optimizer/commands/typecmds.cpp | 55 ++ .../dolphin/plugin_optimizer/plan/planner.cpp | 14 +- contrib/dolphin/plugin_parser/gram.y | 160 +++++- contrib/dolphin/plugin_parser/parse_type.cpp | 318 ++++++++--- contrib/dolphin/plugin_parser/scan.l | 1 + contrib/dolphin/plugin_pl/plpgsql/src/gram.y | 179 +++++-- .../dolphin/plugin_pl/plpgsql/src/pl_comp.cpp | 294 +++++++++- .../plugin_pl/plpgsql/src/pl_handler.cpp | 226 +++++--- contrib/dolphin/plugin_utility.cpp | 80 +++ contrib/dolphin/plugin_utils/adt/json.cpp | 4 + contrib/dolphin/plugin_utils/adt/regproc.cpp | 34 ++ .../dolphin/plugin_utils/adt/ruleutils.cpp | 10 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 20 + contrib/dolphin/tablecmds.cpp | 64 ++- 25 files changed, 1937 insertions(+), 340 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index 06396d5b3..422b8dd16 100644 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -12452,6 +12452,22 @@ AddFuncGroup( "ubtvacuumcleanup", 1, AddBuiltinFunc(_0(4762), _1("ubtvacuumcleanup"), _2(2), _3(true), _4(false), _5(ubtvacuumcleanup), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), 
_13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 2281, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("ubtvacuumcleanup"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "undefinedin", 1, + AddBuiltinFunc(_0(UNDEFINEDINPUT), _1("undefinedin"), _2(1), _3(true), _4(false), _5(undefinedin), _6(4408), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedin"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedout", 1, + AddBuiltinFunc(_0(UNDEFINEDOUTPUT), _1("undefinedout"), _2(1), _3(true), _4(false), _5(undefinedout), _6(2275), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 705), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedout"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedrecv", 1, + AddBuiltinFunc(_0(UNDEFINEDRECV), _1("undefinedrecv"), _2(1), _3(true), _4(false), _5(undefinedrecv), _6(4408), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedrecv"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), 
_38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedsend", 1, + AddBuiltinFunc(_0(UNDEFINEDSEND), _1("undefinedsend"), _2(1), _3(true), _4(false), _5(undefinedsend), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 705), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedsend"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "unique_key_recheck", 1, AddBuiltinFunc(_0(1250), _1("unique_key_recheck"), _2(0), _3(true), _4(false), _5(unique_key_recheck), _6(2279), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("unique_key_recheck"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("deferred UNIQUE constraint check"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/include/plugin_catalog/heap.h b/contrib/dolphin/include/plugin_catalog/heap.h index 272cba627..b3b4391ff 100644 --- a/contrib/dolphin/include/plugin_catalog/heap.h +++ b/contrib/dolphin/include/plugin_catalog/heap.h @@ -135,7 +135,8 @@ extern Oid heap_create_with_catalog(const char *relname, List* ceLst = NULL, StorageType storage_type = HEAP_DISK, LOCKMODE partLockMode = AccessExclusiveLock, - ObjectAddress *typaddress= NULL); + ObjectAddress *typaddress= NULL, + List* depend_extend = NIL); extern void heap_create_init_fork(Relation rel); diff --git a/contrib/dolphin/include/plugin_commands/defrem.h b/contrib/dolphin/include/plugin_commands/defrem.h index 76658e59a..c4f1f6639 100644 --- a/contrib/dolphin/include/plugin_commands/defrem.h +++ 
b/contrib/dolphin/include/plugin_commands/defrem.h @@ -58,6 +58,7 @@ extern void RemoveFunctionById(Oid funcOid); extern void remove_encrypted_proc_by_id(Oid funcOid); extern void RemovePackageById(Oid pkgOid, bool isBody = false); extern void DeleteFunctionByPackageOid(Oid package_oid); +extern void DeleteFunctionByFuncTuple(HeapTuple func_tup); extern void SetFunctionReturnType(Oid funcOid, Oid newRetType); extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType); extern ObjectAddress AlterFunctionOwner(List* name, List* argtypes, Oid newOwnerId); @@ -79,6 +80,7 @@ extern void IsThereOpClassInNamespace(const char *opcname, Oid opcmethod, Oid opcnamespace); extern void IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod, Oid opfnamespace); +extern void RecompileFunction(CompileStmt* stmt); /* commands/operatorcmds.c */ extern void CreatePackageCommand(CreatePackageStmt* parsetree, const char* queryString); @@ -92,6 +94,7 @@ extern ObjectAddress AlterOperatorOwner(List* name, TypeName* typeName1, TypeNam extern void AlterOperatorOwner_oid(Oid operOid, Oid newOwnerId); extern ObjectAddress AlterOperatorNamespace(List* names, List* argtypes, const char* newschema); extern Oid AlterOperatorNamespace_oid(Oid operOid, Oid newNspOid); +extern void RecompilePackage(CompileStmt* stmt); /* commands/aggregatecmds.c */ extern ObjectAddress DefineAggregate(List* name, List* args, bool oldstyle, List* parameters); diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 612cf6f2c..1973c6e7d 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -1387,6 +1387,21 @@ typedef struct AlterFunctionStmt { #endif } AlterFunctionStmt; +enum CompileEntry { + COMPILE_PROCEDURE, + COMPILE_FUNCTION, + COMPILE_PACKAGE, + COMPILE_PKG_SPECIFICATION, + COMPILE_PKG_BODY +}; + +typedef struct CompileStmt { + NodeTag type; + List* objName; + 
List* funcArgs; + CompileEntry compileItem; +} CompileStmt; + typedef struct InlineCodeBlock { NodeTag type; char* source_text; /* source text of anonymous code block */ diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 83d29a974..2257d5386 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -155,6 +155,16 @@ typedef struct DropRoleStmt { DropBehavior behavior; /* CASCADE or RESTRICT */ } DropRoleStmt; +typedef struct TypeDependExtend { + Oid typeOid; /* real depend type OID */ + Oid undefDependObjOid; /* undefined oid in gs_dependencies_obj when the column's type is undefined */ + bool dependUndefined; + char* schemaName; + char* packageName; + char* objectName; + char typType; + char typCategory; +} TypeDependExtend; /* * TypeName - specifies a type in definitions * @@ -181,6 +191,7 @@ typedef struct TypeName { int end_location; /* %TYPE and date specified, token end location */ bool pct_rowtype; /* %ROWTYPE specified? 
*/ int charset; + TypeDependExtend* dependExtend = NULL; } TypeName; typedef enum FunctionParameterMode { @@ -2285,6 +2296,7 @@ typedef struct CreateFunctionStmt { List* withClause; /* a list of DefElem */ bool isProcedure; /* true if it is a procedure */ char* inputHeaderSrc; + char* funcHeadSrc; bool isPrivate; /* in package, it's true is a private procedure*/ bool isFunctionDeclare; /* in package,it's true is a function delcare*/ bool isExecuted; diff --git a/contrib/dolphin/include/plugin_parser/kwlist.h b/contrib/dolphin/include/plugin_parser/kwlist.h index 2e8c7ef04..9041ed2ad 100644 --- a/contrib/dolphin/include/plugin_parser/kwlist.h +++ b/contrib/dolphin/include/plugin_parser/kwlist.h @@ -161,6 +161,7 @@ PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD) PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD) PG_KEYWORD("compact", COMPACT, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("compatible_illegal_chars", COMPATIBLE_ILLEGAL_CHARS, UNRESERVED_KEYWORD) +PG_KEYWORD("compile", COMPILE, UNRESERVED_KEYWORD) PG_KEYWORD("complete", COMPLETE, UNRESERVED_KEYWORD) PG_KEYWORD("completion", COMPLETION, UNRESERVED_KEYWORD) PG_KEYWORD("compress", COMPRESS, UNRESERVED_KEYWORD) @@ -831,6 +832,7 @@ PG_KEYWORD("sounds", SOUNDS, TYPE_FUNC_NAME_KEYWORD) #endif PG_KEYWORD("source", SOURCE_P, UNRESERVED_KEYWORD) PG_KEYWORD("space", SPACE, UNRESERVED_KEYWORD) +PG_KEYWORD("specification", SPECIFICATION, UNRESERVED_KEYWORD) PG_KEYWORD("spill", SPILL, UNRESERVED_KEYWORD) PG_KEYWORD("split", SPLIT, UNRESERVED_KEYWORD) PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD) diff --git a/contrib/dolphin/include/plugin_parser/parse_type.h b/contrib/dolphin/include/plugin_parser/parse_type.h index 0b1214a01..cd17400ab 100644 --- a/contrib/dolphin/include/plugin_parser/parse_type.h +++ b/contrib/dolphin/include/plugin_parser/parse_type.h @@ -20,14 +20,18 @@ typedef HeapTuple Type; -extern Type LookupTypeName(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool print_notice = true); +extern 
Type LookupTypeNameSupportUndef(ParseState *pstate, const TypeName *typeName, + int32 *typmod_p, bool print_notice = true); +extern Type LookupTypeName(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool print_notice = true, + TypeDependExtend* dependExtend = NULL); extern Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool temp_ok, - bool print_notice = true); + bool print_notice = true, TypeDependExtend* dependExtend = NULL); extern Oid LookupPctTypeInPackage(RangeVar* rel, Oid pkgOid, const char* field); extern Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid = InvalidOid, Oid namespaceId = InvalidOid); -extern Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p); +extern Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p, TypeDependExtend* dependExtend = NULL); extern Oid typenameTypeId(ParseState* pstate, const TypeName* typname); -extern void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p); +extern void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p, + TypeDependExtend* dependExtend = NULL); extern char* TypeNameToString(const TypeName* typname); extern char* TypeNameListToString(List* typenames); @@ -62,12 +66,19 @@ extern bool IsTypeSupportedByORCRelation(_in_ Oid typeOid); extern bool IsTypeSupportedByTsStore(_in_ int kvtype, _in_ Oid typeOid); extern bool IsTypeSupportedByUStore (_in_ Oid typeOid, _in_ int32 typeMod); extern TypeName *typeStringToTypeName(const char *str); -extern void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p); +extern void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p, TypeDependExtend* depenExtend = NULL); extern bool IsTypeTableInInstallationGroup(const Type type_tup); -extern HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* 
typmod_p); +extern HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p, + TypeDependExtend* depend_extend = NULL); extern char* CastPackageTypeName(const char* typName, Oid pkgOid, bool isPackage, bool isPublic = true); extern bool IsBinaryType(Oid typid); #define ISCOMPLEX(typeid) (typeidTypeRelid(typeid) != InvalidOid) extern void check_type_supports_multi_charset(Oid typid, bool allow_array); - +extern char* ParseTypeName(const char* typName, Oid pkgOid); +typedef enum TypeTupStatus { + NormalTypeTup = 0, + UndefineTypeTup = 1, + InvalidTypeTup = 2 +} TypeTupStatus; +extern TypeTupStatus GetTypeTupStatus(Type typ); #endif /* PARSE_TYPE_H */ diff --git a/contrib/dolphin/include/plugin_parser/scanner.h b/contrib/dolphin/include/plugin_parser/scanner.h index 3c7266f81..fc9bc4571 100644 --- a/contrib/dolphin/include/plugin_parser/scanner.h +++ b/contrib/dolphin/include/plugin_parser/scanner.h @@ -136,6 +136,7 @@ typedef struct core_yy_extra_type { bool include_ora_comment; /* dont igore comment when ture */ int func_param_begin; /* function and procedure param string start pos,exclude left parenthesis */ int func_param_end; /* function and procedure param string end pos,exclude right parenthesis */ + int return_pos_end; bool isPlpgsqlKeyWord; const PlpgsqlKeywordValue* plKeywordValue; bool is_delimiter_name; diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index 6a5b74115..c791a4b48 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -290,6 +290,35 @@ typedef enum { PLPGSQL_CURSOR_NAME } PLpgSQL_con_info_item_value; +/* + * GsDependency object type + */ +typedef enum { + GSDEPEND_OBJECT_TYPE_INVALID = 0, + GSDEPEND_OBJECT_TYPE_UNDEFIND, + GSDEPEND_OBJECT_TYPE_VARIABLE, + GSDEPEND_OBJECT_TYPE_TYPE, + GSDEPEND_OBJECT_TYPE_FUNCTION, + GSDEPEND_OBJECT_TYPE_PROCHEAD, + GSDEPEND_OBJECT_TYPE_PKG, + 
GSDEPEND_OBJECT_TYPE_PKG_BODY, + GSDEPEND_OBJECT_TYPE_PKG_RECOMPILE +} GsDependObjectType; + +/* +* GsDependency reference object position type +*/ +#define GSDEPEND_REFOBJ_POS_INVALID 0 +#define GSDEPEND_REFOBJ_POS_IN_TYPE 1 +#define GSDEPEND_REFOBJ_POS_IN_PKGSPEC 2 +#define GSDEPEND_REFOBJ_POS_IN_PROCHEAD 4 +#define GSDEPEND_REFOBJ_POS_IN_PROCBODY 8 +#define GSDEPEND_REFOBJ_POS_IN_PKGBODY 16 +#define GSDEPEND_REFOBJ_POS_IN_PKGRECOMPILE_OBJ (GSDEPEND_REFOBJ_POS_IN_PKGSPEC | \ + GSDEPEND_REFOBJ_POS_IN_PKGBODY | GSDEPEND_REFOBJ_POS_IN_PROCBODY) +#define GSDEPEND_REFOBJ_POS_IN_PKGALL_OBJ (GSDEPEND_REFOBJ_POS_IN_PKGRECOMPILE_OBJ) +#define GSDEPEND_REFOBJ_POS_IN_PROCALL (GSDEPEND_REFOBJ_POS_IN_PROCHEAD | GSDEPEND_REFOBJ_POS_IN_PROCBODY) + /********************************************************************** * Node and structure definitions **********************************************************************/ @@ -303,6 +332,46 @@ typedef struct PLpgSQL_datum { /* Generic datum array item */ bool ispkg; } PLpgSQL_datum; +/* + * DependenciesDatum is the common supertype for DependenciesUndefined, DependenciesVariable, + * DependenciesType, DependenciesProchead + */ +typedef struct DependenciesDatum { /* Generic datum array item */ + NodeTag type; +} DependenciesDatum; + +/* + * PLpgSQL dependencies undefined/type/variable/function/procedure + */ +typedef struct DependenciesUndefined { /* Generic datum array item */ + NodeTag type; +} DependenciesUndefined; + +typedef struct DependenciesVariable { + NodeTag type; + char* typName; + int32 typMod; + char* extraInfo; +} DependenciesVariable; + +typedef struct DependenciesType{ + NodeTag type; + char typType; + char typCategory; + char* attrInfo; + bool isRel; + char* elemTypName; + char* idxByTypName; +} DependenciesType; + +typedef struct DependenciesProchead{ + NodeTag type; + bool undefined; + char* proName; + char* proArgSrc; + char* funcHeadSrc; +} DependenciesProchead; + typedef enum PLpgSQL_trigtype { 
PLPGSQL_DML_TRIGGER, @@ -404,6 +473,8 @@ typedef struct { /* openGauss data type */ * then convert to tuple descriptior. */ Oid cursorCompositeOid = InvalidOid; + Oid tableofOid; + TypeDependExtend* dependExtend; } PLpgSQL_type; typedef struct { @@ -1162,6 +1233,10 @@ typedef struct PLpgSQL_function { /* Complete compiled function */ bool is_autonomous; bool is_plpgsql_func_with_outparam; bool is_insert_gs_source; + /* gs depend */ + bool isValid; + bool is_need_recompile; + Oid namespaceOid; } PLpgSQL_function; class AutonomousSession; @@ -1363,6 +1438,13 @@ typedef struct plpgsql_pkg_hashent { } plpgsql_pkg_HashEnt; +#define PACKAGE_INVALID 0x0 +#define PACKAGE_SPEC_VALID 0x1 +#define PACKAGE_SPEC_INVALID 0xFE +#define PACKAGE_BODY_VALID 0x2 +#define PACKAGE_BODY_INVALID 0xFD +#define PACKAGE_VALID 0x3 + typedef struct PLpgSQL_package { /* Complete compiled package */ char* pkg_signature; Oid pkg_oid; @@ -1408,6 +1490,15 @@ typedef struct PLpgSQL_package { /* Complete compiled package */ knl_u_plpgsql_pkg_context* u_pkg; Oid namespaceOid; bool isInit; + + /** + * gs_dependencies_fn.h + */ + NodeTag type; + List* preRefObjectOidList; + List* preSelfObjectList; + unsigned char status; + bool is_need_recompile; } PLpgSQL_package; @@ -1665,7 +1756,7 @@ typedef struct plpgsql_hashent { DListCell* cell; /* Dlist cell for delete function compile results. 
*/ } plpgsql_HashEnt; -extern PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator); +extern PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator, bool isRecompile = false); extern void delete_function(PLpgSQL_function* func, bool fromPackage = false); extern PLpgSQL_function* plpgsql_compile_nohashkey(FunctionCallInfo fcinfo); /* parse trigger func */ extern PLpgSQL_function* plpgsql_compile_inline(char* proc_source); @@ -1684,10 +1775,10 @@ extern bool plpgsql_parse_tripword(char* word1, char* word2, char* word3, PLwdat extern bool plpgsql_parse_quadword(char* word1, char* word2, char* word3, char* word4, PLwdatum* wdatum, PLcword* cword, int* tok_flag); extern PLpgSQL_type* plpgsql_parse_wordtype(char* ident); -extern PLpgSQL_type* plpgsql_parse_cwordtype(List* idents); +extern PLpgSQL_type* plpgsql_parse_cwordtype(List* idents, TypeDependExtend* dependExtend = NULL); extern PLpgSQL_type* plpgsql_parse_wordrowtype(char* ident); extern PLpgSQL_type* plpgsql_parse_cwordrowtype(List* idents); -extern PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation); +extern PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation, TypeDependExtend* type_depend_extend = NULL); extern PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation); extern PLpgSQL_type* plpgsql_build_nested_datatype(); extern const char *plpgsql_code_int2cstring(int sqlcode); @@ -1712,12 +1803,13 @@ extern bool plpgsql_check_colocate(Query* query, RangeTblEntry* rte, void* plpgs extern void plpgsql_HashTableDeleteAll(); extern void plpgsql_hashtable_delete_and_check_invalid_item(int classId, Oid objId); extern void delete_package_and_check_invalid_item(Oid pkgOid); +extern void plpgsql_hashtable_clear_invalid_obj(bool need_clear = false); extern void plpgsql_HashTableDelete(PLpgSQL_function* func); extern bool plpgsql_get_current_value_stp_with_exception(); extern void 
plpgsql_restore_current_value_stp_with_exception(bool saved_current_stp_with_exception); extern void plpgsql_set_current_value_stp_with_exception(); extern void delete_pkg_in_HashTable(Oid pkgOid); -extern PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate=false); +extern PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate=false, bool isRecompile = false); extern PLpgSQL_datum* plpgsql_pkg_adddatum(const List* wholeName, char** objname, char** pkgname); extern int plpgsql_pkg_adddatum2ns(const List* name); extern bool plpgsql_check_insert_colocate( @@ -1834,7 +1926,7 @@ extern PLpgSQL_nsitem* plpgsql_ns_lookup( extern PLpgSQL_nsitem* plpgsql_ns_lookup_label(PLpgSQL_nsitem* ns_cur, const char* name); extern void free_func_tableof_index(); extern void free_temp_func_tableof_index(List* temp_tableof_index); - +extern char* GetPackageSchemaName(Oid packageOid); /* ---------- * Other functions in pl_funcs.c @@ -2019,4 +2111,13 @@ extern void stp_reserve_subxact_resowner(ResourceOwner resowner); extern void stp_cleanup_subxact_resowner(int64 minStackId); extern void stp_cleanup_subxact_resource(int64 stackId); extern void InsertGsSource(Oid objId, Oid nspid, const char* name, const char* type, bool status); +extern void examine_parameter_list(List* parameters, Oid languageOid, const char* queryString, + oidvector** parameterTypes, TypeDependExtend** type_depend_extend, ArrayType** allParameterTypes, + ArrayType** parameterModes, ArrayType** parameterNames, + List** parameterDefaults, Oid* requiredResultType, List** defargpos, bool fenced, bool* has_undefined = NULL); +extern void compute_return_type( + TypeName* returnType, Oid languageOid, Oid* prorettype_p, bool* returnsSet_p, bool fenced, int startLineNumber, + TypeDependExtend* type_depend_extend, bool is_refresh_head); +void plpgsql_free_override_stack(int depth); + #endif /* PLPGSQL_H */ diff --git 
a/contrib/dolphin/plugin_catalog/heap.cpp b/contrib/dolphin/plugin_catalog/heap.cpp index 0a785007e..9fd2d7385 100644 --- a/contrib/dolphin/plugin_catalog/heap.cpp +++ b/contrib/dolphin/plugin_catalog/heap.cpp @@ -116,6 +116,8 @@ #include "foreign/fdwapi.h" #include "instruments/generate_report.h" #include "catalog/gs_encrypted_columns.h" +#include "catalog/gs_dependencies_fn.h" +#include "utils/plpgsql.h" #ifdef PGXC #include "catalog/pgxc_class.h" @@ -1003,6 +1005,34 @@ void InsertPgAttributeTuple(Relation pg_attribute_rel, Form_pg_attribute new_att heap_freetuple(tup); } +static bool make_gs_depend_param_body(GsDependParamBody* gs_depend_param_body, const char* typ_name, + const char relkind, const Oid namespace_oid) +{ + bool need_build_depend = false; + int cw = CompileWhich(); + need_build_depend = (relkind == RELKIND_RELATION || relkind == RELKIND_COMPOSITE_TYPE) && + (cw == PLPGSQL_COMPILE_PACKAGE_PROC || cw == PLPGSQL_COMPILE_PACKAGE || cw == PLPGSQL_COMPILE_PROC); + if (!need_build_depend) { + return false; + } + gs_depend_param_body->dependNamespaceOid = namespace_oid; + if (NULL != u_sess->plsql_cxt.curr_compile_context && + NULL != u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package) { + PLpgSQL_package* pkg = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package; + gs_depend_param_body->dependPkgOid = pkg->pkg_oid; + gs_depend_param_body->dependPkgName = pkg->pkg_signature; + } + char* real_typ_name = ParseTypeName((char*)typ_name, gs_depend_param_body->dependPkgOid); + if (real_typ_name == NULL) { + gs_depend_param_body->dependName = pstrdup(typ_name); + } else { + gs_depend_param_body->dependName = real_typ_name; + } + gs_depend_param_body->refPosType = GSDEPEND_REFOBJ_POS_IN_TYPE; + gs_depend_param_body->type = GSDEPEND_OBJECT_TYPE_TYPE; + return true; +} + /* -------------------------------- * AddNewAttributeTuples * @@ -1010,7 +1040,8 @@ void InsertPgAttributeTuple(Relation pg_attribute_rel, Form_pg_attribute 
new_att * tuples to pg_attribute. * -------------------------------- */ -static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relkind, bool oidislocal, int oidinhcount, bool hasbucket, bool hasuids) +static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relkind, + bool oidislocal, int oidinhcount, bool hasbucket, bool hasuids, List* depend_extend, const char* typ_name, Oid namespace_oid) { Form_pg_attribute attr; int i; @@ -1025,7 +1056,13 @@ static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relki rel = heap_open(AttributeRelationId, RowExclusiveLock); indstate = CatalogOpenIndexes(rel); - + GsDependParamBody gs_depend_param_body; + gsplsql_init_gs_depend_param_body(&gs_depend_param_body); + bool need_build_depend = false; + if (enable_plpgsql_gsdependency()) { + need_build_depend = make_gs_depend_param_body(&gs_depend_param_body, typ_name, relkind, namespace_oid); + } + ListCell* depend_extend_cell = list_head(depend_extend); /* * First we add the user attributes. This is also a convenient place to * add dependencies on their datatypes and collations. 
@@ -1053,7 +1090,17 @@ static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relki referenced.classId = TypeRelationId; referenced.objectId = attr->atttypid; referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); + + if (need_build_depend) { + if (NULL != depend_extend_cell) { + gs_depend_param_body.dependExtend = (TypeDependExtend*)lfirst(depend_extend_cell); + } else { + gs_depend_param_body.dependExtend = NULL; + } + recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL, &gs_depend_param_body); + } else { + recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); + } /* The default collation is pinned, so don't bother recording it */ if (OidIsValid(attr->attcollation) && attr->attcollation != DEFAULT_COLLATION_OID) { @@ -1063,6 +1110,12 @@ static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relki recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } } + if (need_build_depend && NULL != depend_extend_cell) { + depend_extend_cell = lnext(depend_extend_cell); + } + } + if (need_build_depend) { + pfree_ext(gs_depend_param_body.dependName); } /* @@ -2609,7 +2662,7 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable int oidinhcount, OnCommitAction oncommit, Datum reloptions, bool use_user_acl, bool allow_system_table_mods, PartitionState *partTableState, int8 row_compress, HashBucketInfo *bucketinfo, bool record_dependce, List *ceLst, StorageType storage_type, - LOCKMODE partLockMode, ObjectAddress *typaddress) + LOCKMODE partLockMode, ObjectAddress *typaddress, List* depend_extend) { Relation pg_class_desc; Relation new_rel_desc; @@ -3011,7 +3064,7 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable * now add tuples to pg_attribute for the attributes in our new relation. 
*/ AddNewAttributeTuples( - relid, new_rel_desc->rd_att, relkind, oidislocal, oidinhcount, relhasbucket, relhasuids); + relid, new_rel_desc->rd_att, relkind, oidislocal, oidinhcount, relhasbucket, relhasuids, depend_extend, relname, relnamespace); if (ceLst != NULL) { AddNewGsSecEncryptedColumnsTuples(relid, ceLst); } @@ -8226,3 +8279,64 @@ void AddOrDropUidsAttr(Oid relOid, bool oldRelHasUids, bool newRelHasUids) } } +static void heap_serialize_rel_attribute(Relation att_rel, Oid rel_oid, + int att_idx, StringInfoData* concat_name, bool* depend_undefined) +{ + ScanKeyData skey[2]; + SysScanDesc scan; + HeapTuple tuple; + bool is_null = false; + int key_num = 0; + ScanKeyInit(&skey[key_num++], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(rel_oid)); + ScanKeyInit(&skey[key_num++], Anum_pg_attribute_attnum, BTEqualStrategyNumber, + F_INT4EQ, Int32GetDatum(att_idx)); + scan = systable_beginscan(att_rel, AttributeRelidNumIndexId, true, SnapshotSelf, key_num, skey); + tuple = systable_getnext(scan); + if (!HeapTupleIsValid(tuple)) { + systable_endscan(scan); + return; + } + Datum id_dropped_datum = heap_getattr(tuple, Anum_pg_attribute_attisdropped, + RelationGetDescr(att_rel), &is_null); + if (is_null || DatumGetBool(id_dropped_datum)) { + systable_endscan(scan); + return; + } + Datum att_name_datum = heap_getattr(tuple, Anum_pg_attribute_attname, + RelationGetDescr(att_rel), &is_null); + if (!is_null) { + appendStringInfoString(concat_name, DatumGetName(att_name_datum)->data); + } + Datum typ_oid_datum = heap_getattr(tuple, Anum_pg_attribute_atttypid, + RelationGetDescr(att_rel), &is_null); + appendStringInfoString(concat_name, ":"); + Oid typ_oid = DatumGetObjectId(typ_oid_datum); + if (!is_null && OidIsValid(typ_oid) && typ_oid != UNDEFINEDOID) { + (void)MakeTypeNamesStrForTypeOid(DatumGetObjectId(typ_oid_datum), depend_undefined, concat_name); + } else if (NULL != depend_undefined) { + *depend_undefined = true; + } + 
appendStringInfoString(concat_name, ","); + systable_endscan(scan); +} + +char* heap_serialize_row_attr(Oid rel_oid, bool* depend_undefined) +{ + Relation rel; + StringInfoData concat_name; + char rel_kind = get_rel_relkind(rel_oid); + if (rel_kind != RELKIND_COMPOSITE_TYPE && rel_kind != RELKIND_RELATION) { + return NULL; + } + int att_num = get_relnatts(rel_oid); + rel = heap_open(AttributeRelationId, AccessShareLock); + initStringInfo(&concat_name); + for (int i = 1; i <= att_num; i++) { + heap_serialize_rel_attribute(rel, rel_oid, i, &concat_name, depend_undefined); + } + heap_close(rel, AccessShareLock); + char* ret = pstrdup(concat_name.data); + FreeStringInfo(&concat_name); + return ret; +} diff --git a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp index cc1c6153b..ea1ee443a 100755 --- a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp @@ -95,6 +95,8 @@ #include "tcop/utility.h" #include "tsearch/ts_type.h" #include "commands/comment.h" +#include "catalog/gs_dependencies_fn.h" +#include "utils/sec_rls_utils.h" #ifdef DOLPHIN #include "plugin_commands/mysqlmode.h" #endif @@ -144,11 +146,12 @@ static void CreateFunctionComment(Oid funcOid, List* options, bool lock = false) * validator, so as not to produce a NOTICE and then an ERROR for the same * condition.) 
*/ -static void compute_return_type( - TypeName* returnType, Oid languageOid, Oid* prorettype_p, bool* returnsSet_p, bool fenced, int startLineNumber) +void compute_return_type( + TypeName* returnType, Oid languageOid, Oid* prorettype_p, bool* returnsSet_p, bool fenced, int startLineNumber, + TypeDependExtend* type_depend_extend, bool is_refresh_head) { - Oid rettype; - Type typtup; + Oid rettype = InvalidOid; + Type typtup = NULL; AclResult aclresult; Oid typowner = InvalidOid; ObjectAddress address; @@ -159,7 +162,11 @@ static void compute_return_type( */ bool isalter = false; - typtup = LookupTypeName(NULL, returnType, NULL); + if (enable_plpgsql_gsdependency()) { + typtup = LookupTypeName(NULL, returnType, NULL, true, type_depend_extend); + } else { + typtup = LookupTypeName(NULL, returnType, NULL); + } /* * If the type is relation, then we check @@ -170,8 +177,8 @@ static void compute_return_type( (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("return type '%s' must be in installation group", TypeNameToString(returnType)))); } - - if (typtup) { + TypeTupStatus typStatus = GetTypeTupStatus(typtup); + if (NormalTypeTup == typStatus) { if (!((Form_pg_type)GETSTRUCT(typtup))->typisdefined) { if (languageOid == SQLlanguageId) ereport(ERROR, @@ -226,43 +233,53 @@ static void compute_return_type( (languageOid != INTERNALlanguageId && languageOid != ClanguageId)) { const char* message = "type does not exist"; InsertErrorMessage(message, startLineNumber); - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type \"%s\" does not exist", typnam))); - } - - /* Reject if there's typmod decoration, too */ - if (returnType->typmods != NIL) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("type modifier cannot be specified for shell type \"%s\"", typnam))); + if (UndefineTypeTup == typStatus) { + if (!is_refresh_head) { + ereport(WARNING, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type \"%s\" does not exist", typnam))); + } + rettype = typeTypeId(typtup); 
+ ReleaseSysCache(typtup); + } else { + if (!is_refresh_head) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("type \"%s\" does not exist", typnam))); + } + } + } else { + /* Reject if there's typmod decoration, too */ + if (returnType->typmods != NIL) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("type modifier cannot be specified for shell type \"%s\"", typnam))); - /* Otherwise, go ahead and make a shell type */ - ereport(NOTICE, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("type \"%s\" is not yet defined", typnam), - errdetail("Creating a shell type definition."))); - namespaceId = QualifiedNameGetCreationNamespace(returnType->names, &typname); + /* Otherwise, go ahead and make a shell type */ + ereport(NOTICE, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("type \"%s\" is not yet defined", typnam), + errdetail("Creating a shell type definition."))); + namespaceId = QualifiedNameGetCreationNamespace(returnType->names, &typname); - if (u_sess->attr.attr_sql.enforce_a_behavior) { - typowner = GetUserIdFromNspId(namespaceId); + if (u_sess->attr.attr_sql.enforce_a_behavior) { + typowner = GetUserIdFromNspId(namespaceId); - if (!OidIsValid(typowner)) + if (!OidIsValid(typowner)) + typowner = GetUserId(); + else if (typowner != GetUserId()) + isalter = true; + } else { typowner = GetUserId(); - else if (typowner != GetUserId()) - isalter = true; - } else { - typowner = GetUserId(); - } - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); - if (isalter) { - aclresult = pg_namespace_aclcheck(namespaceId, typowner, ACL_CREATE); + } + aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + if (isalter) { + aclresult = pg_namespace_aclcheck(namespaceId, typowner, ACL_CREATE); + if 
(aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + } + address = TypeShellMake(typname, namespaceId, typowner); + rettype = address.objectId; + Assert(OidIsValid(rettype)); } - address = TypeShellMake(typname, namespaceId, typowner); - rettype = address.objectId; - Assert(OidIsValid(rettype)); } aclresult = pg_type_aclcheck(rettype, GetUserId(), ACL_USAGE); @@ -286,9 +303,10 @@ static void compute_return_type( * requiredResultType is set to InvalidOid if there are no OUT parameters, * else it is set to the OID of the implied result type. */ -static void examine_parameter_list(List* parameters, Oid languageOid, const char* queryString, - oidvector** parameterTypes, ArrayType** allParameterTypes, ArrayType** parameterModes, ArrayType** parameterNames, - List** parameterDefaults, Oid* requiredResultType, List** defargpos, bool fenced) +void examine_parameter_list(List* parameters, Oid languageOid, const char* queryString, + oidvector** parameterTypes, TypeDependExtend** type_depend_extend, ArrayType** allParameterTypes, + ArrayType** parameterModes, ArrayType** parameterNames, + List** parameterDefaults, Oid* requiredResultType, List** defargpos, bool fenced, bool* has_undefined) { int parameterCount = list_length(parameters); Oid* inTypes = NULL; @@ -316,6 +334,9 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char allTypes = (Datum*)palloc(parameterCount * sizeof(Datum)); paramModes = (Datum*)palloc(parameterCount * sizeof(Datum)); paramNames = (Datum*)palloc0(parameterCount * sizeof(Datum)); + if (enable_plpgsql_gsdependency()) { + *type_depend_extend = (TypeDependExtend*)palloc0((parameterCount) * sizeof(TypeDependExtend)); + } } *parameterDefaults = NIL; @@ -335,8 +356,16 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char AclResult aclresult; char* objname = NULL; objname = strVal(linitial(t->names)); - typtup = LookupTypeName(NULL, t, 
NULL); - if (!HeapTupleIsValid(typtup)) { + if (enable_plpgsql_gsdependency()) { + typtup = LookupTypeName(NULL, t, NULL, true, (*type_depend_extend) + i); + if (NULL != has_undefined && !*has_undefined && (*type_depend_extend)[i].dependUndefined) { + *has_undefined = true; + } + } else { + typtup = LookupTypeName(NULL, t, NULL); + } + int typ_tup_status = GetTypeTupStatus(typtup); + if (NormalTypeTup != typ_tup_status) { toid = findPackageParameter(objname); } /* @@ -348,6 +377,15 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("argument type '%s' must be in installation group", TypeNameToString(t)))); } + + if (enable_plpgsql_gsdependency() && UndefineTypeTup == typ_tup_status) { + if (OidIsValid(toid)) { + gsplsql_delete_unrefer_depend_obj_oid((*type_depend_extend)[i].undefDependObjOid, false); + (*type_depend_extend)[i].undefDependObjOid = InvalidOid; + ReleaseSysCache(typtup); + typtup = NULL; + } + } if (typtup) { if (!((Form_pg_type)GETSTRUCT(typtup))->typisdefined) { @@ -1006,6 +1044,8 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid namespaceId = InvalidOid; AclResult aclresult; oidvector* parameterTypes = NULL; + TypeDependExtend* param_type_depend_ext = NULL; + TypeDependExtend* ret_type_depend_ext = NULL; ArrayType* allParameterTypes = NULL; ArrayType* parameterModes = NULL; ArrayType* parameterNames = NULL; @@ -1199,25 +1239,49 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, * Convert remaining parameters of CREATE to form wanted by * ProcedureCreate. 
*/ - examine_parameter_list(stmt->parameters, languageOid, queryString, ¶meterTypes, &allParameterTypes, - ¶meterModes, ¶meterNames, ¶meterDefaults, &requiredResultType, &defargpos, fenced); - - prodefaultargpos = GetDefaultArgPos(defargpos); - - if (stmt->returnType) { - /* explicit RETURNS clause */ - compute_return_type(stmt->returnType, languageOid, &prorettype, &returnsSet, fenced, stmt->startLineNumber); - } else if (OidIsValid(requiredResultType)) { - /* default RETURNS clause from OUT parameters */ - prorettype = requiredResultType; - returnsSet = false; + if (stmt->isOraStyle) { + set_function_style_a(); } else { - ereport( - ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("function result type must be specified"))); - /* Alternative possibility: default to RETURNS VOID */ - prorettype = VOIDOID; - returnsSet = false; + set_function_style_pg(); + } + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + Oid old_curr_object_nspoid = u_sess->plsql_cxt.curr_object_nspoid; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + u_sess->plsql_cxt.curr_object_nspoid = namespaceId; + examine_parameter_list(stmt->parameters, languageOid, queryString, ¶meterTypes, ¶m_type_depend_ext, &allParameterTypes, + ¶meterModes, ¶meterNames, ¶meterDefaults, &requiredResultType, &defargpos, fenced); + + prodefaultargpos = GetDefaultArgPos(defargpos); + + if (stmt->returnType) { + /* explicit RETURNS clause */ + InstanceTypeNameDependExtend(&ret_type_depend_ext); + compute_return_type(stmt->returnType, languageOid, &prorettype, &returnsSet, fenced, stmt->startLineNumber, + ret_type_depend_ext, false); + } else if (OidIsValid(requiredResultType)) { + /* default RETURNS clause from OUT parameters */ + InstanceTypeNameDependExtend(&ret_type_depend_ext); + prorettype = requiredResultType; + returnsSet = false; + } else { + ereport( + ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("function result type must be specified"))); + /* 
Alternative possibility: default to RETURNS VOID */ + prorettype = VOIDOID; + returnsSet = false; + } + set_create_plsql_type(oldCreatePlsqlType); + u_sess->plsql_cxt.curr_object_nspoid = old_curr_object_nspoid; + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + u_sess->plsql_cxt.curr_object_nspoid = old_curr_object_nspoid; + PG_RE_THROW(); } + PG_END_TRY(); if (returnsSet) { Oid typerelid = typeidTypeRelid(prorettype); @@ -1296,15 +1360,20 @@ ObjectAddress CreateFunction(CreateFunctionStmt* stmt, const char* queryString, package, proIsProcedure, stmt->inputHeaderSrc, - stmt->isPrivate); + stmt->isPrivate, + param_type_depend_ext, + ret_type_depend_ext, + stmt); CreateFunctionComment(address.objectId, functionOptions); - + pfree_ext(param_type_depend_ext); + pfree_ext(ret_type_depend_ext); u_sess->plsql_cxt.procedure_start_line = 0; u_sess->plsql_cxt.procedure_first_line = 0; u_sess->plsql_cxt.isCreateFunction = false; if (u_sess->plsql_cxt.debug_query_string != NULL && !OidIsValid(pkg_oid)) { pfree_ext(u_sess->plsql_cxt.debug_query_string); + u_sess->plsql_cxt.has_error = false; } return address; } @@ -1587,9 +1656,8 @@ void RemoveFunctionById(Oid funcOid) Form_pg_proc procedureStruct = (Form_pg_proc)GETSTRUCT(tup); isagg = procedureStruct->proisagg; - -#ifdef ENABLE_MOT char* funcName = pstrdup(NameStr(procedureStruct->proname)); +#ifdef ENABLE_MOT bool isNull = false; Datum prokindDatum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_prokind, &isNull); bool proIsProcedure = isNull ? 
false : PROC_IS_PRO(CharGetDatum(prokindDatum)); @@ -1601,12 +1669,24 @@ void RemoveFunctionById(Oid funcOid) PrepareCFunctionLibrary(tup); } +#ifndef ENABLE_MULTIPLE_NODES + GsDependObjDesc func_head_obj; + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM) { + Oid pro_namespace = procedureStruct->pronamespace; + bool is_null; + Datum pro_package_id_datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_packageid, &is_null); + Oid proc_packageid = DatumGetObjectId(pro_package_id_datum); + func_head_obj = gsplsql_construct_func_head_obj(funcOid, pro_namespace, proc_packageid); + } +#endif + simple_heap_delete(relation, &tup->t_self); ReleaseSysCache(tup); heap_close(relation, RowExclusiveLock); + CacheInvalidateFunction(funcOid, InvalidOid); /* * If there's a pg_aggregate tuple, delete that too. */ @@ -1639,6 +1719,21 @@ void RemoveFunctionById(Oid funcOid) } pfree_ext(funcName); #endif +#ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM) { + CommandCounterIncrement(); + func_head_obj.type = GSDEPEND_OBJECT_TYPE_PROCHEAD; + gsplsql_remove_dependencies_object(&func_head_obj); + func_head_obj.refPosType = GSDEPEND_REFOBJ_POS_IN_PROCALL; + gsplsql_remove_gs_dependency(&func_head_obj); + if (enable_plpgsql_gsdependency_guc()) { + func_head_obj.name = funcName; + func_head_obj.type = GSDEPEND_OBJECT_TYPE_FUNCTION; + gsplsql_remove_ref_dependency(&func_head_obj); + } + free_gs_depend_obj_desc(&func_head_obj); + } +#endif } /* * Guts of function deletion. 
@@ -1682,6 +1777,25 @@ void RemovePackageById(Oid pkgOid, bool isBody) HeapTuple pkgtup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkgOid)); if (!HeapTupleIsValid(pkgtup)) /* should not happen */ ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for package %u", pkgOid))); +#ifndef ENABLE_MULTIPLE_NODES + GsDependObjDesc pkg; + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM) { + bool is_null; + Datum schema_name_datum = SysCacheGetAttr(PACKAGEOID, pkgtup, Anum_gs_package_pkgnamespace, &is_null); + pkg.schemaName = get_namespace_name(DatumGetObjectId(schema_name_datum)); + Datum pkg_name_datum = SysCacheGetAttr(PACKAGEOID, pkgtup, Anum_gs_package_pkgname, &is_null); + pkg.packageName = pstrdup(NameStr(*DatumGetName(pkg_name_datum))); + pkg.name = NULL; + pkg.type = GSDEPEND_OBJECT_TYPE_INVALID; + if (isBody) { + pkg.type = GSDEPEND_OBJECT_TYPE_PKG_BODY; + pkg.refPosType = GSDEPEND_REFOBJ_POS_IN_PKGBODY; + } else { + pkg.type = GSDEPEND_OBJECT_TYPE_PKG; + pkg.refPosType = GSDEPEND_REFOBJ_POS_IN_PKGALL_OBJ; + } + } +#endif if (!isBody) { /* if replace package specification,delete all function in this package first. @@ -1718,14 +1832,58 @@ void RemovePackageById(Oid pkgOid, bool isBody) heap_close(relation, RowExclusiveLock); + CacheInvalidateFunction(InvalidOid, pkgOid); /* * If there's a pg_aggregate tuple, delete that too. */ /* Recode time of delete package. 
*/ if (pkgOid != InvalidOid) { - DeletePgObject(pkgOid, OBJECT_TYPE_PKGSPEC); + if (isBody) { + DeletePgObject(pkgOid, OBJECT_TYPE_PKGSPEC); + } else { + DeletePgObject(pkgOid, OBJECT_TYPE_PKGSPEC); + DeletePgObject(pkgOid, OBJECT_TYPE_PKGBODY); + } +#ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM) { + CommandCounterIncrement(); + gsplsql_remove_gs_dependency(&pkg); + if (enable_plpgsql_gsdependency_guc()) { + gsplsql_remove_ref_dependency(&pkg); + } + pfree_ext(pkg.packageName); + pfree_ext(pkg.schemaName); + } +#endif + } +} + +void DeleteFunctionByFuncTuple(HeapTuple proctup) +{ + Oid funcOid = InvalidOid; + if (HeapTupleIsValid(proctup)) { + funcOid = HeapTupleGetOid(proctup); + if (!OidIsValid(funcOid)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmodule(MOD_PLSQL), + errmsg("cache lookup failed for relid %u", funcOid))); + } + } else { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmodule(MOD_PLSQL), + errmsg("cache lookup failed for relid %u", funcOid))); } + (void)deleteDependencyRecordsFor(ProcedureRelationId, funcOid, true); + DeleteTypesDenpendOnPackage(ProcedureRelationId, funcOid); + /* the 'shared dependencies' also change when update. 
*/ + deleteSharedDependencyRecordsFor(ProcedureRelationId, funcOid, 0); + + /* send invalid message for for relation holding replaced function as trigger */ + InvalidRelcacheForTriggerFunction(funcOid, ((Form_pg_proc)GETSTRUCT(proctup))->prorettype); + RemoveFunctionById(funcOid); } void DeleteFunctionByPackageOid(Oid package_oid) @@ -1741,29 +1899,7 @@ void DeleteFunctionByPackageOid(Oid package_oid) SysScanDesc scan = systable_beginscan(pg_proc_rel, InvalidOid, false, NULL, 1, &entry); while ((oldtup = systable_getnext(scan)) != NULL) { HeapTuple proctup = heap_copytuple(oldtup); - Oid funcOid = InvalidOid; - if (HeapTupleIsValid(proctup)) { - funcOid = HeapTupleGetOid(proctup); - if (!OidIsValid(funcOid)) { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmodule(MOD_PLSQL), - errmsg("cache lookup failed for relid %u", funcOid))); - } - } else { - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmodule(MOD_PLSQL), - errmsg("cache lookup failed for relid %u", funcOid))); - } - (void)deleteDependencyRecordsFor(ProcedureRelationId, funcOid, true); - DeleteTypesDenpendOnPackage(ProcedureRelationId, funcOid); - /* the 'shared dependencies' also change when update. */ - deleteSharedDependencyRecordsFor(ProcedureRelationId, funcOid, 0); - - /* send invalid message for for relation holding replaced function as trigger */ - InvalidRelcacheForTriggerFunction(funcOid, ((Form_pg_proc)GETSTRUCT(proctup))->prorettype); - RemoveFunctionById(funcOid); + DeleteFunctionByFuncTuple(proctup); heap_freetuple(proctup); } systable_endscan(scan); @@ -2142,6 +2278,202 @@ bool IsFunctionTemp(AlterFunctionStmt* stmt) return false; } +static inline void SetFuncValid(Oid func_oid, bool is_procedure) +{ + SetPgObjectValid(func_oid, OBJECT_TYPE_PROC, GetCurrCompilePgObjStatus()); + if (!GetCurrCompilePgObjStatus()) { + ereport(WARNING, (errmodule(MOD_PLSQL), + errmsg("%s %s recompile with compilation errors.", + is_procedure ? 
"Procedure" : "Functions", + get_func_name(func_oid)))); + } +} + +static inline bool CheckBeforeRecompile(Oid func_oid) +{ + if (OidIsValid(gsplsql_get_pkg_oid_by_func_oid(func_oid))) { + ereport(WARNING, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is a function in package", get_func_name(func_oid)), + errhint("Replace the ALTER FUNCTION with ALTER PACKAGE."))); + return false; + } + if (gsplsql_is_undefined_func(func_oid)) { + ereport(WARNING, (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("\"%s\" header is undefined, you can try to recreate.", get_func_name(func_oid)))); + return false; + } + return true; +} + +static void CheckIsTriggerAndAssign(Oid func_oid, FunctionCallInfo fcinfo, TriggerData* trigdata) +{ + + HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_oid)); + if (!HeapTupleIsValid(tuple)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errdetail("cache lookup failed for function %u", func_oid))); + } + Form_pg_proc proc = (Form_pg_proc)GETSTRUCT(tuple); + char functyptype = get_typtype(proc->prorettype); + if (functyptype == TYPTYPE_PSEUDO) { + if (proc->prorettype == TRIGGEROID || (proc->prorettype == OPAQUEOID && proc->pronargs == 0)) { + error_t rc = memset_s(&trigdata, sizeof(TriggerData), 0, sizeof(TriggerData)); + securec_check(rc, "", ""); + trigdata->type = T_TriggerData; + fcinfo->context = (Node*)trigdata; + } + } + ReleaseSysCache(tuple); +} + +void RecompileSingleFunction(Oid func_oid, bool is_procedure) +{ + FunctionCallInfoData fake_fcinfo; + FmgrInfo flinfo; + if (!CheckBeforeRecompile(func_oid)) { + return; + } + error_t rc = memset_s(&fake_fcinfo, sizeof(fake_fcinfo), 0, sizeof(fake_fcinfo)); + securec_check(rc, "", ""); + rc = memset_s(&flinfo, sizeof(flinfo), 0, sizeof(flinfo)); + securec_check(rc, "", ""); + + fake_fcinfo.flinfo = &flinfo; + fake_fcinfo.arg = (Datum*)palloc0(sizeof(Datum)); + fake_fcinfo.arg[0] = ObjectIdGetDatum(func_oid); + flinfo.fn_oid = func_oid; + flinfo.fn_mcxt = 
CurrentMemoryContext; + + _PG_init(); + PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); + int save_compile_status = getCompileStatus(); + bool save_need_create_depend = u_sess->plsql_cxt.need_create_depend; + u_sess->plsql_cxt.isCreateFunction = false; + u_sess->plsql_cxt.compile_has_warning_info = false; + int save_searchpath_stack = list_length(u_sess->catalog_cxt.overrideStack); + + PG_TRY(); + { + u_sess->plsql_cxt.createPlsqlType = CREATE_PLSQL_TYPE_RECOMPILE; + if (GetPgObjectValid(func_oid, OBJECT_TYPE_PROC)) { + u_sess->plsql_cxt.need_create_depend = false; + } else { + u_sess->plsql_cxt.need_create_depend = true; + } + SetCurrCompilePgObjStatus(true); + TriggerData trigdata; + CheckIsTriggerAndAssign(func_oid, &fake_fcinfo, &trigdata); + PLpgSQL_function* func = plpgsql_compile(&fake_fcinfo, true, true); + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; + if(func != NULL) { + SetFuncValid(func->fn_oid, is_procedure); + } + SetCurrCompilePgObjStatus(true); + if (enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.createPlsqlType = CREATE_PLSQL_TYPE_END; + } + } + PG_CATCH(); + { + if (enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.createPlsqlType = CREATE_PLSQL_TYPE_END; + } + SetCurrCompilePgObjStatus(true); + u_sess->plsql_cxt.curr_compile_context = save_compile_context; + u_sess->plsql_cxt.compile_status = save_compile_status; + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; + + clearCompileContextList(save_compile_list_length); + plpgsql_free_override_stack(save_searchpath_stack); + PG_RE_THROW(); + } + PG_END_TRY(); +} + +static bool IsNeedRecompile(Oid oid) +{ + bool is_null; + HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(oid)); + if (!HeapTupleIsValid(tuple)) { + return false; + } + Datum pro_lang_datum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prolang, 
&is_null); + char* lang = get_language_name(DatumGetObjectId(pro_lang_datum)); + ReleaseSysCache(tuple); + return (!IsSystemObjOid(oid) && !IsMaskingFunctionOid(oid) && !IsRlsFunction(oid) && + strcasecmp(lang, "plpgsql") == 0); +} + +static inline void ReportRecompileFuncWarning(CompileStmt* stmt) +{ + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("%s %s does not exist, if it is a stored %s, use ALTER %s.", + stmt->compileItem == COMPILE_FUNCTION ? "Functions" : "Procedure", + NameListToString(stmt->objName), + stmt->compileItem == COMPILE_FUNCTION ? "procedure" : "functions", + stmt->compileItem == COMPILE_FUNCTION ? "PROCEDURE" : "FUNCTION"))); +} + +static void RecompileFunctionWithArgs(CompileStmt* stmt) +{ + Oid func_oid = LookupFuncNameTypeNames(stmt->objName, stmt->funcArgs, false); + if (!OidIsValid(func_oid)) { + ReportRecompileFuncWarning(stmt); + return; + } + if (PROC_IS_FUNC(get_func_prokind(func_oid)) && stmt->compileItem == COMPILE_PROCEDURE) { + ReportRecompileFuncWarning(stmt); + } + if (PROC_IS_PRO(get_func_prokind(func_oid)) && stmt->compileItem == COMPILE_FUNCTION) { + ReportRecompileFuncWarning(stmt); + } + if (!IsNeedRecompile(func_oid)) { + RecompileSingleFunction(func_oid, stmt->compileItem == COMPILE_PROCEDURE); + return; + } + return; +} + +void RecompileFunction(CompileStmt* stmt) +{ + if (stmt->funcArgs != NULL) { + RecompileFunctionWithArgs(stmt); + return; + } + int num = 0; + FuncCandidateList clist = NULL; + StringInfoData err_string; + initStringInfo(&err_string); + clist = FuncnameGetCandidates(stmt->objName, -1, NULL, false, false, false, false, + stmt->compileItem == COMPILE_FUNCTION ? 
'f' : 'p'); + if (clist == NULL) { + ReportRecompileFuncWarning(stmt); + return; + } + for (; clist; clist = clist->next) { + if (IsNeedRecompile(clist->oid) && !OidIsValid(gsplsql_get_pkg_oid_by_func_oid(clist->oid))) { + RecompileSingleFunction(clist->oid, stmt->compileItem == COMPILE_PROCEDURE); + num++; + appendStringInfoString(&err_string, format_procedure(clist->oid)); + if (clist->next != NULL) { + appendStringInfoString(&err_string, ","); + } else { + appendStringInfoString(&err_string, "."); + } + } + } + if (num > 1) { + ereport(NOTICE, + (errcode(ERRCODE_AMBIGUOUS_FUNCTION), + errmsg("Compile %d %s: %s", num, + stmt->compileItem == COMPILE_FUNCTION ? "functions" : "procedure", + err_string.data))); + } + FreeStringInfo(&err_string); +} + /* * Implements the ALTER FUNCTION utility command (except for the * RENAME and OWNER clauses, which are handled as part of the generic diff --git a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp index c52086b39..678de0ffd 100644 --- a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp @@ -83,6 +83,8 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "utils/snapmgr.h" +#include "catalog/gs_dependencies_fn.h" +#include "catalog/pg_object.h" /* result structure for get_rels_with_domain() */ typedef struct { @@ -644,6 +646,20 @@ ObjectAddress DefineType(List* names, List* parameters) */ void RemoveTypeById(Oid typeOid) { +#ifndef ENABLE_MULTIPLE_NODES + GsDependObjDesc ref_obj; + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM) { + gsplsql_init_gs_depend_obj_desc(&ref_obj); + char relkind = get_rel_relkind(typeOid); + if (relkind == RELKIND_COMPOSITE_TYPE || relkind == '\0') { + ref_obj.name = NULL; + Oid elem_oid = get_array_internal_depend_type_oid(typeOid); + if (!OidIsValid(elem_oid)) { + gsplsql_get_depend_obj_by_typ_id(&ref_obj, typeOid, InvalidOid, 
true); + } + } + } +#endif Relation relation; HeapTuple tup; @@ -682,6 +698,37 @@ void RemoveTypeById(Oid typeOid) ReleaseSysCache(tup); heap_close(relation, RowExclusiveLock); +#ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum >= SUPPORT_GS_DEPENDENCY_VERSION_NUM && NULL != ref_obj.name) { + CommandCounterIncrement(); + ref_obj.refPosType = GSDEPEND_REFOBJ_POS_IN_TYPE; + gsplsql_remove_type_gs_dependency(&ref_obj); + if (enable_plpgsql_gsdependency_guc()) { + ref_obj.type = GSDEPEND_OBJECT_TYPE_TYPE; + (void)gsplsql_remove_ref_dependency(&ref_obj); + Oid pkg_oid = GetTypePackageOid(typeOid); + if (OidIsValid(pkg_oid)) { + bool invalid_pkg = true; + if (NULL != u_sess->plsql_cxt.curr_compile_context && + NULL != u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package) { + invalid_pkg = pkg_oid == + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; + } + if (invalid_pkg) { + bool is_spec = ref_obj.name[0] != '$'; + SetPgObjectValid(pkg_oid, is_spec ? OBJECT_TYPE_PKGSPEC : OBJECT_TYPE_PKGBODY, false); + if (is_spec) { + SetPgObjectValid(pkg_oid, OBJECT_TYPE_PKGBODY, false); + } + gsplsql_set_pkg_func_status(GetPackageNamespace(pkg_oid), pkg_oid, false); + } + } + } + pfree_ext(ref_obj.schemaName); + pfree_ext(ref_obj.packageName); + pfree_ext(ref_obj.name); + } +#endif } /* @@ -3283,6 +3330,14 @@ ObjectAddress RenameType(RenameStmt* stmt) } #endif + if (enable_plpgsql_gsdependency_guc() && + gsplsql_is_object_depend(typeOid, GSDEPEND_OBJECT_TYPE_TYPE)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator of %s is not allowed, because it is referenced by the other object.", + TypeNameToString(typname)))); + } + /* * If type is composite we need to rename associated pg_class entry too. * RenameRelationInternal will call RenameTypeInternal automatically. 
diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index d8df1a761..9e14ceaba 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -3545,14 +3545,12 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction) wflists, &needSecondLevelAgg, collectiveGroupExpr); -#if defined(ENABLE_MULTIPLE_NODES) || defined(USE_SPQ) - if (IS_SPQ_RUNNING) { - /* - * grouping_tlist was modified by build_groupingsets_plan, - * we have to change tlist at the same time. - */ - tlist = grouping_tlist; - } +#ifdef ENABLE_MULTIPLE_NODES + /* + * grouping_tlist was modified by build_groupingsets_plan, + * we have to change tlist at the same time. + */ + tlist = grouping_tlist; #endif /* Delete eq class expr after grouping */ delete_eq_member(root, tlist, collectiveGroupExpr); diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index a647d7ea9..fbfb8d7ee 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -63,6 +63,7 @@ #include "catalog/pg_proc.h" #include "catalog/gs_package.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_type_fn.h" #include "commands/defrem.h" #include "commands/trigger.h" #ifdef ENABLE_MULTIPLE_NODES @@ -462,6 +463,7 @@ static void ParseUpdateMultiSet(List *set_target_list, SelectStmt *stmt, core_yy static char *GetTargetFuncArgTypeName(char *typeString, TypeName* t); static char *FormatFuncArgType(core_yyscan_t yyscanner, char *argsString, List* parameters); static char *ParseFunctionArgSrc(core_yyscan_t yyscanner); +static char *ParseFuncHeadSrc(core_yyscan_t yyscanner, bool isFunction = true); static void parameter_check_execute_direct(const char* query); static Node *make_node_from_scanbuf(int start_pos, int end_pos, core_yyscan_t yyscanner); static int64 SequenceStrGetInt64(const char *str); @@ -620,7 +622,7 @@ static inline void 
ChangeBpcharCastType(TypeName* typname); DropForeignServerStmt DropUserMappingStmt ExplainStmt ExecDirectStmt FetchStmt GetDiagStmt GrantStmt GrantRoleStmt GrantDbStmt IndexStmt InsertStmt KillStmt ListenStmt LoadStmt LockStmt NotifyStmt ExplainableStmt PreparableStmt - CreateFunctionStmt CreateEventStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt + CreateFunctionStmt CreateEventStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt CompileStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt RemoveFuncStmt RemoveOperStmt RemovePackageStmt RenameStmt RevokeStmt RevokeRoleStmt RevokeDbStmt RuleActionStmt RuleActionStmtOrEmpty RuleStmt SecLabelStmt SelectStmt SelectStmtWithoutWithClause SignalResignalStmt TimeCapsuleStmt TransactionStmt TruncateStmt CallFuncStmt @@ -701,7 +703,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type opt_fields_options fields_list opt_lines_options lines_list expr_do_list %type opt_ignore_number opt_character fields_option lines_option conflict_option opt_do_language -%type opt_lock lock_type cast_context opt_wait opt_lock_for_b kill_opt +%type opt_lock lock_type cast_context opt_wait compile_pkg_opt opt_lock_for_b kill_opt %type vacuum_option_list vacuum_option_elem opt_verify_options %type opt_check opt_force opt_or_replace opt_grant_grant_option opt_grant_admin_option @@ -1163,7 +1165,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CATALOG_NAME CHAIN CHANGE CHANNEL CHAR_P CHARACTER CHARACTERISTICS CHARACTERSET CHARSET CHECK CHECKPOINT CHECKSUM CLASS CLASS_ORIGIN CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_NAME COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COLUMNS COMMENT COMMENTS COMMIT CONSISTENT - COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPLETION COMPRESS 
COMPRESSION CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS + COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPILE COMPLETE COMPLETION COMPRESS COMPRESSION CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS CONTAINS CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P CONVERT CONNECT COORDINATOR COORDINATORS COPY COST CREATE CROSS CSN CSV CUBE CURRENT_P CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA @@ -1232,7 +1234,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); RESIGNAL RLIKE ROUTINE ROW_FORMAT SCHEMAS SAMPLE SAVEPOINT SCHEDULE SCHEMA SCHEMA_NAME SCROLL SEARCH SECONDARY_ENGINE_ATTRIBUTE SECOND_P SECOND_MICROSECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHIPPABLE SHOW SHUTDOWN SIBLINGS SIGNAL SIGNED - SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOUNDS SOURCE_P SPACE SPILL SPLIT SQLSTATE STABLE STACKED_P STANDALONE_P START STARTS STARTWITH + SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOUNDS SOURCE_P SPACE SPECIFICATION SPILL SPLIT SQLSTATE STABLE STACKED_P STANDALONE_P START STARTS STARTWITH STATEMENT STATEMENT_ID STATISTICS STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STATUS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBCLASS_ORIGIN SUBPARTITION SUBPARTITIONS SUBSCRIPTION SUBSTR SUBSTRING SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR STARTING SQL_P @@ -1530,6 +1532,7 @@ stmt : | ClosePortalStmt | ClusterStmt | CommentStmt + | CompileStmt | ConstraintsSetStmt | CopyStmt | CreateAsStmt @@ -19417,6 +19420,7 @@ CreateFunctionStmt: CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args RETURNS func_return createfunc_opt_list opt_definition { + 
set_function_style_pg(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -19436,6 +19440,7 @@ CreateFunctionStmt: | CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args RETURNS TABLE '(' table_func_column_list ')' createfunc_opt_list opt_definition { + set_function_style_pg(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -19456,6 +19461,7 @@ CreateFunctionStmt: | CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args createfunc_opt_list opt_definition { + set_function_style_pg(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -19478,6 +19484,10 @@ CreateFunctionStmt: u_sess->parser_cxt.eaten_begin = false; pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true; u_sess->parser_cxt.isCreateFuncOrProc = true; + if (set_is_create_plsql_type()) { + set_create_plsql_type_start(); + set_function_style_a(); + } } subprogram_body { int rc = 0; @@ -19497,6 +19507,9 @@ CreateFunctionStmt: n->funcname = $5; n->parameters = $6; n->inputHeaderSrc = FormatFuncArgType(yyscanner, funSource->headerSrc, n->parameters); + if (enable_plpgsql_gsdependency_guc()) { + n->funcHeadSrc = ParseFuncHeadSrc(yyscanner); + } n->returnType = $8; n->options = $9; n->options = lappend(n->options, makeDefElem("as", @@ -20636,11 +20649,11 @@ CreateProcedureStmt: u_sess->parser_cxt.isCreateFuncOrProc = true; } subprogram_body { - int rc = 0; - rc = CompileWhich(); - if ((rc == PLPGSQL_COMPILE_PROC || rc == PLPGSQL_COMPILE_NULL) && u_sess->cmd_cxt.CurrentExtensionObject == InvalidOid) { - u_sess->plsql_cxt.procedure_first_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @8); - } + int rc = 0; + rc = CompileWhich(); + if ((rc == PLPGSQL_COMPILE_PROC || rc == PLPGSQL_COMPILE_NULL) && u_sess->cmd_cxt.CurrentExtensionObject == InvalidOid) { + u_sess->plsql_cxt.procedure_first_line = 
GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @8); + } rc = CompileWhich(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); FunctionSources *funSource = (FunctionSources *)$10; @@ -20656,6 +20669,9 @@ CreateProcedureStmt: n->funcname = $5; n->parameters = $6; n->inputHeaderSrc = FormatFuncArgType(yyscanner, funSource->headerSrc, n->parameters); + if (enable_plpgsql_gsdependency_guc()) { + n->funcHeadSrc = ParseFuncHeadSrc(yyscanner, false); + } n->returnType = NULL; n->isProcedure = true; if (0 == count) @@ -20806,9 +20822,11 @@ CreateProcedureStmt: ; CreatePackageStmt: - CREATE opt_or_replace PACKAGE pkg_name invoker_rights as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;} + CREATE opt_or_replace PACKAGE pkg_name invoker_rights as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;set_function_style_a();} { - u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); + set_create_plsql_type_start(); + u_sess->plsql_cxt.need_create_depend = true; + u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); CreatePackageStmt *n = makeNode(CreatePackageStmt); char *pkgNameBegin = NULL; char *pkgNameEnd = NULL; @@ -20886,6 +20904,7 @@ CreatePackageStmt: } else { parser_yyerror("package spec is not ended correctly"); } + u_sess->plsql_cxt.isCreatePkg = false; } tok = YYLEX; } @@ -21205,8 +21224,10 @@ pkg_body_subprogram: { } ; CreatePackageBodyStmt: - CREATE opt_or_replace PACKAGE BODY_P pkg_name as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;} pkg_body_subprogram + CREATE opt_or_replace PACKAGE BODY_P pkg_name as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;set_function_style_a();} pkg_body_subprogram { + set_create_plsql_type_start(); + u_sess->plsql_cxt.need_create_depend = true; char *pkgNameBegin = NULL; char *pkgNameEnd = NULL; char *pkgName = NULL; @@ -21398,15 +21419,17 @@ 
param_name: type_function_name %prec lower_than_zerofill ; func_return: - func_type - { + func_type { + if (enable_plpgsql_gsdependency_guc()) { + pg_yyget_extra(yyscanner)->core_yy_extra.return_pos_end = yylloc; /* We can catch over-specified results here if we want to, * but for now better to silently swallow typmod, etc. * - thomas 2000-03-22 */ - $$ = $1; } - + $$ = $1; + } + ; /* @@ -22236,6 +22259,69 @@ opt_restrict: | /* EMPTY */ ; +compile_pkg_opt: + BODY_P {$$ = COMPILE_PKG_BODY;} + | PACKAGE {$$ = COMPILE_PACKAGE;} + | SPECIFICATION {$$ = COMPILE_PKG_SPECIFICATION;} + | /* EMPTY */ {$$ = COMPILE_PACKAGE;} + ; +CompileStmt: + ALTER PROCEDURE function_with_argtypes COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = ((FuncWithArgs*)$3)->funcname; + n->funcArgs = ((FuncWithArgs*)$3)->funcargs; + n->compileItem = COMPILE_PROCEDURE; + } + $$ = (Node*)n; + } + | ALTER PROCEDURE func_name_opt_arg COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = COMPILE_PROCEDURE; + } + $$ = (Node*)n; + } + | ALTER FUNCTION function_with_argtypes COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = ((FuncWithArgs*)$3)->funcname; + n->funcArgs = ((FuncWithArgs*)$3)->funcargs; + n->compileItem = COMPILE_FUNCTION; + } + $$ = (Node*)n; + } + | ALTER FUNCTION func_name_opt_arg COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = COMPILE_FUNCTION; + } + $$ = (Node*)n; + } + | ALTER PACKAGE pkg_name COMPILE compile_pkg_opt + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = 
makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = (CompileEntry)$5; + } + $$ = (Node*)n; + } + ; /***************************************************************************** * @@ -37471,6 +37557,7 @@ unreserved_keyword_without_key: | COMMITTED | COMPATIBLE_ILLEGAL_CHARS | COMPLETE + | COMPILE | COMPLETION | COMPRESS | COMPRESSION @@ -37852,6 +37939,7 @@ unreserved_keyword_without_key: | SNAPSHOT | SOURCE_P | SPACE + | SPECIFICATION | SPILL | SPLIT | SQL_P @@ -39983,7 +40071,12 @@ static char *GetTargetFuncArgTypeName(char *typeString, TypeName* t) { Type typtup; Oid toid; - typtup = LookupTypeName(NULL, t, NULL, false); + TypeDependExtend* dependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&dependExtend); + } + typtup = LookupTypeName(NULL, t, NULL, false, dependExtend); + pfree_ext(dependExtend); if (typtup) { toid = typeTypeId(typtup); @@ -40047,9 +40140,10 @@ static char *FormatFuncArgType(core_yyscan_t yyscanner, char *argsString, List* pfree(argsString); proc_header_len = proc_header_len; - yyextra->core_yy_extra.func_param_begin = 0; - yyextra->core_yy_extra.func_param_end = 0; - + if (!enable_plpgsql_gsdependency_guc()) { + yyextra->core_yy_extra.func_param_begin = 0; + yyextra->core_yy_extra.func_param_end = 0; + } return buf.data; } @@ -40082,6 +40176,32 @@ static char *ParseFunctionArgSrc(core_yyscan_t yyscanner) return proc_header_str; } +static char *ParseFuncHeadSrc(core_yyscan_t yyscanner, bool is_function) +{ + base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner); + int proc_header_src_end = 0; + char *proc_header_info = NULL; + if (is_function) { + proc_header_src_end = yyextra->core_yy_extra.return_pos_end - 1; + } else { + proc_header_src_end = yyextra->core_yy_extra.func_param_end + 1; + } + if (proc_header_src_end == 1) { + return proc_header_info; + } + yyextra->core_yy_extra.return_pos_end = 0; + 
yyextra->core_yy_extra.func_param_begin = 0; + yyextra->core_yy_extra.func_param_end = 0; + if (proc_header_src_end > 0) { + proc_header_info = (char*)palloc0(proc_header_src_end + 1); + errno_t rc = EOK; + rc = strncpy_s(proc_header_info, (proc_header_src_end + 1), yyextra->core_yy_extra.scanbuf, proc_header_src_end); + securec_check(rc, "\0", "\0"); + proc_header_info[proc_header_src_end] = '\0'; + } + return proc_header_info; +} + static void parameter_check_execute_direct(const char* query) { #ifndef ENABLE_MULTIPLE_NODES diff --git a/contrib/dolphin/plugin_parser/parse_type.cpp b/contrib/dolphin/plugin_parser/parse_type.cpp index 844424395..b6196fe74 100644 --- a/contrib/dolphin/plugin_parser/parse_type.cpp +++ b/contrib/dolphin/plugin_parser/parse_type.cpp @@ -52,6 +52,9 @@ #include "utils/pl_package.h" #include "catalog/gs_collation.h" #include "plugin_parser/parse_utilcmd.h" +#include "catalog/pg_object.h" +#include "catalog/gs_dependencies_fn.h" +#include "catalog/pg_type_fn.h" #ifdef DOLPHIN #include "miscadmin.h" #include "commands/typecmds.h" @@ -89,13 +92,35 @@ Oid LookupPctTypeInPackage(RangeVar* rel, Oid pkgOid, const char* field) } } +Type LookupTypeNameSupportUndef(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice) +{ + Type typtup = NULL; + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + TypeDependExtend* dependExt = NULL; + InstanceTypeNameDependExtend(&dependExt); + typtup = LookupTypeName(pstate, typeName, typmod_p, print_notice, dependExt); + pfree_ext(dependExt); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + set_create_plsql_type(oldCreatePlsqlType); + return typtup; +} + /* * LookupTypeName * Wrapper for typical case. 
*/ -Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice) +Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice, TypeDependExtend* dependExtend) { - return LookupTypeNameExtended(pstate, typeName, typmod_p, true, print_notice); + return LookupTypeNameExtended(pstate, typeName, typmod_p, true, print_notice, dependExtend); } /* @@ -126,14 +151,16 @@ Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_ * pstate is only used for error location info, and may be NULL. */ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool temp_ok, - bool print_notice) + bool print_notice, TypeDependExtend* dependExtend) { Oid typoid = InvalidOid; - HeapTuple tup; + HeapTuple tup = NULL; int32 typmod = -1; Oid pkgOid = InvalidOid; bool notPkgType = false; - + char* schemaname = NULL; + char* typeName = NULL; + char* pkgName = NULL; if (typname->names == NIL) { /* We have the OID already if it's an internally generated TypeName */ typoid = typname->typeOid; @@ -146,10 +173,12 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* char* pkgName = NULL; char* schemaName = NULL; /* deconstruct the name list */ + int typTupStatus = InvalidTypeTup; switch (list_length(typname->names)) { case 1: tup = getPLpgsqlVarTypeTup(strVal(linitial(typname->names))); - if (HeapTupleIsValid(tup)) { + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return tup; } ereport(ERROR, @@ -159,16 +188,18 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* parser_errposition(pstate, typname->location))); break; case 2: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; 
} rel->relname = strVal(linitial(typname->names)); field = strVal(lsecond(typname->names)); break; case 3: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; } pkgName = strVal(linitial(typname->names)); @@ -182,8 +213,9 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* field = strVal(lthird(typname->names)); break; case 4: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; } pkgName = strVal(lsecond(typname->names)); @@ -221,14 +253,38 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid, field); } if (!OidIsValid(relid)) { - relid = RangeVarGetRelidExtended(rel, NoLock, false, false, false, true, NULL, NULL, NULL, NULL); + if (enable_plpgsql_undefined()) { + relid = RangeVarGetRelidExtended(rel, NoLock, true, false, false, true, NULL, NULL, NULL, NULL); + if (!OidIsValid(relid) && HeapTupleIsValid(tup)) { + if (NULL != dependExtend) { + dependExtend->dependUndefined = true; + } + if (GetCurrCompilePgObjStatus() && + u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("TYPE %s does not exist in type.", rel->relname))); + } + InvalidateCurrCompilePgObj(); + return tup; + } + } else { + relid = RangeVarGetRelidExtended(rel, NoLock, false, false, false, true, NULL, NULL, NULL, NULL); + } } attnum = get_attnum(relid, field); if (attnum == InvalidAttrNumber) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of 
relation \"%s\" does not exist", field, rel->relname), - parser_errposition(pstate, typname->location))); + if (enable_plpgsql_undefined()) { + if (NULL != dependExtend) { + dependExtend->dependUndefined = true; + } + return SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", field, rel->relname), + parser_errposition(pstate, typname->location))); + } } typoid = get_atttype(relid, attnum); @@ -238,6 +294,16 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod = get_atttypmod(relid, attnum); } + if (enable_plpgsql_undefined() && UndefineTypeTup == typTupStatus && NULL != dependExtend) { + gsplsql_delete_unrefer_depend_obj_oid(dependExtend->undefDependObjOid, false); + dependExtend->undefDependObjOid = InvalidOid; + ReleaseSysCache(tup); + tup = NULL; + } + if (enable_plpgsql_gsdependency() && NULL != dependExtend) { + dependExtend->typeOid = get_rel_type_id(relid); + } + /* If an array reference, return the array type instead */ if (typname->arrayBounds != NIL) { typoid = get_array_type(typoid); @@ -253,10 +319,6 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* } } else { /* Normal reference to a type name */ - char* schemaname = NULL; - char* typeName = NULL; - char* pkgName = NULL; - /* Handle %ROWTYPE reference to type of an existing table. 
*/ if (typname->pct_rowtype) { RangeVar* relvar = NULL; @@ -284,14 +346,25 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* } Oid class_oid = RangeVarGetRelidExtended(relvar, NoLock, true, false, false, true, NULL, NULL); if (!OidIsValid(class_oid)) { - pfree_ext(relvar); /* if case: cursor%rowtype */ tup = getCursorTypeTup(strVal(linitial(typname->names))); if (HeapTupleIsValid(tup)) { return (Type)tup; } - - ereport(ERROR, + if (enable_plpgsql_undefined() && NULL != dependExtend) { + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(relvar); + if (OidIsValid(undefRefObjOid)) { + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + InvalidateCurrCompilePgObj(); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + if (typmod_p != NULL) { + *typmod_p = -1; + } + } + } + pfree_ext(relvar); + ereport(NULL != tup ? WARNING : ERROR, (errmodule(MOD_PARSER), errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation does not exist when parse word."), @@ -299,6 +372,7 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* NameListToString(typname->names)), errcause("incorrectly referencing relation"), erraction("check the relation name for %%ROWTYPE"))); + return (Type)tup; } char relkind = get_rel_relkind(class_oid); /* onyl table is allowed for %ROWTYPE. 
*/ @@ -346,13 +420,23 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* /* find type in current packgae first */ typoid = LookupTypeInPackage(typname->names, typeName); } - if (isPkgType) { - typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); - } - if (!OidIsValid(typoid)) { - /* Unqualified type name, so search the search path */ - typoid = TypenameGetTypidExtended(typeName, temp_ok); - notPkgType = true; /* should also track type dependency, fix when refactoring */ + if (enable_plpgsql_gsdependency_guc()) { + if (isPkgType) { + typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); + } else if (!OidIsValid(typoid)) { + /* Unqualified type name, so search the search path */ + typoid = TypenameGetTypidExtended(typeName, temp_ok); + notPkgType = true; /* should also track type dependency, fix when refactoring */ + } + } else { + if (isPkgType) { + typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); + } + if (!OidIsValid(typoid)) { + /* Unqualified type name, so search the search path */ + typoid = TypenameGetTypidExtended(typeName, temp_ok); + notPkgType = true; /* should also track type dependency, fix when refactoring */ + } } } @@ -373,29 +457,58 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* if (typmod_p != NULL) { *typmod_p = -1; } - return NULL; - } - - /* Don't support the type in blacklist. 
*/ - bool is_unsupported_type = !u_sess->attr.attr_common.IsInplaceUpgrade && IsTypeInBlacklist(typoid); - if (is_unsupported_type) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type %s is not yet supported.", format_type_be(typoid)))); - } + if (enable_plpgsql_undefined() && NULL != dependExtend) { + if (NULL != schemaname && NULL == pkgName && !OidIsValid(get_namespace_oid(schemaname, true))) { + pkgName = schemaname; + schemaname = NULL; + } + GsDependObjDesc objDesc; + objDesc.schemaName = schemaname; + char* activeSchemaName = NULL; + if (schemaname == NULL) { + activeSchemaName = get_namespace_name(get_compiled_object_nspoid()); + objDesc.schemaName = activeSchemaName; + } + objDesc.packageName = pkgName; + objDesc.name = typeName; + objDesc.type = GSDEPEND_OBJECT_TYPE_TYPE; + if (u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + dependExtend->undefDependObjOid = gsplsql_flush_undef_ref_depend_obj(&objDesc); + } else { + dependExtend->undefDependObjOid = InvalidOid; + } + dependExtend->dependUndefined = true; + pfree_ext(activeSchemaName); + if (GetCurrCompilePgObjStatus() && + u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("Type %s does not exist.", typeName))); + } + InvalidateCurrCompilePgObj(); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + } + } else { + /* Don't support the type in blacklist. 
*/ + bool is_unsupported_type = !u_sess->attr.attr_common.IsInplaceUpgrade && IsTypeInBlacklist(typoid); + if (is_unsupported_type) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type %s is not yet supported.", format_type_be(typoid)))); + } - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); - /* should not happen */ - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typoid))); - } - if (!typname->pct_type) { - typmod = typenameTypeMod(pstate, typname, (Type)tup); - } - if (typmod_p != NULL) { - *typmod_p = typmod; + /* should not happen */ + if (!HeapTupleIsValid(tup)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typoid))); + } + if (!typname->pct_type) { + typmod = typenameTypeMod(pstate, typname, (Type)tup); + } + if (typmod_p != NULL) { + *typmod_p = typmod; + } } - return (Type)tup; } @@ -406,11 +519,11 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* * a suitable error message if the type cannot be found or is not defined. * Callers of this can therefore assume the result is a fully valid type. */ -Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p) +Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p, TypeDependExtend* dependExtend) { Type tup; - tup = LookupTypeName(pstate, typname, typmod_p); + tup = LookupTypeName(pstate, typname, typmod_p, true, dependExtend); /* * If the type is relation, then we check @@ -465,11 +578,11 @@ Oid typenameTypeId(ParseState* pstate, const TypeName* typname) * This is equivalent to typenameType, but we only hand back the type OID * and typmod, not the syscache entry. 
*/ -void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p) +void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p, TypeDependExtend* dependExtend) { Type tup; - tup = typenameType(pstate, typname, typmod_p); + tup = typenameType(pstate, typname, typmod_p, dependExtend); *typeid_p = HeapTupleGetOid(tup); ReleaseSysCache(tup); } @@ -938,7 +1051,7 @@ static void pts_error_callback(void* arg) * such as "int4" or "integer" or "character varying(32)", parse * the string and convert it to a type OID and type modifier. */ -void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) +void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p, TypeDependExtend* dependExtend) { StringInfoData buf; buf.data = NULL; @@ -1002,7 +1115,7 @@ void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) goto fail; } - typenameTypeIdAndMod(NULL, typname, typeid_p, typmod_p); + typenameTypeIdAndMod(NULL, typname, typeid_p, typmod_p, dependExtend); pfree_ext(buf.data); @@ -1011,7 +1124,13 @@ void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) fail: pfree_ext(buf.data); InsertErrorMessage("invalid type name", u_sess->plsql_cxt.plpgsql_yylloc); - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + if (enable_plpgsql_undefined()) { + InvalidateCurrCompilePgObj(); + *typeid_p = UNDEFINEDOID; + ereport(WARNING, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + } else { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + } } /* @@ -1480,8 +1599,31 @@ char* CastPackageTypeName(const char* typName, Oid objOid, bool isPackage, bool return castTypName.data; } +char* ParseTypeName(const char* typName, Oid pkgOid) +{ + if (!OidIsValid(pkgOid)) { + return NULL; + } + char* oldStr = NULL; + const int oldStrLen =12; + oldStr = (char*)palloc0(oldStrLen * 
sizeof(char)); + pg_ltoa(pkgOid, oldStr); + int len = strlen(oldStr); + char* pos = strstr((char*)typName, oldStr); + pfree_ext(oldStr); + if (NULL == pos) { + return NULL; + } + pos +=len; + if (*pos != '.') { + return NULL; + } + return pstrdup(++pos); +} + /* find if %type ref a package variable type */ -HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p) +HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p, + TypeDependExtend* depend_extend) { HeapTuple tup = NULL; @@ -1489,8 +1631,7 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 return tup; #else int32 typmod = -1; - - if (u_sess->plsql_cxt.curr_compile_context == NULL) { + if (!enable_plpgsql_gsdependency_guc() && u_sess->plsql_cxt.curr_compile_context == NULL) { return tup; } @@ -1501,15 +1642,14 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 } /* find package.var%TYPE second */ - if (list_length(typname->names) <= 1 || list_length(typname->names) >= 4) { + if (list_length(typname->names) <= 1) { + return tup; + } + if (list_length(typname->names) >= (enable_plpgsql_gsdependency_guc() ? 
5 :4)) { return tup; } PLpgSQL_datum* datum = GetPackageDatum(typname->names); if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_VAR) { - if (OidIsValid(((PLpgSQL_var*)datum)->datatype->tableOfIndexType)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("not support ref table of variable as procedure argument type"))); - } Oid typOid = ((PLpgSQL_var*)datum)->datatype->typoid; tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typOid)); /* should not happen */ @@ -1521,6 +1661,19 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 if (typmod_p != NULL) { *typmod_p = typmod; } + if (enable_plpgsql_gsdependency() && NULL != depend_extend) { + DeconstructQualifiedName(typname->names, &depend_extend->schemaName, + &depend_extend->objectName, &depend_extend->packageName); + } + } else if (enable_plpgsql_undefined() && NULL != depend_extend) { + Oid undefRefObjOid = gsplsql_try_build_exist_pkg_undef_var(typname->names); + if (OidIsValid(undefRefObjOid)) { + depend_extend->undefDependObjOid = undefRefObjOid; + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + if (typmod_p != NULL) { + *typmod_p = -1; + } + } } return tup; #endif @@ -1567,13 +1720,18 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n /* pkgOid is invalid, try to find the type in current compile package */ if (!OidIsValid(pkgOid)) { - /* if not compiling packgae, just return invalid oid */ - if (u_sess->plsql_cxt.curr_compile_context == NULL || - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL) { - return typOid; + if (enable_plpgsql_gsdependency_guc() && + u_sess->plsql_cxt.functionStyleType == FUNCTION_STYLE_TYPE_REFRESH_HEAD && + OidIsValid(u_sess->plsql_cxt.currRefreshPkgOid)) { + pkgOid = u_sess->plsql_cxt.currRefreshPkgOid; + } else { + /* if not compiling packgae, just return invalid oid */ + if (u_sess->plsql_cxt.curr_compile_context == NULL || + 
u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL) { + return typOid; + } + pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; } - - pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; /* find public package type first */ castTypeName = CastPackageTypeName(typeName, pkgOid, true, true); typOid = TypenameGetTypidExtended(castTypeName, false); @@ -1606,7 +1764,13 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n pfree_ext(castTypeName); if (OidIsValid(typOid)) { - check_record_nest_tableof_index_type(NULL, typeNames); + bool pkgValid = true; + if (enable_plpgsql_gsdependency_guc()) { + pkgValid = GetPgObjectValid(pkgOid, OBJECT_TYPE_PKGSPEC); + } + if (pkgValid) { + // check_record_nest_tableof_index_type(NULL, typeNames); + } return typOid; } @@ -1681,6 +1845,14 @@ void check_type_supports_multi_charset(Oid typid, bool allow_array) } } +TypeTupStatus GetTypeTupStatus(Type typ) +{ + if (HeapTupleIsValid(typ)) { + return (UNDEFINEDOID == HeapTupleGetOid(typ) ? UndefineTypeTup : NormalTypeTup); + } + return InvalidTypeTup; +} + #ifdef DOLPHIN /* * DefineAnonymousEnum diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index 10d0db85a..603b3dbc6 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -1581,6 +1581,7 @@ scanner_init(const char *str, yyext->include_ora_comment = false; yyext->func_param_begin = 0; yyext->func_param_end = 0; + yyext->return_pos_end = 0; /* * Make a scan buffer with special termination needed by flex. 
diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y index eb8ee42fb..0366a66ee 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y +++ b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y @@ -18,6 +18,7 @@ #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/gs_package.h" +#include "catalog/gs_dependencies_fn.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_synonym.h" @@ -242,7 +243,7 @@ static Oid get_table_type(PLpgSQL_datum* datum); static Node* make_columnDef_from_attr(PLpgSQL_rec_attr* attr); static TypeName* make_typename_from_datatype(PLpgSQL_type* datatype); static Oid plpgsql_build_package_record_type(const char* typname, List* list, bool add2namespace); -static void plpgsql_build_package_array_type(const char* typname, Oid elemtypoid, char arraytype); +static void plpgsql_build_package_array_type(const char* typname, Oid elemtypoid, char arraytype, TypeDependExtend* dependExtend = NULL); static void plpgsql_build_package_refcursor_type(const char* typname); int plpgsql_yylex_single(void); static void get_datum_tok_type(PLpgSQL_datum* target, int* tok_flag); @@ -1345,6 +1346,9 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no parser_errposition(@5))); } } + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_type_in_body_dependency($3); + } } pfree_ext(varname->name); } @@ -1488,7 +1492,7 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_varrayType($2->name, $2->lineno, $9, true); if (IS_PACKAGE) { - plpgsql_build_package_array_type($2->name, $9->typoid, TYPCATEGORY_ARRAY); + plpgsql_build_package_array_type($2->name, $9->typoid, TYPCATEGORY_ARRAY, $9->dependExtend); } pfree_ext($2->name); pfree($2); @@ -1642,7 +1646,7 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } plpgsql_build_tableType($2->name, $2->lineno, $6, true); if 
(IS_PACKAGE) { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF, $6->dependExtend); } pfree_ext($2->name); pfree($2); @@ -1813,9 +1817,9 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no if (IS_PACKAGE) { if ($10->typoid == VARCHAROID) { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_VARCHAR); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_VARCHAR, $6->dependExtend); } else { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER, $6->dependExtend); } } pfree_ext($2->name); @@ -1960,6 +1964,11 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } if (IS_PACKAGE) { newp->typoid = plpgsql_build_package_record_type($2->name, $6, true); + } else if (enable_plpgsql_gsdependency()) { + ListCell* cell = NULL; + foreach(cell, $6) { + gsplsql_build_gs_type_in_body_dependency(((PLpgSQL_rec_attr*)lfirst(cell))->type); + } } pfree_ext($2->name); pfree($2); @@ -6040,6 +6049,9 @@ cursor_variable : T_DATUM $1.ident), parser_errposition(@1))); } + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_variable_dependency($1.idents); + } $$ = $1.dno; } | T_WORD @@ -8376,6 +8388,12 @@ static bool construct_cword(StringInfo ds, ArrayParseContext *context, int *tok, } else { yyerror("syntax error"); } + if (enable_plpgsql_gsdependency()) { + FuncCandidateList clist = FuncnameGetCandidates(idents, -1, NIL, false, false, true); + if (clist == NULL) { + gsplsql_build_gs_variable_dependency(idents); + } + } if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { int dno = -1; char *name = NameListToString(idents); @@ -8397,7 +8415,21 @@ static bool construct_cword(StringInfo ds, ArrayParseContext *context, int *tok, int curloc = yylloc; *tok 
= yylex(); plpgsql_push_back_token(*tok); - return construct_object_type(ds, context, makeTypeNameFromNameList(idents), tok, parenlevel, curloc, loc); + bool result; + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + result = construct_object_type(ds, context, makeTypeNameFromNameList(idents), tok, parenlevel, curloc, loc); + set_create_plsql_type(oldCreatePlsqlType); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + return result; } /* Convenience routine to read an expression with one possible terminator */ @@ -8976,6 +9008,9 @@ read_sql_construct6(int until, idents = yylval.wdatum.idents; int var_dno = yylval.wdatum.dno; + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_variable_dependency(idents); + } if (type_flag == PLPGSQL_TOK_TABLE_VAR) { /* * table var name may be schema.pkg.table_var @@ -9688,16 +9723,20 @@ read_datatype(int tok) if (tok_is_keyword(tok, &yylval, K_TYPE, "type")) { + TypeDependExtend* typeDependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&typeDependExtend); + } /* find val.col%TYPE first */ HeapTuple tup = NULL; int collectionType = PLPGSQL_COLLECTION_NONE; Oid tableOfIndexType = InvalidOid; int32 typMod = -1; - tup = FindRowVarColType(dtnames, &collectionType, &tableOfIndexType, &typMod); + tup = FindRowVarColType(dtnames, &collectionType, &tableOfIndexType, &typMod, typeDependExtend); if (tup != NULL) { Oid typOid = typeTypeId(tup); ReleaseSysCache(tup); - PLpgSQL_type* type = plpgsql_build_datatype(typOid, typMod, InvalidOid); + PLpgSQL_type* type = plpgsql_build_datatype(typOid, typMod, InvalidOid, typeDependExtend); if (OidIsValid(tableOfIndexType)) { type->collectionType = collectionType; type->tableOfIndexType = tableOfIndexType; @@ -9707,22 +9746,47 @@ read_datatype(int tok) /* find pkg.var%TYPE second */ PLpgSQL_datum* datum = 
GetPackageDatum(dtnames); - if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_VAR) { - PLpgSQL_var* var = (PLpgSQL_var*)datum; - Oid typOid = var->datatype->typoid; - int32 typmod = var->datatype->atttypmod; - Oid collation = var->datatype->collation; - int collectionType = var->datatype->collectionType; - Oid tableOfIndexType = var->datatype->tableOfIndexType; - - PLpgSQL_type* type = plpgsql_build_datatype(typOid, typmod, collation); - type->collectionType = collectionType; - type->tableOfIndexType = tableOfIndexType; - return type; + if (datum != NULL) { + if (datum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)datum; + Oid typOid = var->datatype->typoid; + int32 typmod = var->datatype->atttypmod; + Oid collation = var->datatype->collation; + int collectionType = var->datatype->collectionType; + Oid tableOfIndexType = var->datatype->tableOfIndexType; + if (var->pkg != NULL && enable_plpgsql_gsdependency()) { + typeDependExtend->objectName = pstrdup(var->refname); + typeDependExtend->packageName = pstrdup(var->pkg->pkg_signature); + typeDependExtend->schemaName = get_namespace_name(var->pkg->namespaceOid); + } + PLpgSQL_type* type = plpgsql_build_datatype(typOid, typmod, collation, typeDependExtend); + type->collectionType = collectionType; + type->tableOfIndexType = tableOfIndexType; + return type; + } else if (datum->dtype == PLPGSQL_DTYPE_ROW){ + PLpgSQL_row* row = (PLpgSQL_row*)datum; + if (row->rowtupdesc && row->rowtupdesc->tdtypeid != RECORDOID && + OidIsValid(row->rowtupdesc->tdtypeid)) { + if (row->pkg != NULL && enable_plpgsql_gsdependency()) { + typeDependExtend->objectName = pstrdup(row->refname); + typeDependExtend->packageName = pstrdup(row->pkg->pkg_signature); + typeDependExtend->schemaName = get_namespace_name(row->pkg->namespaceOid); + } + return plpgsql_build_datatype(row->rowtupdesc->tdtypeid, -1, InvalidOid, typeDependExtend); + } + } } - result = plpgsql_parse_cwordtype(dtnames); + result = 
plpgsql_parse_cwordtype(dtnames, typeDependExtend); if (result) return result; + if (enable_plpgsql_undefined()) { + Oid tryUndefObjOid = gsplsql_try_build_exist_pkg_undef_var(dtnames); + if (OidIsValid(tryUndefObjOid)) { + typeDependExtend->undefDependObjOid = tryUndefObjOid; + typeDependExtend->dependUndefined = true; + return plpgsql_build_datatype(UNDEFINEDOID, -1, InvalidOid, typeDependExtend); + } + } } else if (tok_is_keyword(tok, &yylval, K_ROWTYPE, "rowtype")) @@ -12130,7 +12194,25 @@ parse_datatype(const char *string, int location) u_sess->plsql_cxt.plpgsql_yylloc = plpgsql_yylloc; /* Let the main parser try to parse it under standard SQL rules */ - parseTypeString(string, &type_id, &typmod); + TypeDependExtend* typeDependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&typeDependExtend); + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + parseTypeString(string, &type_id, &typmod, typeDependExtend); + set_create_plsql_type(oldCreatePlsqlType); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + } else { + parseTypeString(string, &type_id, &typmod, typeDependExtend); + } (void)MemoryContextSwitchTo(oldCxt); @@ -12140,11 +12222,11 @@ parse_datatype(const char *string, int location) /* Okay, build a PLpgSQL_type data structure for it */ if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { - return plpgsql_build_datatype(type_id, typmod, 0); + return plpgsql_build_datatype(type_id, typmod, 0, typeDependExtend); } return plpgsql_build_datatype(type_id, typmod, - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_input_collation); + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_input_collation, typeDependExtend); } /* Build a arrary_type by elem_type. 
*/ @@ -12519,6 +12601,9 @@ static PLpgSQL_type* build_type_from_record_var(int dno, int location) /* already build one, just use it */ if(IsPackageDependType(oldtypeoid, pkgoid)) { newtypeoid = oldtypeoid; + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } else { ereport(errstate, (errmodule(MOD_PLSQL), @@ -12558,6 +12643,9 @@ static PLpgSQL_type* build_type_from_record_var(int dno, int location) /* build dependency on created composite type. */ buildDependencyForCompositeType(newtypeoid); + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } /* build datatype of the created composite type. */ @@ -12595,6 +12683,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo Oid oldtypeoid = InvalidOid; Oid newtypeoid = InvalidOid; char* schamaName = NULL; + Oid pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; Oid pkgNamespaceOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->namespaceOid; if (OidIsValid(pkgNamespaceOid)) { schamaName = get_namespace_name(pkgNamespaceOid); @@ -12602,7 +12691,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo pkgNamespaceOid = getCurrentNamespace(); } char* casttypename = CastPackageTypeName(typname, - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid, true, + pkgOid, true, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->is_spec_compiling); if (strlen(casttypename) >= NAMEDATALEN ) { ereport(errstate, @@ -12617,10 +12706,14 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo } oldtypeoid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(casttypename), ObjectIdGetDatum(pkgNamespaceOid)); - if (OidIsValid(oldtypeoid)) { + bool oldTypeOidIsValid = OidIsValid(oldtypeoid); + if (oldTypeOidIsValid) { /* already 
build on, just use it */ - if(IsPackageDependType(oldtypeoid, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid)) { + if(IsPackageDependType(oldtypeoid, pkgOid)) { newtypeoid = oldtypeoid; + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } else { ereport(errstate, (errmodule(MOD_PLSQL), @@ -12667,6 +12760,9 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo CommandCounterIncrement(); pfree_ext(r); list_free_deep(codeflist); + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + gsplsql_build_ref_type_dependency(newtypeoid); + } } PLpgSQL_type *newtype = NULL; @@ -12682,7 +12778,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo return newtypeoid; } -static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid, char arraytype) +static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid, char arraytype, TypeDependExtend* dependExtend) { char typtyp; ObjectAddress myself, referenced; @@ -12703,8 +12799,25 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid pkgNamespaceOid = getCurrentNamespace(); } + Oid pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; Oid oldtypeoid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(casttypename), ObjectIdGetDatum(pkgNamespaceOid)); + bool oldtypeoidIsValid = OidIsValid(oldtypeoid); + if (enable_plpgsql_gsdependency() && u_sess->plsql_cxt.need_create_depend) { + char* schemaName = get_namespace_name(pkgNamespaceOid); + char* packageName = GetPackageName(pkgOid); + bool dependUndef = gsplsql_check_type_depend_undefined(schemaName, packageName, typname); + pfree_ext(schemaName); + pfree_ext(packageName); + if (dependUndef) { + ObjectAddress address; + address.classId = TypeRelationId; + address.objectId = oldtypeoid; + address.objectSubId = 0; + performDeletion(&address, 
DROP_CASCADE, PERFORM_DELETION_INTERNAL); + oldtypeoidIsValid = false; + } + } if (OidIsValid(oldtypeoid)) { /* alread build one, just return */ if(IsPackageDependType(oldtypeoid, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid)) { @@ -12723,7 +12836,9 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid if (arraytype == TYPCATEGORY_TABLEOF || arraytype == TYPCATEGORY_TABLEOF_VARCHAR || arraytype == TYPCATEGORY_TABLEOF_INTEGER) { - elemtypoid = get_array_type(elemtypoid); + if (UNDEFINEDOID != elemtypoid) { + elemtypoid = get_array_type(elemtypoid); + } typtyp = TYPTYPE_TABLEOF; } else { typtyp = TYPTYPE_BASE; @@ -12764,7 +12879,8 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid -1, /* typmod */ 0, /* array dimensions for typBaseType */ false, /* Type NOT NULL */ - get_typcollation(elemtypoid)); + get_typcollation(elemtypoid), + dependExtend); CommandCounterIncrement(); @@ -12777,6 +12893,7 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid pfree_ext(casttypename); } + static void plpgsql_build_package_refcursor_type(const char* typname) { CreateSynonymStmt stmt; @@ -12888,7 +13005,7 @@ static Node* make_columnDef_from_attr(PLpgSQL_rec_attr* attr) static TypeName* make_typename_from_datatype(PLpgSQL_type* datatype) { - return makeTypeNameFromOid(datatype->typoid, datatype->atttypmod); + return makeTypeNameFromOid(datatype->typoid, datatype->atttypmod, datatype->dependExtend); } /* diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp index b7dd47376..5da492123 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp @@ -47,6 +47,9 @@ #include "miscadmin.h" #include "tcop/tcopprot.h" #include "commands/event_trigger.h" +#include "catalog/gs_dependencies_fn.h" +#include "catalog/pg_object.h" +#include 
"catalog/pg_type_fn.h" #ifdef DOLPHIN #include "plugin_commands/mysqlmode.h" #endif @@ -131,7 +134,7 @@ static bool plpgsql_check_search_path(PLpgSQL_function* func, HeapTuple proc_tup return check_search_path_interface(func->fn_searchpath->schemas, proc_tup); } -PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator) +PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator, bool isRecompile) { Oid func_oid = fcinfo->flinfo->fn_oid; PLpgSQL_func_hashkey hashkey; @@ -141,6 +144,7 @@ PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator) #ifdef DOLPHIN bool recompile_due_to_strict = false; #endif + bool func_valid = true; /* * Lookup the pg_proc tuple by Oid; we'll need it in any case */ @@ -159,7 +163,25 @@ PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator) Datum pkgoiddatum = SysCacheGetAttr(PROCOID, proc_tup, Anum_pg_proc_packageid, &isnull); Oid packageOid = DatumGetObjectId(pkgoiddatum); Oid old_value = saveCallFromPkgOid(packageOid); - + if (enable_plpgsql_gsdependency_guc()) { + if (func == NULL) { + /* Compute hashkey using function signature and actual arg types */ + compute_function_hashkey(proc_tup, fcinfo, proc_struct, &hashkey, for_validator); + hashkey_valid = true; + /* And do the lookup */ + func = plpgsql_HashTableLookup(&hashkey); + } + /** + * only check for func need recompile or not, + */ + if (func_oid >= FirstNormalObjectId) { + func_valid = GetPgObjectValid(func_oid, OBJECT_TYPE_PROC); + } + if (!func_valid) { + fcinfo->flinfo->fn_extra = NULL; + } + } + recheck: if (func == NULL) { /* Compute hashkey using function signature and actual arg types */ @@ -170,6 +192,11 @@ recheck: func = plpgsql_HashTableLookup(&hashkey); } + if (!func_valid && func != NULL && !u_sess->plsql_cxt.need_create_depend && + !isRecompile && u_sess->SPI_cxt._connected >= 0 && !u_sess->plsql_cxt.during_compile) { + func->is_need_recompile = true; + } + if (func != NULL) { /* 
We have a compiled function, but is it still valid? */ #ifdef DOLPHIN @@ -177,7 +204,8 @@ recheck: proc_struct->pronamespace != PG_CATALOG_NAMESPACE); #endif if (func->fn_xmin == HeapTupleGetRawXmin(proc_tup) && - ItemPointerEquals(&func->fn_tid, &proc_tup->t_self) && plpgsql_check_search_path(func, proc_tup) + ItemPointerEquals(&func->fn_tid, &proc_tup->t_self) && plpgsql_check_search_path(func, proc_tup) && + !isRecompile && !func->is_need_recompile #ifdef DOLPHIN && !recompile_due_to_strict #endif @@ -253,9 +281,14 @@ recheck: */ PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; int save_compile_status = getCompileStatus(); + bool save_curr_status = GetCurrCompilePgObjStatus(); PG_TRY(); { + List* ref_obj_list = gsplsql_prepare_recompile_func(func_oid, proc_struct->pronamespace, packageOid, isRecompile); + SetCurrCompilePgObjStatus(true); func = do_compile(fcinfo, proc_tup, func, &hashkey, for_validator); + UpdateCurrCompilePgObjStatus(save_curr_status); + gsplsql_complete_recompile_func(ref_obj_list); (void)CompileStatusSwtichTo(save_compile_status); } PG_CATCH(); @@ -268,6 +301,7 @@ recheck: InsertError(func_oid); } #endif + SetCurrCompilePgObjStatus(save_compile_status); popToOldCompileContext(save_compile_context); (void)CompileStatusSwtichTo(save_compile_status); PG_RE_THROW(); @@ -613,6 +647,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, int* in_arg_varnos = NULL; PLpgSQL_variable** out_arg_variables; Oid pkgoid = InvalidOid; + Oid namespaceOid = InvalidOid; Oid* saved_pseudo_current_userId = NULL; char* signature = NULL; @@ -654,6 +689,8 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, /* Null prokind items are created when there is no procedure */ isFunc = true; } + Datum pronamespaceDatum = SysCacheGetAttr(PROCOID, proc_tup, Anum_pg_proc_pronamespace, &isnull); + namespaceOid = DatumGetObjectId(pronamespaceDatum); /* * Setup error traceback 
support for ereport() */ @@ -742,6 +779,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(curr_compile->compile_cxt); func->fn_signature = pstrdup(signature); func->is_private = BoolGetDatum(proisprivatedatum); + func->namespaceOid = namespaceOid; /* * if function belong to a package, it will use package search path. */ @@ -1223,8 +1261,63 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, * Now parse the function's text */ bool saved_flag = u_sess->plsql_cxt.have_error; - u_sess->plsql_cxt.have_error = false; - parse_rc = plpgsql_yyparse(); + ResourceOwnerData* oldowner = NULL; + int64 stackId = 0; + MemoryContext oldcxt; + volatile bool has_error = false; + if (enable_plpgsql_gsdependency_guc() && u_sess->plsql_cxt.isCreateFunction && !IsInitdb) { + oldowner = t_thrd.utils_cxt.CurrentResourceOwner; + oldcxt = CurrentMemoryContext; + SPI_savepoint_create("createFunction"); + stackId = u_sess->plsql_cxt.nextStackEntryId; + MemoryContextSwitchTo(oldcxt); + bool save_isPerform = u_sess->parser_cxt.isPerform; + PG_TRY(); + { + u_sess->parser_cxt.isPerform = false; + parse_rc = plpgsql_yyparse(); + u_sess->parser_cxt.isPerform = save_isPerform; + SPI_savepoint_release("createFunction"); + stp_cleanup_subxact_resource(stackId); + MemoryContextSwitchTo(oldcxt); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; + } + PG_CATCH(); + { + u_sess->parser_cxt.isPerform = save_isPerform; + SPI_savepoint_rollbackAndRelease("createFunction", InvalidTransactionId); + stp_cleanup_subxact_resource(stackId); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; + MemoryContextSwitchTo(oldcxt); + has_error = true; + ErrorData* edata = &t_thrd.log_cxt.errordata[t_thrd.log_cxt.errordata_stack_depth]; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s", edata->message), + errdetail("N/A"), + errcause("compile package or 
procedure error."), + erraction("check package or procedure error and redefine"))); + if (edata->sqlerrcode == ERRCODE_OUT_OF_LOGICAL_MEMORY) { + PG_RE_THROW(); + } + FlushErrorState(); + } + PG_END_TRY(); + }else { + bool save_isPerform = u_sess->parser_cxt.isPerform; + u_sess->parser_cxt.isPerform = false; + parse_rc = plpgsql_yyparse(); + u_sess->parser_cxt.isPerform = save_isPerform; + } + if (enable_plpgsql_gsdependency_guc() && has_error) { + plpgsql_scanner_finish(); + pfree_ext(proc_source); + PopOverrideSearchPath(); + u_sess->plsql_cxt.curr_compile_context = popCompileContext(); + clearCompileContext(curr_compile); + return NULL; + } #ifndef ENABLE_MULTIPLE_NODES if (u_sess->plsql_cxt.have_error && u_sess->attr.attr_common.plsql_show_all_error) { u_sess->plsql_cxt.have_error = false; @@ -1350,6 +1443,31 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, if (curr_compile->plpgsql_DumpExecTree) { plpgsql_dumptree(func); } + + if (enable_plpgsql_gsdependency_guc()) { + bool curr_compile_status = GetCurrCompilePgObjStatus(); + if (curr_compile_status) { + bool is_undefined = gsplsql_is_undefined_func(func->fn_oid); + func->isValid = !is_undefined; + + if (!func->isValid && u_sess->plsql_cxt.createPlsqlType == CREATE_PLSQL_TYPE_RECOMPILE) { + GsDependObjDesc obj = gsplsql_construct_func_head_obj(func->fn_oid, func->namespaceOid, func->pkg_oid); + obj.type = GSDEPEND_OBJECT_TYPE_PROCHEAD; + gsplsql_do_refresh_proc_header(&obj, &is_undefined); + } + + if (is_undefined && !u_sess->plsql_cxt.compile_has_warning_info) { + u_sess->plsql_cxt.compile_has_warning_info = true; + ereport(WARNING, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The header information of function %s is not defined.", NameStr(proc_struct->proname)))); + } + UpdateCurrCompilePgObjStatus(!is_undefined); + } else { + func->isValid = GetCurrCompilePgObjStatus(); + } + } else { + func->isValid = true; + } /* * add it to the hash table except 
specified function. */ @@ -2117,7 +2235,7 @@ void getTableofTypeFromVar(PLpgSQL_var* var, int* collectionType, Oid* tableofIn } } -HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofIndexType, int32* typMod) +HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofIndexType, int32* typMod, TypeDependExtend* dependExtend) { if (u_sess->plsql_cxt.curr_compile_context == NULL) { return NULL; @@ -2125,28 +2243,31 @@ HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofInd PLpgSQL_datum* datum = NULL; char* field = NULL; - + char* schemaName = NULL; + char* packageName = NULL; + char* objectName = NULL; /* find row var and field first */ switch (list_length(nameList)) { case 2: { + objectName = strVal(linitial(nameList)); datum = plpgsql_lookup_datum(false, strVal(linitial(nameList)), NULL, NULL, NULL); field = strVal(lsecond(nameList)); break; } case 3: { - char* word1 = strVal(linitial(nameList)); - char* word2 = strVal(lsecond(nameList)); - List *names2 = list_make2(makeString(word1), makeString(word2)); + packageName = strVal(linitial(nameList)); + objectName = strVal(lsecond(nameList)); + List *names2 = list_make2(makeString(packageName), makeString(objectName)); datum = GetPackageDatum(names2); list_free_ext(names2); field = strVal(lthird(nameList)); break; } case 4: { - char* word1 = strVal(linitial(nameList)); - char* word2 = strVal(lsecond(nameList)); - char* word3 = strVal(lthird(nameList)); - List *names3 = list_make3(makeString(word1), makeString(word2), makeString(word3)); + schemaName = strVal(linitial(nameList)); + packageName = strVal(lsecond(nameList)); + objectName = strVal(lthird(nameList)); + List *names3 = list_make3(makeString(schemaName), makeString(packageName), makeString(objectName)); datum = GetPackageDatum(names3); list_free_ext(names3); field = strVal(lfourth(nameList)); @@ -2196,6 +2317,12 @@ HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* 
tableofInd return NULL; } + if (enable_plpgsql_gsdependency() && NULL != dependExtend) { + dependExtend->schemaName = schemaName; + dependExtend->packageName = packageName; + dependExtend->objectName = objectName; + } + HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typOid)); /* should not happen */ if (!HeapTupleIsValid(tup)) { @@ -3077,11 +3204,26 @@ PLpgSQL_type* plpgsql_parse_wordtype(char* ident) return NULL; } +static PLpgSQL_type* gsplsql_make_type_for_pkg_var_ref_type(GsDependObjDesc* obj, PLpgSQL_datum* datum, + TypeDependExtend* dependExtend) +{ + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->schemaName = pstrdup(obj->schemaName); + dependExtend->packageName = pstrdup(obj->packageName); + dependExtend->objectName = pstrdup(obj->name); + PLpgSQL_var* var = (PLpgSQL_var*)datum; + PLpgSQL_type* type = plpgsql_build_datatype(var->datatype->typoid, var->datatype->atttypmod, + var->datatype->collation, dependExtend); + type->collectionType = var->datatype->collectionType; + type->tableOfIndexType = var->datatype->tableOfIndexType; + return type; +} + /* ---------- * plpgsql_parse_cwordtype Same lookup for compositeword%TYPE * ---------- */ -PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) +PLpgSQL_type* plpgsql_parse_cwordtype(List* idents, TypeDependExtend* dependExtend) { PLpgSQL_type* dtype = NULL; PLpgSQL_nsitem* nse = NULL; @@ -3161,6 +3303,21 @@ PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) goto done; } fldname = strVal(lthird(idents)); + } else if (enable_plpgsql_gsdependency_guc()) { + GsDependObjDesc objDesc; + Oid schemaOId = gsplsql_parse_pkg_var_obj4(&objDesc, idents); + if (!OidIsValid(schemaOId) || !OidIsValid(PackageNameGetOid(objDesc.packageName, schemaOId))) { + goto done; + } + List* new_var_name = list_make3(makeString(objDesc.schemaName), makeString(objDesc.packageName), + makeString(objDesc.name)); + PLpgSQL_datum* datum = GetPackageDatum(new_var_name); + list_free_ext(new_var_name); + if (datum != NULL && 
datum->dtype == PLPGSQL_DTYPE_VAR) { + MemoryContextSwitchTo(old_cxt); + return gsplsql_make_type_for_pkg_var_ref_type(&objDesc, datum, dependExtend); + } + goto done; } else { goto done; } @@ -3202,6 +3359,13 @@ PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) */ MemoryContextSwitchTo(old_cxt); dtype = build_datatype(type_tup, attr_struct->atttypmod, attr_struct->attcollation); + if (enable_plpgsql_gsdependency() && NULL != dtype) { + Oid typ_oid = get_rel_type_id(class_oid); + AssertEreport(InvalidOid != typ_oid, MOD_PLSQL, "all relation must have type"); + dtype->dependExtend = dependExtend; + InstanceTypeNameDependExtend(&dtype->dependExtend); + dtype->dependExtend->typeOid = typ_oid; + } MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); done: @@ -3233,20 +3397,45 @@ PLpgSQL_type* plpgsql_parse_wordrowtype(char* ident) * but no need to collect more errdetails. */ (void)RelnameGetRelidExtended(ident, &class_oid); - + Oid typ_oid = InvalidOid; + TypeDependExtend* dependExtend = NULL; if (!OidIsValid(class_oid)) { char message[MAXSTRLEN]; errno_t rc = 0; rc = sprintf_s(message, MAXSTRLEN, "relation \"%s\" does not exist when parse word.", ident); securec_check_ss(rc, "", ""); InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc, true); - ereport(ERROR, - (errmodule(MOD_PLSQL), - errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("relation \"%s\" does not exist when parse word.", ident))); + if (enable_plpgsql_undefined()) { + RangeVar rangvar; + rangvar.schemaname = NULL; + rangvar.relname = ident; + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(&rangvar); + if (OidIsValid(undefRefObjOid)) { + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + typ_oid = UNDEFINEDOID; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } else { + 
ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } + } else { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } + } else { + typ_oid = get_rel_type_id(class_oid); } /* Build and return the row type struct */ - return plpgsql_build_datatype(get_rel_type_id(class_oid), -1, InvalidOid); + return plpgsql_build_datatype(typ_oid, -1, InvalidOid, dependExtend); } /* ---------- @@ -3260,23 +3449,64 @@ PLpgSQL_type* plpgsql_parse_cwordrowtype(List* idents) RangeVar* relvar = NULL; MemoryContext old_cxt = NULL; - if (list_length(idents) != 2) { + if (!enable_plpgsql_gsdependency_guc() && list_length(idents) != 2) { return NULL; } + switch (list_length(idents)) + { + case 1: + relvar = makeRangeVar(NULL, strVal(linitial(idents)), -1); + break; + case 2: + relvar = makeRangeVar(strVal(linitial(idents)), strVal(lsecond(idents)), -1); + break; + case 3: + relvar = makeRangeVar(strVal(lsecond(idents)), strVal(lthird(idents)), -1); + relvar->catalogname = strVal(linitial(idents)); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("improper %%ROWTYPE reference"))); + break; + } /* Avoid memory leaks in long-term function context */ old_cxt = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); /* Look up relation name. Can't lock it - we might not have privileges. */ - relvar = makeRangeVar(strVal(linitial(idents)), strVal(lsecond(idents)), -1); - + Oid typ_oid = InvalidOid; + TypeDependExtend* dependExtend = NULL; /* Here relvar is allowed to be a synonym object. 
*/ - class_oid = RangeVarGetRelidExtended(relvar, NoLock, false, false, false, true, NULL, NULL); + if (!enable_plpgsql_undefined()) { + class_oid = RangeVarGetRelidExtended(relvar, NoLock, false, false, false, true, NULL, NULL); + pfree_ext(relvar); + typ_oid = get_rel_type_id(class_oid); + } else { + class_oid = RangeVarGetRelidExtended(relvar, NoLock, true, false, false, true, NULL, NULL); + typ_oid = get_rel_type_id(class_oid); + if (!OidIsValid(typ_oid) && enable_plpgsql_undefined()) { + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(relvar); + pfree_ext(relvar); + if (OidIsValid(undefRefObjOid)) { + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + typ_oid = UNDEFINEDOID; + } + } + if (!OidIsValid(typ_oid) || UNDEFINEDOID == typ_oid) { + ereport((typ_oid == UNDEFINEDOID ? WARNING : ERROR), + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("relation does not exist when parse word."))); + } + } + MemoryContextSwitchTo(old_cxt); /* Build and return the row type struct */ - return plpgsql_build_datatype(get_rel_type_id(class_oid), -1, InvalidOid); + return plpgsql_build_datatype(typ_oid, -1, InvalidOid, dependExtend); } /* cursor generate a composite type, find its col type */ @@ -4012,7 +4242,7 @@ PLpgSQL_row* build_row_from_rec_type(const char* rowname, int lineno, PLpgSQL_re * If collation is not InvalidOid then it overrides the type's default * collation. But collation is ignored if the datatype is non-collatable. 
*/ -PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation) +PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation, TypeDependExtend* type_depend_extend) { HeapTuple type_tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); if (!HeapTupleIsValid(type_tup)) { @@ -4045,7 +4275,11 @@ PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation) typ = build_datatype(type_tup, typmod, collation); } ReleaseSysCache(type_tup); - + if (enable_plpgsql_gsdependency() && NULL != typ) { + InstanceTypeNameDependExtend(&type_depend_extend); + typ->dependExtend = type_depend_extend; + typ->dependExtend->typeOid = typeOid; + } return typ; } @@ -4087,6 +4321,7 @@ PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation) case TYPTYPE_DOMAIN: case TYPTYPE_ENUM: case TYPTYPE_RANGE: + case TYPTYPE_UNDEFINE: #ifdef DOLPHIN case TYPTYPE_SET: #endif @@ -4120,6 +4355,7 @@ PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation) typ->typrelid = type_struct->typrelid; typ->typioparam = getTypeIOParam(type_tup); typ->collation = type_struct->typcollation; + typ->dependExtend = NULL; if (OidIsValid(collation) && OidIsValid(typ->collation)) { typ->collation = collation; } @@ -4481,7 +4717,7 @@ PLpgSQL_condition* plpgsql_parse_err_condition_b(const char* condname) if (prev == NULL) { prev = plpgsql_parse_err_condition_b_signal(condname); } - + return prev; } diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp index 447182f6d..4509c2c3c 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp @@ -43,6 +43,8 @@ #include "executor/spi_priv.h" #include "distributelayer/streamMain.h" #include "commands/event_trigger.h" +#include "catalog/pg_object.h" +#include "catalog/gs_dependencies_fn.h" #ifdef STREAMPLAN #include "optimizer/streamplan.h" @@ -56,6 +58,8 
@@ PG_MODULE_MAGIC; #endif #define MAXSTRLEN ((1 << 11) - 1) +static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, int oldCompileStatus, + PLpgSQL_compile_context *curr_compile, List *temp_tableof_index, MemoryContext oldcxt); static void auditExecPLpgSQLFunction(PLpgSQL_function* func, AuditResult result) { char details[PGAUDIT_MAXLENGTH]; @@ -778,6 +782,8 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) #endif int connect = SPI_connectid(); Oid firstLevelPkgOid = InvalidOid; + bool save_need_create_depend = u_sess->plsql_cxt.need_create_depend; + bool save_curr_status = GetCurrCompilePgObjStatus(); PG_TRY(); { PGSTAT_START_PLSQL_TIME_RECORD(); @@ -788,7 +794,33 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) bool saved_current_stp_with_exception = plpgsql_get_current_value_stp_with_exception(); /* Find or compile the function */ if (func == NULL) { + u_sess->plsql_cxt.compile_has_warning_info = false; + SetCurrCompilePgObjStatus(true); + if (enable_plpgsql_gsdependency_guc()) { + if (gsplsql_is_undefined_func(func_oid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), + (errmsg("\"%s\" header is undefined, you can try to recreate", get_func_name(func_oid))))); + } + if (GetPgObjectValid(func_oid, OBJECT_TYPE_PROC)) { + u_sess->plsql_cxt.need_create_depend = false; + } else { + u_sess->plsql_cxt.need_create_depend = true; + } + } func = plpgsql_compile(fcinfo, false); + if (func == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_FUNCTION_PROVIDED), errmodule(MOD_PLSQL), + errmsg("compile function error."), + errdetail("It may be because the compilation encountered an error and the exception was caught."), + errcause("compile procedure error."), + erraction("compile function result is null, it has error"))); + } + if (enable_plpgsql_gsdependency_guc()) { + if (!OidIsValid(func->pkg_oid)) { + SetPgObjectValid(func_oid, OBJECT_TYPE_PROC, true); + } + } + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; } if 
(func->fn_readonly) { stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_IMMUTABLE); @@ -980,6 +1012,8 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) } PG_CATCH(); { + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; + SetCurrCompilePgObjStatus(save_curr_status); /* clean stp save pointer if the outermost function is end. */ if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; @@ -1001,7 +1035,7 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) /* destory all the SPI connect created in this PL function. */ SPI_disconnect(connect); - + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; /* re-throw the original error messages */ ReThrowError(edata); } @@ -1031,6 +1065,7 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) u_sess->opt_cxt.is_stream_support = outer_is_stream_support; } #endif + UpdateCurrCompilePgObjStatus(save_curr_status); if (has_switch) { SetUserIdAndSecContext(old_user, save_sec_context); u_sess->exec_cxt.cast_owner = InvalidOid; @@ -1341,6 +1376,7 @@ Datum b_plpgsql_validator(PG_FUNCTION_ARGS) } ReleaseSysCache(tuple); + bool save_curr_status = GetCurrCompilePgObjStatus(); /* Postpone body checks if !u_sess->attr.attr_sql.check_function_bodies */ if (u_sess->attr.attr_sql.check_function_bodies) { FunctionCallInfoData fake_fcinfo; @@ -1378,12 +1414,14 @@ Datum b_plpgsql_validator(PG_FUNCTION_ARGS) /* Test-compile the function */ PG_TRY(); { + SetCurrCompilePgObjStatus(true); u_sess->parser_cxt.isCreateFuncOrProc = true; func = plpgsql_compile(&fake_fcinfo, true); u_sess->parser_cxt.isCreateFuncOrProc = false; } PG_CATCH(); { + SetCurrCompilePgObjStatus(save_curr_status); #ifndef ENABLE_MULTIPLE_NODES u_sess->parser_cxt.isPerform = false; bool insertError = (u_sess->attr.attr_common.plsql_show_all_error || @@ -1442,6 +1480,7 @@ Datum b_plpgsql_validator(PG_FUNCTION_ARGS) pl_validate_function_sql(func, replace); u_sess->ClientAuthInProgress = saved_client_auth; } + 
UpdateCurrCompilePgObjStatus(save_curr_status); } #ifndef ENABLE_MULTIPLE_NODES if (!IsInitdb && u_sess->plsql_cxt.isCreateFunction) { @@ -1581,7 +1620,7 @@ void FunctionInPackageCompile(PLpgSQL_package* pkg) * ---------- */ #ifndef ENABLE_MULTIPLE_NODES -void PackageInit(PLpgSQL_package* pkg, bool isCreate) +void PackageInit(PLpgSQL_package* pkg, bool isCreate, bool isSpec, bool isNeedCompileFunc) { if (likely(pkg != NULL)) { if (likely(pkg->isInit)) { @@ -1594,23 +1633,23 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) PushOverrideSearchPath(pkg->pkg_searchpath); ListCell* cell = NULL; int oldCompileStatus = getCompileStatus(); - if (isCreate) { - CompileStatusSwtichTo(COMPILIE_PKG); - } + CompileStatusSwtichTo(COMPILIE_PKG); PLpgSQL_compile_context* curr_compile = createCompileContext("PL/pgSQL package context"); SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt); MemoryContext temp = NULL; - if (u_sess->plsql_cxt.curr_compile_context != NULL) { + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt != NULL) { temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); } u_sess->plsql_cxt.curr_compile_context = curr_compile; pushCompileContext(); curr_compile->plpgsql_curr_compile_package = pkg; checkCompileMemoryContext(pkg->pkg_cxt); + MemoryContext oldcxt = MemoryContextSwitchTo(pkg->pkg_cxt); if (isCreate) { int exception_num = 0; - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + curr_compile->compile_tmp_cxt = oldcxt; processPackageProcList(pkg); foreach(cell, pkg->proc_list) { @@ -1625,9 +1664,11 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) } PG_CATCH(); { + set_create_plsql_type_end(); if (u_sess->plsql_cxt.create_func_error) { u_sess->plsql_cxt.create_func_error = false; exception_num += 1; + FlushErrorState(); } else { PG_RE_THROW(); } @@ -1647,16 +1688,15 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) errcause("compile 
procedure error."), erraction("check procedure error and redefine procedure"))); } - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); } else { - if (pkg->is_bodycompiled) { + if (pkg->is_bodycompiled && !isSpec && isNeedCompileFunc) { (void)CompileStatusSwtichTo(COMPILIE_PKG_FUNC); - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + curr_compile->compile_tmp_cxt = oldcxt; FunctionInPackageCompile(pkg); - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); (void)CompileStatusSwtichTo(oldCompileStatus); } } + (void*)MemoryContextSwitchTo(oldcxt); if (u_sess->attr.attr_common.plsql_show_all_error) { PopOverrideSearchPath(); ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), @@ -1678,71 +1718,43 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); int save_compile_status = u_sess->plsql_cxt.compile_status; List* temp_tableof_index = NULL; + bool save_is_package_instantiation = u_sess->plsql_cxt.is_package_instantiation; + bool needExecDoStmt = true; + if (enable_plpgsql_undefined()) { + needExecDoStmt = GetCurrCompilePgObjStatus(); + } + ResourceOwnerData* oldowner = NULL; + int64 stackId = 0; + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + oldowner = t_thrd.utils_cxt.CurrentResourceOwner; + SPI_savepoint_create("PackageInit"); + stackId = u_sess->plsql_cxt.nextStackEntryId; + } + bool save_isPerform = u_sess->parser_cxt.isPerform; PG_TRY(); { u_sess->plsql_cxt.is_package_instantiation = true; - foreach(cell, pkg->proc_list) { - if (IsA(lfirst(cell), DoStmt)) { - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); - DoStmt* doStmt = (DoStmt*)lfirst(cell); - if (!isCreate) { - if (!doStmt->isExecuted) { - (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); - temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; - u_sess->plsql_cxt.func_tableof_index = NULL; - if 
(u_sess->SPI_cxt._connected > -1 && - u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { - SPI_STACK_LOG("begin", NULL, NULL); - _SPI_begin_call(false); - ExecuteDoStmt(doStmt, true); - SPI_STACK_LOG("end", NULL, NULL); - _SPI_end_call(false); - } else { - ExecuteDoStmt(doStmt, true); - } - if (!doStmt->isSpec) { - pkg->isInit = true; - - } - free_func_tableof_index(); - u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; - (void)CompileStatusSwtichTo(oldCompileStatus); - doStmt->isExecuted = true; - } - } else { - if (doStmt->isSpec && !doStmt->isExecuted) { - (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); - temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; - u_sess->plsql_cxt.func_tableof_index = NULL; - if (u_sess->SPI_cxt._connected > -1 && - u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { - SPI_STACK_LOG("begin", NULL, NULL); - _SPI_begin_call(false); - ExecuteDoStmt(doStmt, true); - SPI_STACK_LOG("end", NULL, NULL); - _SPI_end_call(false); - } else if (!doStmt->isExecuted) { - ExecuteDoStmt(doStmt, true); - } - free_func_tableof_index(); - u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; - (void)CompileStatusSwtichTo(oldCompileStatus); - doStmt->isExecuted = true; - } - } - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); - } + if (needExecDoStmt) { + init_do_stmt(pkg, isCreate, cell, oldCompileStatus, curr_compile, temp_tableof_index, oldcxt); + } + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + SPI_savepoint_release("PackageInit"); + stp_cleanup_subxact_resource(stackId); + MemoryContextSwitchTo(oldcxt); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; } stp_reset_xact_state_and_err_msg(oldStatus, needResetErrMsg); - u_sess->plsql_cxt.is_package_instantiation = false; + u_sess->plsql_cxt.is_package_instantiation = save_is_package_instantiation; ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), errmsg("%s finish compile, level: %d", __func__, 
list_length(u_sess->plsql_cxt.compile_context_list)))); u_sess->plsql_cxt.curr_compile_context = popCompileContext(); CompileStatusSwtichTo(oldCompileStatus); clearCompileContext(curr_compile); + PopOverrideSearchPath(); } PG_CATCH(); { + u_sess->parser_cxt.isPerform = save_isPerform; stp_reset_xact_state_and_err_msg(oldStatus, needResetErrMsg); u_sess->plsql_cxt.is_package_instantiation = false; free_temp_func_tableof_index(temp_tableof_index); @@ -1753,10 +1765,35 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) u_sess->plsql_cxt.curr_compile_context = save_compile_context; u_sess->plsql_cxt.compile_status = save_compile_status; clearCompileContextList(save_compile_list_length); - PG_RE_THROW(); + u_sess->plsql_cxt.curr_compile_context = popCompileContext(); + /*avoid memeory leak*/ + clearCompileContext(curr_compile); + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + SPI_savepoint_rollbackAndRelease("PackageInit", InvalidTransactionId); + stp_cleanup_subxact_resource(stackId); + if (likely(u_sess->SPI_cxt._curid >= 0)) { + if (likely(u_sess->SPI_cxt._current == &(u_sess->SPI_cxt._stack[u_sess->SPI_cxt._curid]))) { + _SPI_end_call(true); + } + } + SPI_finish(); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; + MemoryContextSwitchTo(oldcxt); + ErrorData* edata = &t_thrd.log_cxt.errordata[t_thrd.log_cxt.errordata_stack_depth]; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s", edata->message), + errdetail("N/A"), + errcause("compile package or procedure error."), + erraction("check package or procedure error and redefine"))); + FlushErrorState(); + } else { + PG_RE_THROW(); + } } PG_END_TRY(); - PopOverrideSearchPath(); + MemoryContextSwitchTo(oldcxt); restoreCallFromPkgOid(old_value); } #endif @@ -1876,3 +1913,62 @@ void DecreasePackageUseCount(PLpgSQL_function* func) } } +static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, int oldCompileStatus, + 
PLpgSQL_compile_context *curr_compile, List *temp_tableof_index, MemoryContext oldcxt) +{ + foreach(cell, pkg->proc_list) { + if (IsA(lfirst(cell), DoStmt)) { + curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + DoStmt* doStmt = (DoStmt*)lfirst(cell); + if (!isCreate) { + if (!doStmt->isExecuted) { + (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); + temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; + u_sess->plsql_cxt.func_tableof_index = NULL; + if (u_sess->SPI_cxt._connected > -1 && + u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { + SPI_STACK_LOG("begin", NULL, NULL); + _SPI_begin_call(false); + ExecuteDoStmt(doStmt, true); + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(false); + } else { + ExecuteDoStmt(doStmt, true); + } + if (!doStmt->isSpec) { + pkg->isInit = true; + + } + free_func_tableof_index(); + u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; + (void)CompileStatusSwtichTo(oldCompileStatus); + doStmt->isExecuted = true; + } + } else { + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + MemoryContextSwitchTo(oldcxt); + } + if (doStmt->isSpec && !doStmt->isExecuted) { + (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); + temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; + u_sess->plsql_cxt.func_tableof_index = NULL; + if (u_sess->SPI_cxt._connected > -1 && + u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { + SPI_STACK_LOG("begin", NULL, NULL); + _SPI_begin_call(false); + ExecuteDoStmt(doStmt, true); + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(false); + } else if (!doStmt->isExecuted) { + ExecuteDoStmt(doStmt, true); + } + free_func_tableof_index(); + u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; + (void)CompileStatusSwtichTo(oldCompileStatus); + doStmt->isExecuted = true; + } + } + (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); + } + } +} diff --git a/contrib/dolphin/plugin_utility.cpp b/contrib/dolphin/plugin_utility.cpp 
index edcac0390..c95ef2930 100644 --- a/contrib/dolphin/plugin_utility.cpp +++ b/contrib/dolphin/plugin_utility.cpp @@ -472,6 +472,7 @@ static void check_xact_readonly(Node* parse_tree) case T_AlterDatabaseSetStmt: case T_AlterDomainStmt: case T_AlterFunctionStmt: + case T_CompileStmt: case T_AlterRoleSetStmt: case T_AlterObjectSchemaStmt: case T_AlterOwnerStmt: @@ -3384,13 +3385,21 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, #endif PG_TRY(); { + set_create_plsql_type_start(); + u_sess->plsql_cxt.isCreatePkg = true; CreatePackageCommand((CreatePackageStmt*)parse_tree, query_string); + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; } PG_CATCH(); { if (u_sess->plsql_cxt.debug_query_string) { pfree_ext(u_sess->plsql_cxt.debug_query_string); } + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; PG_RE_THROW(); } PG_END_TRY(); @@ -3404,13 +3413,21 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, #endif PG_TRY(); { + set_create_plsql_type_start(); + u_sess->plsql_cxt.isCreatePkg = true; CreatePackageBodyCommand((CreatePackageBodyStmt*)parse_tree, query_string); + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; } PG_CATCH(); { if (u_sess->plsql_cxt.debug_query_string) { pfree_ext(u_sess->plsql_cxt.debug_query_string); } + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; PG_RE_THROW(); } PG_END_TRY(); @@ -5734,10 +5751,15 @@ ProcessUtilitySlow(Node *parse_tree, { PG_TRY(); { + set_create_plsql_type_start(); address = CreateFunction((CreateFunctionStmt*)parse_tree, query_string, InvalidOid); + set_create_plsql_type_end(); + set_function_style_none(); } PG_CATCH(); { + set_create_plsql_type_end(); + set_function_style_none(); #ifndef ENABLE_MULTIPLE_NODES CreateFunctionStmt* stmt = (CreateFunctionStmt*)parse_tree; char* 
schemaname = NULL; @@ -5842,6 +5864,29 @@ ProcessUtilitySlow(Node *parse_tree, #endif } break; + case T_CompileStmt: + { + if (u_sess->SPI_cxt._connected == -1) { + plpgsql_hashtable_clear_invalid_obj(true); + } + u_sess->plsql_cxt.during_compile = true; + u_sess->plsql_cxt.isCreateFunction = true; + if (!enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.during_compile = false; + ereport(ERROR, (errmsg("This operation is not supported."))); + break; + } + u_sess->plsql_cxt.is_alter_compile_stmt = true; + CompileStmt* tmpStmt = (CompileStmt*)parse_tree; + if (tmpStmt->compileItem == COMPILE_FUNCTION || tmpStmt->compileItem == COMPILE_PROCEDURE) { + RecompileFunction(tmpStmt); + } else { + RecompilePackage(tmpStmt); + } + u_sess->plsql_cxt.during_compile = false; + u_sess->plsql_cxt.is_alter_compile_stmt = false; + } break; + case T_IndexStmt: /* CREATE INDEX */ { IndexStmt* stmt = (IndexStmt*)parse_tree; @@ -8474,6 +8519,29 @@ static const char* AlterObjectTypeCommandTag(ObjectType obj_type) return tag; } +static const char* CompileTag(CompileEntry compileItem) +{ + const char* tag = NULL; + switch (compileItem) { + case COMPILE_PROCEDURE: + tag = "ALTER PROCEDURE"; + break; + case COMPILE_FUNCTION: + tag = "ALTER FUNCTION"; + break; + case COMPILE_PACKAGE: + tag = "ALTER PACKAGE"; + break; + case COMPILE_PKG_SPECIFICATION: + tag = "ALTER PACKAGE SPECIFICATION"; + break; + case COMPILE_PKG_BODY: + tag = "ALTER PACKAGE BODY"; + break; + } + return tag; +} + /* * CreateCommandTag * utility to get a string representation of the command operation, @@ -8930,6 +8998,17 @@ const char* CreateCommandTag(Node* parse_tree) tag = "ALTER FUNCTION"; break; + case T_CompileStmt: { + u_sess->plsql_cxt.during_compile = true; + if (!enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.during_compile = false; + ereport(ERROR, (errmsg("This operation is not supported."))); + break; + } + CompileStmt* stmt = (CompileStmt*)parse_tree; + tag = CompileTag(stmt->compileItem); + } 
break; + case T_GrantStmt: { GrantStmt* stmt = (GrantStmt*)parse_tree; @@ -10122,6 +10201,7 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree) break; case T_AlterFunctionStmt: + case T_CompileStmt: case T_CreateEventStmt: case T_AlterEventStmt: case T_DropEventStmt: diff --git a/contrib/dolphin/plugin_utils/adt/json.cpp b/contrib/dolphin/plugin_utils/adt/json.cpp index 87ca14942..6f0862ba8 100644 --- a/contrib/dolphin/plugin_utils/adt/json.cpp +++ b/contrib/dolphin/plugin_utils/adt/json.cpp @@ -434,6 +434,8 @@ static void parse_object(JsonLexContext *lex, JsonSemAction *sem) json_struct_action oend = sem->object_end; JsonTokenType tok; + check_stack_depth(); + if (ostart != NULL) { (*ostart)(sem->semstate); } @@ -507,6 +509,8 @@ static void parse_array(JsonLexContext *lex, JsonSemAction *sem) json_struct_action astart = sem->array_start; json_struct_action aend = sem->array_end; + check_stack_depth(); + if (astart != NULL) { (*astart)(sem->semstate); } diff --git a/contrib/dolphin/plugin_utils/adt/regproc.cpp b/contrib/dolphin/plugin_utils/adt/regproc.cpp index 4c23c2608..4e023346f 100644 --- a/contrib/dolphin/plugin_utils/adt/regproc.cpp +++ b/contrib/dolphin/plugin_utils/adt/regproc.cpp @@ -41,6 +41,7 @@ #include "utils/syscache.h" #include "utils/snapmgr.h" #include "catalog/pg_proc_fn.h" +#include "catalog/pg_type_fn.h" static void parseNameAndArgTypes(const char* string, bool allowNone, List** names, int* nargs, Oid* argtypes); @@ -340,6 +341,39 @@ format_procedure_parts(Oid procedure_oid, List **objnames, List **objargs) } +char * format_procedure_no_visible(Oid procedure_oid) +{ + char* result = NULL; + HeapTuple proctup; + proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(procedure_oid)); + if (HeapTupleIsValid(proctup)) { + Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup); + char* proname = NameStr(procform->proname); + StringInfoData buf; + initStringInfo(&buf); + appendStringInfo(&buf, "%s(", proname); + bool isNull = false; + Datum 
argTypeDtum = ProcedureGetAllArgTypes(proctup, &isNull); + oidvector* proargs = (oidvector*)PG_DETOAST_DATUM(argTypeDtum); + int nargs = proargs->dim1; + int i; + for (i = 0; i < nargs; i++) { + if (i > 0) + appendStringInfoChar(&buf, ','); + MakeTypeNamesStrForTypeOid(&buf, proargs->values[i]); + } + appendStringInfoChar(&buf, ')'); + result = buf.data; + ReleaseSysCache(proctup); + } else { + /* If OID doesn't match any pg_proc entry, return it numerically */ + result = (char*)palloc(NAMEDATALEN); + errno_t rc = snprintf_s(result, NAMEDATALEN, NAMEDATALEN - 1, "%u", procedure_oid); + securec_check_ss(rc, "\0", "\0"); + } + return result; +} + /* * Routine to produce regprocedure names; see format_procedure above. * diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index a8865fae6..bef6e3d2c 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -4680,7 +4680,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) char* p = NULL; bool isOraFunc = false; bool isDolphinStyle = false; - NameData* pkgname = NULL; + char* pkgname = NULL; initStringInfo(&buf); /* Look up the function */ @@ -4738,7 +4738,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) if (proIsProcedure) { if (pkgname != NULL) { appendStringInfo(&buf, "CREATE OR REPLACE PROCEDURE %s(", - quote_qualified_identifier(nsp, pkgname->data, name)); + quote_qualified_identifier(nsp, pkgname, name)); } else if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { appendStringInfo(&buf, "CREATE DEFINER = %s PROCEDURE %s(", GetUserNameFromId(proc->proowner), quote_qualified_identifier(nsp, name)); @@ -4750,7 +4750,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) } else { if (pkgname != NULL) { appendStringInfo(&buf, "CREATE OR REPLACE FUNCTION %s(", - quote_qualified_identifier(nsp, pkgname->data, name)); + quote_qualified_identifier(nsp, pkgname, 
name)); } else if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { appendStringInfo(&buf, "CREATE DEFINER = %s FUNCTION %s(", GetUserNameFromId(proc->proowner), quote_qualified_identifier(nsp, name)); @@ -12438,7 +12438,7 @@ static char* generate_function_name( int p_nvargs; Oid* p_true_typeids = NULL; Oid p_vatype; - NameData* pkgname = NULL; + char* pkgname = NULL; Datum pkgOiddatum; Oid pkgOid = InvalidOid; bool isnull = true; @@ -12494,7 +12494,7 @@ static char* generate_function_name( else nspname = get_namespace_name(procform->pronamespace); if (OidIsValid(pkgOid)) { - result = quote_qualified_identifier(nspname, pkgname->data, proname); + result = quote_qualified_identifier(nspname, pkgname, proname); } else { result = quote_qualified_identifier(nspname, proname); } diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 25df037f0..5d5c5b1eb 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -1204,6 +1204,26 @@ Datum unknownsend(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } +Datum undefinedin(PG_FUNCTION_ARGS) +{ + return unknownin(fcinfo); +} + +Datum undefinedout(PG_FUNCTION_ARGS) +{ + return unknownout(fcinfo); +} + +Datum undefinedrecv(PG_FUNCTION_ARGS) +{ + return unknownrecv(fcinfo); +} + +Datum undefinedsend(PG_FUNCTION_ARGS) +{ + return unknownsend(fcinfo); +} + static Datum text_length_huge(Datum str) { if (pg_database_encoding_max_length() == 1) { diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index e864bb227..c3ac6790b 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -178,6 +178,7 @@ #include "pgstat.h" #include "postmaster/rbcleaner.h" #include "catalog/gs_collation.h" +#include "catalog/gs_dependencies_fn.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/utils/ts_relcache.h" #include "tsdb/common/ts_tablecmds.h" @@ -2121,6 +2122,7 @@ ObjectAddress 
DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object bool relhasuids = false; Oid nspdefcoll = InvalidOid; Oid rel_coll_oid = InvalidOid; + List* depend_extend = NIL; /* * isalter is true, change the owner of the objects as the owner of the @@ -2657,6 +2659,14 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object } else ofTypeId = InvalidOid; + if (enable_plpgsql_gsdependency()) { + ListCell* cell = NULL; + foreach(cell, schema) { + ColumnDef* col_def = (ColumnDef*)lfirst(cell); + depend_extend = lappend(depend_extend, col_def->typname->dependExtend); + } + } + /* * Look up inheritance ancestors and generate relation schema, including * inherited attributes. @@ -2976,7 +2986,8 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object ceLst, storage_type, AccessShareLock, - typaddress); + typaddress, + depend_extend); if (bucketinfo != NULL) { pfree_ext(bucketinfo->bucketcol); pfree_ext(bucketinfo->bucketlist); @@ -3120,7 +3131,11 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object relation_close(rel, NoLock); list_free_ext(rawDefaults); list_free_ext(ceLst); - + if (enable_plpgsql_gsdependency_guc() && relkind != RELKIND_TOASTVALUE) { + if (CompileWhich() == PLPGSQL_COMPILE_NULL) { + (void)gsplsql_build_ref_type_dependency(get_rel_type_id(relationId)); + } + } return address; } @@ -6119,7 +6134,33 @@ ObjectAddress renameatt(RenameStmt* stmt) } TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); - + if (enable_plpgsql_gsdependency_guc()) { + Oid type_oid = get_rel_type_id(relid); + if (OidIsValid(type_oid)) { + GsDependObjDesc obj; + gsplsql_get_depend_obj_by_typ_id(&obj, type_oid, InvalidOid); + HeapTuple obj_tup = gsplsql_search_object(&obj, false); + if (HeapTupleIsValid(obj_tup)) { + heap_freetuple(obj_tup); + pfree_ext(obj.schemaName); + pfree_ext(obj.packageName); + pfree_ext(obj.name); + ereport(ERROR, + 
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("cannot rename attribute of the type because it is dependent on another object."))); + } + obj.refPosType = GSDEPEND_REFOBJ_POS_IN_TYPE; + bool exist_dep = gsplsql_exist_dependency(&obj); + pfree_ext(obj.schemaName); + pfree_ext(obj.packageName); + pfree_ext(obj.name); + if (exist_dep) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("cannot rename attribute of the type because it is dependent on another object."))); + } + } + } // Check relations's internal mask Relation rel = relation_open(relid, AccessShareLock); if ((((uint32)RelationGetInternalMask(rel)) & INTERNAL_MASK_DALTER)) @@ -6700,7 +6741,19 @@ ObjectAddress RenameRelation(RenameStmt* stmt) errdetail("%s table doesn't support this ALTER yet.", ISMLOG(relname) ? "mlog" : "matviewmap")))); } ReleaseSysCache(tuple); - + if (enable_plpgsql_gsdependency_guc()) { + bool exist_dep = false; + char rel_kind = get_rel_relkind(relid); + if (RELKIND_RELATION == rel_kind) { + exist_dep = gsplsql_is_object_depend(get_rel_type_id(relid), GSDEPEND_OBJECT_TYPE_TYPE); + } + if (exist_dep) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator on %s is not allowed, " + "because it is dependent on another object.", stmt->relation->relname))); + } + } TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); /* If table has history table, we need rename corresponding history table */ if (is_ledger_usertable(relid)) { @@ -7969,6 +8022,9 @@ void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) if (stmt->cmds != NIL) { /* process 'ALTER TABLE' cmd */ ATController(stmt, rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), lockmode); + if (enable_plpgsql_gsdependency_guc()) { + (void)gsplsql_build_ref_type_dependency(get_rel_type_id(relid)); + } } else { /* if do not call ATController, close the relation in here, but keep lock until commit */ 
relation_close(rel, NoLock); -- Gitee From 87aa397d4f52ade4c26f1207dc04d24808c45550 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Tue, 24 Oct 2023 18:51:29 +0800 Subject: [PATCH 030/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5=E5=86=85=E6=A0=B8?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/expected/nlssort.out | 20 +- contrib/whale/include/builtin_funcs.ini | 59 ++- contrib/whale/include/plugin_catalog/heap.h | 4 +- .../whale/include/plugin_commands/defrem.h | 3 + contrib/whale/include/plugin_parser/kwlist.h | 2 + .../whale/include/plugin_parser/parse_type.h | 24 +- contrib/whale/include/plugin_parser/scanner.h | 1 + contrib/whale/include/plugin_utils/plpgsql.h | 137 ++++++- .../whale/plugin_optimizer/plan/planner.cpp | 97 +++++ contrib/whale/plugin_parser/gram.y | 156 +++++++- contrib/whale/plugin_parser/parse_type.cpp | 318 ++++++++++++---- contrib/whale/plugin_parser/scan.l | 1 + contrib/whale/plugin_pl/plpgsql/src/gram.y | 266 ++++++++++--- .../whale/plugin_pl/plpgsql/src/pl_comp.cpp | 352 ++++++++++++++---- .../plugin_pl/plpgsql/src/pl_handler.cpp | 256 +++++++++---- contrib/whale/plugin_utility.cpp | 89 ++++- contrib/whale/plugin_utils/adt/ruleutils.cpp | 20 +- contrib/whale/plugin_utils/adt/varlena.cpp | 131 +++++-- contrib/whale/tablecmds.cpp | 64 +++- 19 files changed, 1641 insertions(+), 359 deletions(-) diff --git a/contrib/whale/expected/nlssort.out b/contrib/whale/expected/nlssort.out index 648f5ab68..7cf6cb1c8 100644 --- a/contrib/whale/expected/nlssort.out +++ b/contrib/whale/expected/nlssort.out @@ -6,19 +6,19 @@ INSERT INTO test_sort VALUES ('red'), ('brown'), ('yellow'), ('Purple'); SELECT * FROM test_sort ORDER BY NLSSORT(name, 'en_US.utf8'); name -------- - Purple yellow - red brown + Purple + red (4 rows) SELECT * FROM test_sort ORDER BY NLSSORT(name, ''); name -------- - Purple yellow - red brown + Purple + red (4 rows) SELECT 
set_nls_sort('invalid'); @@ -40,10 +40,10 @@ SELECT set_nls_sort(''); SELECT * FROM test_sort ORDER BY NLSSORT(name); name -------- - Purple yellow - red brown + Purple + red (4 rows) SELECT set_nls_sort('en_US.utf8'); @@ -55,20 +55,20 @@ SELECT set_nls_sort('en_US.utf8'); SELECT * FROM test_sort ORDER BY NLSSORT(name); name -------- - Purple yellow - red brown + Purple + red (4 rows) INSERT INTO test_sort VALUES(NULL); SELECT * FROM test_sort ORDER BY NLSSORT(name); name -------- - Purple yellow - red brown + Purple + red (5 rows) diff --git a/contrib/whale/include/builtin_funcs.ini b/contrib/whale/include/builtin_funcs.ini index a2396fd20..88498b5bd 100644 --- a/contrib/whale/include/builtin_funcs.ini +++ b/contrib/whale/include/builtin_funcs.ini @@ -2500,6 +2500,10 @@ "dsqrt", 1, AddBuiltinFunc(_0(230), _1("dsqrt"), _2(1), _3(true), _4(false), _5(dsqrt), _6(701), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("dsqrt"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("square root"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "dss_io_stat", 1, + AddBuiltinFunc(_0(6990), _1("dss_io_stat"), _2(1), _3(true), _4(false), _5(dss_io_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, INT4OID), _21(4, INT4OID, INT8OID, INT8OID, INT4OID), _22(4, 'i', 'o', 'o', 'o'), _23(4, "duration", "read_kilobyte_per_sec", "write_kilobyte_per_sec", "io_times"), _24(NULL), _25("dss_io_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( 
"dsynonym_init", 1, AddBuiltinFunc(_0(3728), _1("dsynonym_init"), _2(1), _3(true), _4(false), _5(dsynonym_init), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("dsynonym_init"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("(internal)"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -2926,6 +2930,10 @@ "float8_interval", 1, AddBuiltinFunc(_0(4229), _1("float8_interval"), _2(1), _3(true), _4(false), _5(float8_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("float8_interval"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "float8_to_interval", 1, + AddBuiltinFunc(_0(4230), _1("float8_to_interval"), _2(2), _3(true), _4(false), _5(float8_to_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 701, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("float8_to_interval"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "float8_list_agg_noarg2_transfn", 1, AddBuiltinFunc(_0(3573), _1("float8_list_agg_noarg2_transfn"), _2(2), _3(false), _4(false), _5(float8_list_agg_noarg2_transfn), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), 
_14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2281, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("float8_list_agg_noarg2_transfn"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -3175,7 +3183,7 @@ ), AddFuncGroup( "get_instr_unique_sql", 1, - AddBuiltinFunc(_0(5702), _1("get_instr_unique_sql"), _2(0), _3(false), _4(true), _5(get_instr_unique_sql), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(44, 19, 23, 19, 26, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 1184, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(44, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(44, "node_name", "node_id", "user_name", "user_id", "unique_sql_id", "query", "n_calls", "min_elapse_time", "max_elapse_time", "total_elapse_time", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "n_soft_parse", "n_hard_parse", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "last_updated", "sort_count", "sort_time", "sort_mem_used", "sort_spill_count", "sort_spill_size", "hash_count", "hash_time", "hash_mem_used", "hash_spill_count", "hash_spill_size"), _24(NULL), _25("get_instr_unique_sql"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), 
_35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(5702), _1("get_instr_unique_sql"), _2(0), _3(false), _4(true), _5(get_instr_unique_sql), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(60, 19, 23, 19, 26, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 1184, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(60, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(60, "node_name", "node_id", "user_name", "user_id", "unique_sql_id", "query", "n_calls", "min_elapse_time", "max_elapse_time", "total_elapse_time", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "n_soft_parse", "n_hard_parse", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "last_updated", "sort_count", "sort_time", "sort_mem_used", "sort_spill_count", "sort_spill_size", "hash_count", "hash_time", "hash_mem_used", "hash_spill_count", "hash_spill_size", "net_send_time", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown"), _24(NULL), _25("get_instr_unique_sql"), _26(NULL), _27(NULL), _28(NULL), _29(0), 
_30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "get_instr_user_login", 1, @@ -7515,6 +7523,13 @@ "numtodsinterval", 1, AddBuiltinFunc(_0(3172), _1("numtodsinterval"), _2(2), _3(true), _4(false), _5(numtodsinterval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 1700, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("numtodsinterval"), _26("-"), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "num_to_interval", 4, + AddBuiltinFunc(_0(4223), _1("num_to_interval"), _2(2), _3(true), _4(false), _5(int1_to_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 5545, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("int1_to_interval"), _26("-"), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(4224), _1("num_to_interval"), _2(2), _3(true), _4(false), _5(int2_to_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 21, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("int2_to_interval"), _26("-"), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(4225), _1("num_to_interval"), _2(2), _3(true), _4(false), _5(int4_to_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), 
_9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 23, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("int4_to_interval"), _26("-"), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(4226), _1("num_to_interval"), _2(2), _3(true), _4(false), _5(numeric_to_interval), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 1700, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("numeric_to_interval"), _26("-"), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "nvarchar2", 1, AddBuiltinFunc(_0(3961), _1("nvarchar2"), _2(3), _3(true), _4(false), _5(nvarchar2), _6(3969), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(3097), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 3969, 23, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("nvarchar2"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -9094,7 +9109,7 @@ ), AddFuncGroup( "pg_stat_get_wlm_session_info", 1, - AddBuiltinFunc(_0(5002), _1("pg_stat_get_wlm_session_info"), _2(1), _3(false), _4(true), _5(pg_stat_get_wlm_session_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 26), _21(87, 26, 25, 25, 25, 25, 25, 869, 25, 23, 25, 20, 1184, 1184, 20, 20, 25, 25, 25, 25, 23, 23, 23, 23, 23, 25, 23, 23, 23, 23, 20, 20, 20, 23, 20, 
20, 20, 23, 23, 23, 23, 23, 25, 20, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(87, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(87, "datid", "dbname", "schemaname", "nodename", "username", "application_name", "client_addr", "client_hostname", "client_port", "query_band", "block_time", "start_time", "finish_time", "duration", "estimate_total_time", "status", "abort_info", "resource_pool", "control_group", "estimate_memory", "min_peak_memory", "max_peak_memory", "average_peak_memory", "memory_skew_percent", "spill_info", "min_spill_size", "max_spill_size", "average_spill_size", "spill_skew_percent", "min_dn_time", "max_dn_time", "average_dn_time", "dntime_skew_percent", "min_cpu_time", "max_cpu_time", "total_cpu_time", "cpu_skew_percent", "min_peak_iops", "max_peak_iops", "average_peak_iops", "iops_skew_percent", "warning", "queryid", "query", "query_plan", "node_group", "cpu_top1_node_name", "cpu_top2_node_name", "cpu_top3_node_name", "cpu_top4_node_name", "cpu_top5_node_name", "mem_top1_node_name", "mem_top2_node_name", "mem_top3_node_name", "mem_top4_node_name", "mem_top5_node_name", "cpu_top1_value", "cpu_top2_value", "cpu_top3_value", "cpu_top4_value", "cpu_top5_value", "mem_top1_value", "mem_top2_value", "mem_top3_value", "mem_top4_value", "mem_top5_value", "top_mem_dn", "top_cpu_dn", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", 
"db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "net_send_time", "data_io_time", "is_slow_query"), _24(NULL), _25("pg_stat_get_wlm_session_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(5002), _1("pg_stat_get_wlm_session_info"), _2(1), _3(false), _4(true), _5(pg_stat_get_wlm_session_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 26), _21(102, 26, 25, 25, 25, 25, 25, 869, 25, 23, 25, 20, 1184, 1184, 20, 20, 25, 25, 25, 25, 23, 23, 23, 23, 23, 25, 23, 23, 23, 23, 20, 20, 20, 23, 20, 20, 20, 23, 23, 23, 23, 23, 25, 20, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(102, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(102, "datid", "dbname", "schemaname", "nodename", "username", "application_name", "client_addr", "client_hostname", "client_port", "query_band", "block_time", "start_time", "finish_time", "duration", "estimate_total_time", "status", "abort_info", "resource_pool", "control_group", "estimate_memory", "min_peak_memory", "max_peak_memory", 
"average_peak_memory", "memory_skew_percent", "spill_info", "min_spill_size", "max_spill_size", "average_spill_size", "spill_skew_percent", "min_dn_time", "max_dn_time", "average_dn_time", "dntime_skew_percent", "min_cpu_time", "max_cpu_time", "total_cpu_time", "cpu_skew_percent", "min_peak_iops", "max_peak_iops", "average_peak_iops", "iops_skew_percent", "warning", "queryid", "query", "query_plan", "node_group", "cpu_top1_node_name", "cpu_top2_node_name", "cpu_top3_node_name", "cpu_top4_node_name", "cpu_top5_node_name", "mem_top1_node_name", "mem_top2_node_name", "mem_top3_node_name", "mem_top4_node_name", "mem_top5_node_name", "cpu_top1_value", "cpu_top2_value", "cpu_top3_value", "cpu_top4_value", "cpu_top5_value", "mem_top1_value", "mem_top2_value", "mem_top3_value", "mem_top4_value", "mem_top5_value", "top_mem_dn", "top_cpu_dn", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "net_send_time", "data_io_time", "is_slow_query", "srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown"), _24(NULL), _25("pg_stat_get_wlm_session_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "pg_stat_get_wlm_session_info_internal", 1, @@ -10976,14 +10991,18 @@ AddFuncGroup( "ss_buffer_ctrl", 1, AddBuiltinFunc(_0(4214), _1("ss_buffer_ctrl"), _2(0), _3(false), _4(true), _5(ss_buffer_ctrl), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), 
_18('s'), _19(0), _20(0), _21(13, 23, 5545, 5545, 5545, 5545, 5545, 23, 26, 26, 26, 20, 5545, 26), _22(13, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(13, "bufferid", "is_remote_dirty", "lock_mode", "is_edp", "force_request", "need_flush", "buf_id", "state", "pblk_relno", "pblk_blkno", "pblk_lsn", "seg_fileno", "seg_blockno"), _24(NULL), _25("ss_buffer_ctrl"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "ss_txnstatus_cache_stat", 1, + AddBuiltinFunc(_0(8889), _1("ss_txnstatus_cache_stat"), _2(0), _3(true), _4(true), _5(ss_txnstatus_cache_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(8, 20, 20, 20, 701, 701, 701, 20, 701), _22(8, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(8, "vcache_gets", "hcache_gets", "nio_gets", "avg_hcache_gettime_us", "avg_nio_gettime_us", "cache_hit_rate", "hcache_eviction", "avg_eviction_refcnt"), _24(NULL), _25("ss_txnstatus_cache_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "start_collect_workload", 1, AddBuiltinFunc(_0(7810), _1("start_collect_workload"), _2(2), _3(true), _4(false), _5(start_collect_workload), _6(16), _7(PG_SQLADVISOR_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(2), _20(2, 23, 23), _21(NULL), _22(NULL), _23(NULL), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 27 :constvalue 4 [ 16 39 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false 
:not_found false :null_open false :null_fetch false} {CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location 27 :constvalue 4 [ 0 4 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("start_collect_workload"), _26(NULL), _27(NULL), _28(NULL), _29(2, 0, 1), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "standby_statement_history", 2, - AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(54, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25), _22(54, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(54, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", 
"pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), - AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(55, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25), _22(55, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(55, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", 
"data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(3118), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history_1v), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 16), _21(71, 16, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(71, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(71, "only_slow", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", "unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", 
"parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history_1v"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(3119), _1("standby_statement_history"), _2(1), _3(false), _4(true), _5(standby_statement_history), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10000), _12(1185), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 16, 1185), _21(72, 16, 1185, 19, 19, 23, 19, 25, 25, 23, 20, 20, 25, 1184, 1184, 20, 20, 20, 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25, 25, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 17, 16, 25, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(72, 'i', 'v', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(72, "only_slow", "finish_time", "db_name", "schema_name", "origin_node", "user_name", "application_name", "client_addr", "client_port", 
"unique_query_id", "debug_query_id", "query", "start_time", "finish_time", "slow_sql_threshold", "transaction_id", "thread_id", "session_id", "n_soft_parse", "n_hard_parse", "query_plan", "n_returned_rows", "n_tuples_fetched", "n_tuples_returned", "n_tuples_inserted", "n_tuples_updated", "n_tuples_deleted", "n_blocks_fetched", "n_blocks_hit", "db_time", "cpu_time", "execution_time", "parse_time", "plan_time", "rewrite_time", "pl_execution_time", "pl_compilation_time", "data_io_time", "net_send_info", "net_recv_info", "net_stream_send_info", "net_stream_recv_info", "lock_count", "lock_time", "lock_wait_count", "lock_wait_time", "lock_max_count", "lwlock_count", "lwlock_wait_count", "lwlock_time", "lwlock_wait_time", "details", "is_slow_sql", "trace_id", "advise", "net_send_time","srt1_q", "srt2_simple_query", "srt3_analyze_rewrite", "srt4_plan_query", "srt5_light_query", "srt6_p", "srt7_b", "srt8_e", "srt9_d", "srt10_s", "srt11_c", "srt12_u", "srt13_before_query", "srt14_after_query","rtt_unknown","parent_query_id"),_24(NULL), _25("standby_statement_history"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "statement_detail_decode", 1, @@ -11199,6 +11218,10 @@ AddFuncGroup( "text_int8", 1, AddBuiltinFunc(_0(4191), _1("text_int8"), _2(1), _3(true), _4(false), _5(text_int8), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("text_int8"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "text_interval", 1, + AddBuiltinFunc(_0(4211), _1("text_interval"), _2(2), _3(true), _4(false), _5(text_interval), _6(1186), 
_7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 25, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("text_interval"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(true), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "text_larger", 1, AddBuiltinFunc(_0(458), _1("text_larger"), _2(2), _3(true), _4(false), _5(text_larger), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("text_larger"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("larger of two"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -12429,6 +12452,22 @@ AddFuncGroup( "ubtvacuumcleanup", 1, AddBuiltinFunc(_0(4762), _1("ubtvacuumcleanup"), _2(2), _3(true), _4(false), _5(ubtvacuumcleanup), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 2281, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("ubtvacuumcleanup"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "undefinedin", 1, + AddBuiltinFunc(_0(UNDEFINEDINPUT), _1("undefinedin"), _2(1), _3(true), _4(false), _5(undefinedin), _6(4408), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedin"), _26(NULL), _27(NULL), 
_28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedout", 1, + AddBuiltinFunc(_0(UNDEFINEDOUTPUT), _1("undefinedout"), _2(1), _3(true), _4(false), _5(undefinedout), _6(2275), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 705), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedout"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedrecv", 1, + AddBuiltinFunc(_0(UNDEFINEDRECV), _1("undefinedrecv"), _2(1), _3(true), _4(false), _5(undefinedrecv), _6(4408), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedrecv"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "undefinedsend", 1, + AddBuiltinFunc(_0(UNDEFINEDSEND), _1("undefinedsend"), _2(1), _3(true), _4(false), _5(undefinedsend), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 705), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("undefinedsend"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "unique_key_recheck", 1, AddBuiltinFunc(_0(1250), _1("unique_key_recheck"), _2(0), _3(true), _4(false), _5(unique_key_recheck), 
_6(2279), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("unique_key_recheck"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("deferred UNIQUE constraint check"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -12885,3 +12924,15 @@ AddFuncGroup( "gs_repair_file", 1, AddBuiltinFunc(_0(4771), _1("gs_repair_file"), _2(3), _3(true), _4(true), _5(gs_repair_file), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 26, 25, 23), _21(3, 26, 25, 23), _22(3, 'i', 'i', 'i'), _23(3, "tableoid", "path", "timeout"), _24(NULL), _25("gs_repair_file"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_hot_standby_space_info", 1, + AddBuiltinFunc(_0(6218), _1("gs_hot_standby_space_info"), _2(0), _3(false), _4(true), _5(gs_hot_standby_space_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(6, 28, 28, 28, 28, 28, 28), _22(6, 'o', 'o', 'o', 'o', 'o', 'o'), _23(6, "base_page_file_num", "base_page_total_size", "lsn_info_meta_file_num", "lsn_info_meta_total_size", "block_info_meta_file_num", "block_info_meta_total_size"), _24(NULL), _25("gs_hot_standby_space_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "query_node_reform_info", 1, + AddBuiltinFunc(_0(2867), _1("query_node_reform_info"), 
_2(3), _3(true), _4(true), _5(query_node_reform_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(64), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(10, INT4OID, TEXTOID, TEXTOID, TEXTOID, BOOLOID, TEXTOID, TEXTOID, INT4OID, TEXTOID, TEXTOID), _22(10,'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "reform_node_id", "reform_type", "reform_start_time", "reform_end_time", "is_reform_success", "redo_start_time", "redo_end_time", "xlog_total_bytes", "hashmap_construct_time", "action"), _24(NULL), _25("query_node_reform_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33("query node reform information"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "query_page_distribution_info", 1, + AddBuiltinFunc(_0(2866), _1("query_page_distribution_info"), _2(3), _3(true), _4(true), _5(query_page_distribution_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(64), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, TEXTOID, INT4OID, INT4OID), _21(11, TEXTOID, INT4OID, INT4OID, INT4OID, BOOLOID, BOOLOID, BOOLOID, TEXTOID, OIDOID, OIDOID, BOOLOID), _22(11, 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "relname", "fork", "blockno", "instance_id", "is_master", "is_owner", "is_copy", "lock_mode", "mem_lsn", "disk_lsn", "is_dirty"), _24(NULL), _25("query_page_distribution_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: query page distribution information "), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), diff --git a/contrib/whale/include/plugin_catalog/heap.h b/contrib/whale/include/plugin_catalog/heap.h index f2bd42173..55f4c9c65 100644 --- a/contrib/whale/include/plugin_catalog/heap.h +++ 
b/contrib/whale/include/plugin_catalog/heap.h @@ -135,7 +135,8 @@ extern Oid heap_create_with_catalog(const char *relname, List* ceLst = NULL, StorageType storage_type = HEAP_DISK, LOCKMODE partLockMode = AccessExclusiveLock, - ObjectAddress *typaddress= NULL); + ObjectAddress *typaddress= NULL, + List* depend_extend = NIL); extern void heap_create_init_fork(Relation rel); @@ -261,4 +262,5 @@ extern int GetIndexKeyAttsByTuple(Relation relation, HeapTuple indexTuple); extern bool GetIndexVisibleStateByTuple(HeapTuple indexTuple); extern void AddOrDropUidsAttr(Oid relOid, bool oldRelHasUids, bool newRelHasUids); +extern char* heap_serialize_row_attr(Oid rel_oid, bool* depend_undefined); #endif /* HEAP_H */ diff --git a/contrib/whale/include/plugin_commands/defrem.h b/contrib/whale/include/plugin_commands/defrem.h index da016d790..0ab2eca12 100644 --- a/contrib/whale/include/plugin_commands/defrem.h +++ b/contrib/whale/include/plugin_commands/defrem.h @@ -58,6 +58,7 @@ extern void RemoveFunctionById(Oid funcOid); extern void remove_encrypted_proc_by_id(Oid funcOid); extern void RemovePackageById(Oid pkgOid, bool isBody = false); extern void DeleteFunctionByPackageOid(Oid package_oid); +extern void DeleteFunctionByFuncTuple(HeapTuple func_tup); extern void SetFunctionReturnType(Oid funcOid, Oid newRetType); extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType); extern ObjectAddress AlterFunctionOwner(List* name, List* argtypes, Oid newOwnerId); @@ -76,6 +77,7 @@ extern void IsThereOpClassInNamespace(const char *opcname, Oid opcmethod, Oid opcnamespace); extern void IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod, Oid opfnamespace); +extern void RecompileFunction(CompileStmt* stmt); /* commands/operatorcmds.c */ extern void CreatePackageCommand(CreatePackageStmt* parsetree, const char* queryString); @@ -89,6 +91,7 @@ extern ObjectAddress AlterOperatorOwner(List* name, TypeName* typeName1, TypeNam extern void AlterOperatorOwner_oid(Oid 
operOid, Oid newOwnerId); extern ObjectAddress AlterOperatorNamespace(List* names, List* argtypes, const char* newschema); extern Oid AlterOperatorNamespace_oid(Oid operOid, Oid newNspOid); +extern void RecompilePackage(CompileStmt* stmt); /* commands/aggregatecmds.c */ extern ObjectAddress DefineAggregate(List* name, List* args, bool oldstyle, List* parameters); diff --git a/contrib/whale/include/plugin_parser/kwlist.h b/contrib/whale/include/plugin_parser/kwlist.h index bad53b121..7342d3311 100644 --- a/contrib/whale/include/plugin_parser/kwlist.h +++ b/contrib/whale/include/plugin_parser/kwlist.h @@ -133,6 +133,7 @@ PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD) PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD) PG_KEYWORD("compact", COMPACT, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("compatible_illegal_chars", COMPATIBLE_ILLEGAL_CHARS, UNRESERVED_KEYWORD) +PG_KEYWORD("compile", COMPILE, UNRESERVED_KEYWORD) PG_KEYWORD("complete", COMPLETE, UNRESERVED_KEYWORD) PG_KEYWORD("completion", COMPLETION, UNRESERVED_KEYWORD) PG_KEYWORD("compress", COMPRESS, UNRESERVED_KEYWORD) @@ -592,6 +593,7 @@ PG_KEYWORD("snapshot", SNAPSHOT, UNRESERVED_KEYWORD) PG_KEYWORD("some", SOME, RESERVED_KEYWORD) PG_KEYWORD("source", SOURCE_P, UNRESERVED_KEYWORD) PG_KEYWORD("space", SPACE, UNRESERVED_KEYWORD) +PG_KEYWORD("specification", SPECIFICATION, UNRESERVED_KEYWORD) PG_KEYWORD("spill", SPILL, UNRESERVED_KEYWORD) PG_KEYWORD("split", SPLIT, UNRESERVED_KEYWORD) PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD) diff --git a/contrib/whale/include/plugin_parser/parse_type.h b/contrib/whale/include/plugin_parser/parse_type.h index 42f84eed9..e94c999da 100644 --- a/contrib/whale/include/plugin_parser/parse_type.h +++ b/contrib/whale/include/plugin_parser/parse_type.h @@ -19,14 +19,18 @@ typedef HeapTuple Type; -extern Type LookupTypeName(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool print_notice = true); +extern Type LookupTypeNameSupportUndef(ParseState *pstate, const TypeName 
*typeName, + int32 *typmod_p, bool print_notice = true); +extern Type LookupTypeName(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool print_notice = true, + TypeDependExtend* dependExtend = NULL); extern Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool temp_ok, - bool print_notice = true); + bool print_notice = true, TypeDependExtend* dependExtend = NULL); extern Oid LookupPctTypeInPackage(RangeVar* rel, Oid pkgOid, const char* field); extern Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid = InvalidOid, Oid namespaceId = InvalidOid); -extern Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p); +extern Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p, TypeDependExtend* dependExtend = NULL); extern Oid typenameTypeId(ParseState* pstate, const TypeName* typname); -extern void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p); +extern void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p, + TypeDependExtend* dependExtend = NULL); extern char* TypeNameToString(const TypeName* typname); extern char* TypeNameListToString(List* typenames); @@ -55,11 +59,19 @@ extern bool IsTypeSupportedByORCRelation(_in_ Oid typeOid); extern bool IsTypeSupportedByTsStore(_in_ int kvtype, _in_ Oid typeOid); extern bool IsTypeSupportedByUStore (_in_ Oid typeOid, _in_ int32 typeMod); extern TypeName *typeStringToTypeName(const char *str); -extern void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p); +extern void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p, TypeDependExtend* depenExtend = NULL); extern bool IsTypeTableInInstallationGroup(const Type type_tup); -extern HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p); +extern HeapTuple FindPkgVariableType(ParseState* pstate, 
const TypeName* typname, int32* typmod_p, + TypeDependExtend* depend_extend = NULL); extern char* CastPackageTypeName(const char* typName, Oid pkgOid, bool isPackage, bool isPublic = true); extern bool IsBinaryType(Oid typid); #define ISCOMPLEX(typeid) (typeidTypeRelid(typeid) != InvalidOid) extern void check_type_supports_multi_charset(Oid typid, bool allow_array); +extern char* ParseTypeName(const char* typName, Oid pkgOid); +typedef enum TypeTupStatus { + NormalTypeTup = 0, + UndefineTypeTup = 1, + InvalidTypeTup = 2 +} TypeTupStatus; +extern TypeTupStatus GetTypeTupStatus(Type typ); #endif /* PARSE_TYPE_H */ diff --git a/contrib/whale/include/plugin_parser/scanner.h b/contrib/whale/include/plugin_parser/scanner.h index 1ab1f419c..8640517e6 100644 --- a/contrib/whale/include/plugin_parser/scanner.h +++ b/contrib/whale/include/plugin_parser/scanner.h @@ -117,6 +117,7 @@ typedef struct core_yy_extra_type { bool include_ora_comment; /* dont igore comment when ture */ int func_param_begin; /* function and procedure param string start pos,exclude left parenthesis */ int func_param_end; /* function and procedure param string end pos,exclude right parenthesis */ + int return_pos_end; bool isPlpgsqlKeyWord; const PlpgsqlKeywordValue* plKeywordValue; bool is_delimiter_name; diff --git a/contrib/whale/include/plugin_utils/plpgsql.h b/contrib/whale/include/plugin_utils/plpgsql.h index c3136b99d..063779470 100644 --- a/contrib/whale/include/plugin_utils/plpgsql.h +++ b/contrib/whale/include/plugin_utils/plpgsql.h @@ -272,7 +272,7 @@ typedef enum { PLPGSQL_NORMAL, PLPGSQL_SIGNAL, PLPGSQL_RESIGNAL_WITH_SQLSTATE, /* -------- * condition_information_item_name of the SIGNAL/RESIGNAL - * ref: https://docs.orafce.com/cd/E17952_01/mysql-5.7-en/signal.html + * ref: https://docs.oracle.com/cd/E17952_01/mysql-5.7-en/signal.html */ typedef enum { @@ -290,6 +290,35 @@ typedef enum { PLPGSQL_CURSOR_NAME } PLpgSQL_con_info_item_value; +/* + * GsDependency object type + */ +typedef enum { 
+ GSDEPEND_OBJECT_TYPE_INVALID = 0, + GSDEPEND_OBJECT_TYPE_UNDEFIND, + GSDEPEND_OBJECT_TYPE_VARIABLE, + GSDEPEND_OBJECT_TYPE_TYPE, + GSDEPEND_OBJECT_TYPE_FUNCTION, + GSDEPEND_OBJECT_TYPE_PROCHEAD, + GSDEPEND_OBJECT_TYPE_PKG, + GSDEPEND_OBJECT_TYPE_PKG_BODY, + GSDEPEND_OBJECT_TYPE_PKG_RECOMPILE +} GsDependObjectType; + +/* +* GsDependency reference object position type +*/ +#define GSDEPEND_REFOBJ_POS_INVALID 0 +#define GSDEPEND_REFOBJ_POS_IN_TYPE 1 +#define GSDEPEND_REFOBJ_POS_IN_PKGSPEC 2 +#define GSDEPEND_REFOBJ_POS_IN_PROCHEAD 4 +#define GSDEPEND_REFOBJ_POS_IN_PROCBODY 8 +#define GSDEPEND_REFOBJ_POS_IN_PKGBODY 16 +#define GSDEPEND_REFOBJ_POS_IN_PKGRECOMPILE_OBJ (GSDEPEND_REFOBJ_POS_IN_PKGSPEC | \ + GSDEPEND_REFOBJ_POS_IN_PKGBODY | GSDEPEND_REFOBJ_POS_IN_PROCBODY) +#define GSDEPEND_REFOBJ_POS_IN_PKGALL_OBJ (GSDEPEND_REFOBJ_POS_IN_PKGRECOMPILE_OBJ) +#define GSDEPEND_REFOBJ_POS_IN_PROCALL (GSDEPEND_REFOBJ_POS_IN_PROCHEAD | GSDEPEND_REFOBJ_POS_IN_PROCBODY) + /********************************************************************** * Node and structure definitions **********************************************************************/ @@ -303,6 +332,46 @@ typedef struct PLpgSQL_datum { /* Generic datum array item */ bool ispkg; } PLpgSQL_datum; +/* + * DependenciesDatum is the common supertype for DependenciesUndefined, DependenciesVariable, + * DependenciesType, DependenciesProchead + */ +typedef struct DependenciesDatum { /* Generic datum array item */ + NodeTag type; +} DependenciesDatum; + +/* + * PLpgSQL dependencies undefined/type/variable/function/procedure + */ +typedef struct DependenciesUndefined { /* Generic datum array item */ + NodeTag type; +} DependenciesUndefined; + +typedef struct DependenciesVariable { + NodeTag type; + char* typName; + int32 typMod; + char* extraInfo; +} DependenciesVariable; + +typedef struct DependenciesType{ + NodeTag type; + char typType; + char typCategory; + char* attrInfo; + bool isRel; + char* elemTypName; + char* 
idxByTypName; +} DependenciesType; + +typedef struct DependenciesProchead{ + NodeTag type; + bool undefined; + char* proName; + char* proArgSrc; + char* funcHeadSrc; +} DependenciesProchead; + typedef enum PLpgSQL_trigtype { PLPGSQL_DML_TRIGGER, @@ -378,6 +447,7 @@ typedef struct PLpgSQL_expr { /* SQpL Query to plan and execute */ bool is_have_tableof_index_func; /* dno maybe is 0, so need an extra variable */ int tableof_func_dno; + uint64 unique_sql_id; } PLpgSQL_expr; typedef struct { /* openGauss data type */ @@ -403,6 +473,8 @@ typedef struct { /* openGauss data type */ * then convert to tuple descriptior. */ Oid cursorCompositeOid = InvalidOid; + Oid tableofOid; + TypeDependExtend* dependExtend; } PLpgSQL_type; typedef struct { @@ -973,7 +1045,7 @@ typedef struct { /* condition information item name for signal/resignal */ char *table_name; char *column_name; char *cursor_name; - int sqlerrcode; /* mysql_errno */ + char *sqlerrcode; /* mysql_errno */ } PLpgSQL_condition_info_item; typedef struct { /* siganl_information_item */ @@ -1161,6 +1233,10 @@ typedef struct PLpgSQL_function { /* Complete compiled function */ bool is_autonomous; bool is_plpgsql_func_with_outparam; bool is_insert_gs_source; + /* gs depend */ + bool isValid; + bool is_need_recompile; + Oid namespaceOid; } PLpgSQL_function; class AutonomousSession; @@ -1362,6 +1438,13 @@ typedef struct plpgsql_pkg_hashent { } plpgsql_pkg_HashEnt; +#define PACKAGE_INVALID 0x0 +#define PACKAGE_SPEC_VALID 0x1 +#define PACKAGE_SPEC_INVALID 0xFE +#define PACKAGE_BODY_VALID 0x2 +#define PACKAGE_BODY_INVALID 0xFD +#define PACKAGE_VALID 0x3 + typedef struct PLpgSQL_package { /* Complete compiled package */ char* pkg_signature; Oid pkg_oid; @@ -1407,6 +1490,15 @@ typedef struct PLpgSQL_package { /* Complete compiled package */ knl_u_plpgsql_pkg_context* u_pkg; Oid namespaceOid; bool isInit; + + /** + * gs_dependencies_fn.h + */ + NodeTag type; + List* preRefObjectOidList; + List* preSelfObjectList; + unsigned char 
status; + bool is_need_recompile; } PLpgSQL_package; @@ -1498,6 +1590,19 @@ typedef struct PlDebugEntry { PLpgSQL_function* func; } PlDebugEntry; +typedef enum AddBreakPointError { + ADD_BP_ERR_ALREADY_EXISTS = -1, + ADD_BP_ERR_OUT_OF_RANGE = -2, + ADD_BP_ERR_INVALID_BP_POS = -3 +} AddBreakPointError; + +typedef struct PLDebug_codeline { + NodeTag type; + int lineno; + char* code; + bool canBreak; +} PLDebug_codeline; + typedef List* (*RawParserHook)(const char*, List**); const int MAXINT8LEN = 25; @@ -1528,6 +1633,7 @@ const char DEBUG_STEP_INTO_HEADER = 's'; const char DEBUG_STEP_INTO_HEADER_AFTER = 'S'; const char DEBUG_BACKTRACE_HEADER = 't'; const char DEBUG_SET_VARIABLE_HEADER = 'h'; +const char DEBUG_INFOCODE_HEADER = 'i'; /* server return message */ const int DEBUG_SERVER_SUCCESS = 0; @@ -1633,6 +1739,7 @@ extern void RecvUnixMsg(const char* buf, int bufLen, char* destBuf, int destLen) extern char* ResizeDebugBufferIfNecessary(char* buffer, int* oldSize, int needSize); extern void ReleaseDebugCommIdx(int idx); extern void SendUnixMsg(int socket, const char* val, int len, bool is_client); +extern List* collect_breakable_line(PLpgSQL_function* func); /********************************************************************** * Function declarations @@ -1649,7 +1756,7 @@ typedef struct plpgsql_hashent { DListCell* cell; /* Dlist cell for delete function compile results. 
*/ } plpgsql_HashEnt; -extern PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator); +extern PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator, bool isRecompile = false); extern void delete_function(PLpgSQL_function* func, bool fromPackage = false); extern PLpgSQL_function* plpgsql_compile_nohashkey(FunctionCallInfo fcinfo); /* parse trigger func */ extern PLpgSQL_function* plpgsql_compile_inline(char* proc_source); @@ -1667,10 +1774,10 @@ extern bool plpgsql_parse_tripword(char* word1, char* word2, char* word3, PLwdat extern bool plpgsql_parse_quadword(char* word1, char* word2, char* word3, char* word4, PLwdatum* wdatum, PLcword* cword, int* tok_flag); extern PLpgSQL_type* plpgsql_parse_wordtype(char* ident); -extern PLpgSQL_type* plpgsql_parse_cwordtype(List* idents); +extern PLpgSQL_type* plpgsql_parse_cwordtype(List* idents, TypeDependExtend* dependExtend = NULL); extern PLpgSQL_type* plpgsql_parse_wordrowtype(char* ident); extern PLpgSQL_type* plpgsql_parse_cwordrowtype(List* idents); -extern PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation); +extern PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation, TypeDependExtend* type_depend_extend = NULL); extern PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation); extern PLpgSQL_type* plpgsql_build_nested_datatype(); extern const char *plpgsql_code_int2cstring(int sqlcode); @@ -1695,12 +1802,13 @@ extern bool plpgsql_check_colocate(Query* query, RangeTblEntry* rte, void* plpgs extern void plpgsql_HashTableDeleteAll(); extern void plpgsql_hashtable_delete_and_check_invalid_item(int classId, Oid objId); extern void delete_package_and_check_invalid_item(Oid pkgOid); +extern void plpgsql_hashtable_clear_invalid_obj(bool need_clear = false); extern void plpgsql_HashTableDelete(PLpgSQL_function* func); extern bool plpgsql_get_current_value_stp_with_exception(); extern void 
plpgsql_restore_current_value_stp_with_exception(bool saved_current_stp_with_exception); extern void plpgsql_set_current_value_stp_with_exception(); extern void delete_pkg_in_HashTable(Oid pkgOid); -extern PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate=false); +extern PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate=false, bool isRecompile = false); extern PLpgSQL_datum* plpgsql_pkg_adddatum(const List* wholeName, char** objname, char** pkgname); extern int plpgsql_pkg_adddatum2ns(const List* name); extern bool plpgsql_check_insert_colocate( @@ -1817,7 +1925,7 @@ extern PLpgSQL_nsitem* plpgsql_ns_lookup( extern PLpgSQL_nsitem* plpgsql_ns_lookup_label(PLpgSQL_nsitem* ns_cur, const char* name); extern void free_func_tableof_index(); extern void free_temp_func_tableof_index(List* temp_tableof_index); - +extern char* GetPackageSchemaName(Oid packageOid); /* ---------- * Other functions in pl_funcs.c @@ -1952,6 +2060,12 @@ typedef struct ExceptionContext { PLpgSQL_declare_handler handler_type; } ExceptionContext; +/*Save the type recorded during the cursor definition*/ +typedef struct CursorRecordType { + char* cursor_name; + Oid type_oid; +} CursorRecordType; + /* Quick access array state */ #define IS_ARRAY_STATE(state_list, state) ((state_list && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) ? 
\ (linitial_int(state_list) == state) : false) @@ -1996,4 +2110,13 @@ extern void stp_reserve_subxact_resowner(ResourceOwner resowner); extern void stp_cleanup_subxact_resowner(int64 minStackId); extern void stp_cleanup_subxact_resource(int64 stackId); extern void InsertGsSource(Oid objId, Oid nspid, const char* name, const char* type, bool status); +extern void examine_parameter_list(List* parameters, Oid languageOid, const char* queryString, + oidvector** parameterTypes, TypeDependExtend** type_depend_extend, ArrayType** allParameterTypes, + ArrayType** parameterModes, ArrayType** parameterNames, + List** parameterDefaults, Oid* requiredResultType, List** defargpos, bool fenced, bool* has_undefined = NULL); +extern void compute_return_type( + TypeName* returnType, Oid languageOid, Oid* prorettype_p, bool* returnsSet_p, bool fenced, int startLineNumber, + TypeDependExtend* type_depend_extend, bool is_refresh_head); +void plpgsql_free_override_stack(int depth); + #endif /* PLPGSQL_H */ diff --git a/contrib/whale/plugin_optimizer/plan/planner.cpp b/contrib/whale/plugin_optimizer/plan/planner.cpp index a0d4f193f..09e188a86 100644 --- a/contrib/whale/plugin_optimizer/plan/planner.cpp +++ b/contrib/whale/plugin_optimizer/plan/planner.cpp @@ -94,6 +94,9 @@ /* Hook for plugins to get control in planner() */ THR_LOCAL ndp_pushdown_hook_type ndp_pushdown_hook = NULL; +#ifdef USE_SPQ +THR_LOCAL spq_planner_hook_type spq_planner_hook = NULL; +#endif #ifndef MIN #define MIN(A, B) ((B) < (A) ? 
(B) : (A)) @@ -377,6 +380,12 @@ PlannedStmt* planner(Query* parse, int cursorOptions, ParamListInfo boundParams) instr_time starttime; double totaltime = 0; +#ifdef USE_SPQ + if (spq_planner_hook) { + return (*spq_planner_hook) (parse, cursorOptions, boundParams); + } +#endif + INSTR_TIME_SET_CURRENT(starttime); #ifdef PGXC @@ -12119,6 +12128,9 @@ bool findConstraintByVar(Var* var, Oid relid, constraintType conType) ArrayType* arr = NULL; adatum = SysCacheGetAttr(CONSTROID, htup, Anum_pg_constraint_conkey, &isNull); + if (adatum == 0) { + continue; + } arr = DatumGetArrayTypeP(adatum); attnums = (int16*)ARR_DATA_PTR(arr); @@ -15931,3 +15943,88 @@ adjust_plan_for_srfs(PlannerInfo *root, Plan *plan, List *targets, List *targets } return newplan; } + +#ifdef USE_SPQ +static Node* get_spq_multiple_from_expr( + PlannerInfo* root, Node* expr, double rows, double* skew_multiple, double* bias_multiple) +{ + List* groupExprs = NIL; + Oid datatype = exprType((Node*)(expr)); + bool use_skew_multiple = true; + + if (!OidIsValid(datatype) || !IsSpqTypeDistributable(datatype)) + return NULL; + + groupExprs = list_make1(expr); + get_multiple_from_exprlist(root, groupExprs, rows, &use_skew_multiple, true, skew_multiple, bias_multiple); + list_free_ext(groupExprs); + + return expr; +} + + +List* spq_get_distributekey_from_tlist( + PlannerInfo* root, List* tlist, List* groupcls, double rows, double* result_multiple, void* skew_info) +{ + ListCell* lcell = NULL; + List* distkey = NIL; + double multiple = 0.0; + double bias_multiple = 0.0; + double skew_multiple = 0.0; + List* exprMultipleList = NIL; + + foreach (lcell, groupcls) { + Node* expr = (Node*)lfirst(lcell); + + if (IsA(expr, SortGroupClause)) + expr = get_sortgroupclause_expr((SortGroupClause*)expr, tlist); + + expr = get_spq_multiple_from_expr(root, expr, rows, &skew_multiple, &bias_multiple); + if (expr != NULL) { + /* + * we can't estimate skew of grouping sets because there's + * null added, so just add all columns and 
set mutiple to 1 + */ + if (root->parse->groupingSets) { + distkey = lappend(distkey, expr); + *result_multiple = 1; + continue; + } + if ((skew_multiple == 1.0) && (bias_multiple <= 1.0)) { + *result_multiple = 1; + list_free_ext(exprMultipleList); + return list_make1(expr); + } else if ((u_sess->pgxc_cxt.NumDataNodes == skew_multiple) && + (u_sess->pgxc_cxt.NumDataNodes == + bias_multiple)) { /* All the expr are const, return the first expr. */ + if (distkey == NULL) + distkey = lappend(distkey, expr); + *result_multiple = u_sess->pgxc_cxt.NumDataNodes; + + continue; + } else { + if (skew_multiple == 1.0) { + /* + * If distinct num of multiple has no skew, we should use bias multiple to + * compute mix multiple. + */ + multiple = bias_multiple; + } + else if (bias_multiple <= 1.0) /* mcf has no skew, handle skew_multiple */ + multiple = skew_multiple; + else + multiple = Max(bias_multiple, skew_multiple); + + exprMultipleList = add_multiple_to_list(expr, multiple, exprMultipleList); + } + } + } + + if (exprMultipleList != NULL) { + distkey = get_mix_diskey_by_exprlist(root, exprMultipleList, rows, result_multiple, (AggSkewInfo*)skew_info); + list_free_ext(exprMultipleList); + } + + return distkey; +} +#endif diff --git a/contrib/whale/plugin_parser/gram.y b/contrib/whale/plugin_parser/gram.y index 3d8c36c71..a707a4e40 100644 --- a/contrib/whale/plugin_parser/gram.y +++ b/contrib/whale/plugin_parser/gram.y @@ -59,6 +59,7 @@ #include "catalog/pg_proc.h" #include "catalog/gs_package.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_type_fn.h" #include "plugin_commands/defrem.h" #include "commands/trigger.h" #ifdef ENABLE_MULTIPLE_NODES @@ -248,6 +249,7 @@ static void ParseUpdateMultiSet(List *set_target_list, SelectStmt *stmt, core_yy static char *GetTargetFuncArgTypeName(char *typeString, TypeName* t); static char *FormatFuncArgType(core_yyscan_t yyscanner, char *argsString, List* parameters); static char *ParseFunctionArgSrc(core_yyscan_t yyscanner); 
+static char *ParseFuncHeadSrc(core_yyscan_t yyscanner, bool isFunction = true); static void parameter_check_execute_direct(const char* query); static Node *make_node_from_scanbuf(int start_pos, int end_pos, core_yyscan_t yyscanner); static int64 SequenceStrGetInt64(const char *str); @@ -370,7 +372,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm DropForeignServerStmt DropUserMappingStmt ExplainStmt ExecDirectStmt FetchStmt GetDiagStmt GrantStmt GrantRoleStmt GrantDbStmt IndexStmt InsertStmt ListenStmt LoadStmt LockStmt NotifyStmt ExplainableStmt PreparableStmt - CreateFunctionStmt CreateEventStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt + CreateFunctionStmt CreateEventStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt CompileStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt RemoveFuncStmt RemoveOperStmt RemovePackageStmt RenameStmt RevokeStmt RevokeRoleStmt RevokeDbStmt RuleActionStmt RuleActionStmtOrEmpty RuleStmt SecLabelStmt SelectStmt TimeCapsuleStmt TransactionStmt TruncateStmt CallFuncStmt @@ -434,7 +436,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm start_opt preserve_opt rename_opt status_opt comments_opt action_opt end_opt definer_name_opt -%type opt_lock lock_type cast_context opt_wait +%type opt_lock lock_type cast_context opt_wait compile_pkg_opt %type vacuum_option_list vacuum_option_elem opt_verify_options %type opt_check opt_force opt_or_replace opt_grant_grant_option opt_grant_admin_option @@ -864,7 +866,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CATALOG_NAME CHAIN CHANGE CHAR_P CHARACTER CHARACTERISTICS CHARACTERSET CHARSET CHECK CHECKPOINT CLASS CLASS_ORIGIN CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE CLUSTER COALESCE COLLATE 
COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COLUMN_NAME COLUMNS COMMENT COMMENTS COMMIT - COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPLETION COMPRESS CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSISTENT CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS + COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPILE COMPLETE COMPLETION COMPRESS CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSISTENT CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P CONVERT_P CONNECT COORDINATOR COORDINATORS COPY COST CREATE CROSS CSN CSV CUBE CURRENT_P CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA @@ -931,7 +933,7 @@ static void setDelimiterName(core_yyscan_t yyscanner, char*input, VariableSetStm SAMPLE SAVEPOINT SCHEDULE SCHEMA SCHEMA_NAME SCROLL SEARCH SECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHIPPABLE SHOW SHUTDOWN SIBLINGS - SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPILL SPLIT STABLE STACKED_P STANDALONE_P START STARTS STARTWITH + SIMILAR SIMPLE SIZE SKIP SLAVE SLICE SMALLDATETIME SMALLDATETIME_FORMAT_P SMALLINT SNAPSHOT SOME SOURCE_P SPACE SPECIFICATION SPILL SPLIT STABLE STACKED_P STANDALONE_P START STARTS STARTWITH STATEMENT STATEMENT_ID STATISTICS STDIN STDOUT STORAGE STORE_P STORED STRATIFY STREAM STRICT_P STRIP_P SUBCLASS_ORIGIN SUBPARTITION SUBPARTITIONS SUBSCRIPTION SUBSTRING SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM_P SYS_REFCURSOR STARTING SQL_P @@ -1206,6 +1208,7 @@ stmt : | ClosePortalStmt | ClusterStmt | CommentStmt + | CompileStmt | ConstraintsSetStmt | CopyStmt | CreateAsStmt @@ -15008,6 +15011,7 @@ CreateFunctionStmt: CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args RETURNS func_return createfunc_opt_list opt_definition { + set_function_style_pg(); 
CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -15027,6 +15031,7 @@ CreateFunctionStmt: | CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args RETURNS TABLE '(' table_func_column_list ')' createfunc_opt_list opt_definition { + set_function_style_pg(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -15047,6 +15052,7 @@ CreateFunctionStmt: | CREATE opt_or_replace definer_user FUNCTION func_name_opt_arg proc_args createfunc_opt_list opt_definition { + set_function_style_pg(); CreateFunctionStmt *n = makeNode(CreateFunctionStmt); n->isOraStyle = false; n->isPrivate = false; @@ -15069,6 +15075,10 @@ CreateFunctionStmt: u_sess->parser_cxt.eaten_begin = false; pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true; u_sess->parser_cxt.isCreateFuncOrProc = true; + if (set_is_create_plsql_type()) { + set_create_plsql_type_start(); + set_function_style_a(); + } } subprogram_body { int rc = 0; @@ -15088,6 +15098,9 @@ CreateFunctionStmt: n->funcname = $5; n->parameters = $6; n->inputHeaderSrc = FormatFuncArgType(yyscanner, funSource->headerSrc, n->parameters); + if (enable_plpgsql_gsdependency_guc()) { + n->funcHeadSrc = ParseFuncHeadSrc(yyscanner); + } n->returnType = $8; n->options = $9; n->options = lappend(n->options, makeDefElem("as", @@ -15944,6 +15957,10 @@ CreateProcedureStmt: u_sess->parser_cxt.eaten_begin = false; pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true; u_sess->parser_cxt.isCreateFuncOrProc = true; + if (set_is_create_plsql_type()) { + set_create_plsql_type_start(); + set_function_style_a(); + } } subprogram_body { int rc = 0; @@ -15966,6 +15983,9 @@ CreateProcedureStmt: n->funcname = $5; n->parameters = $6; n->inputHeaderSrc = FormatFuncArgType(yyscanner, funSource->headerSrc, n->parameters); + if (enable_plpgsql_gsdependency_guc()) { + n->funcHeadSrc = ParseFuncHeadSrc(yyscanner, false); + } 
n->returnType = NULL; n->isProcedure = true; if (0 == count) @@ -15987,9 +16007,11 @@ CreateProcedureStmt: ; CreatePackageStmt: - CREATE opt_or_replace PACKAGE pkg_name invoker_rights as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;} + CREATE opt_or_replace PACKAGE pkg_name invoker_rights as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;set_function_style_a();} { - u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); + set_create_plsql_type_start(); + u_sess->plsql_cxt.need_create_depend = true; + u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); CreatePackageStmt *n = makeNode(CreatePackageStmt); char *pkgNameBegin = NULL; char *pkgNameEnd = NULL; @@ -16067,6 +16089,7 @@ CreatePackageStmt: } else { parser_yyerror("package spec is not ended correctly"); } + u_sess->plsql_cxt.isCreatePkg = false; } tok = YYLEX; } @@ -16404,8 +16427,10 @@ pkg_body_subprogram: { } ; CreatePackageBodyStmt: - CREATE opt_or_replace PACKAGE BODY_P pkg_name as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;} pkg_body_subprogram + CREATE opt_or_replace PACKAGE BODY_P pkg_name as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;set_function_style_a();} pkg_body_subprogram { + set_create_plsql_type_start(); + u_sess->plsql_cxt.need_create_depend = true; char *pkgNameBegin = NULL; char *pkgNameEnd = NULL; char *pkgName = NULL; @@ -16597,7 +16622,11 @@ param_name: type_function_name ; func_return: - func_type + func_type { + if (enable_plpgsql_gsdependency_guc()) { + pg_yyget_extra(yyscanner)->core_yy_extra.return_pos_end = yylloc; + } + } { /* We can catch over-specified results here if we want to, * but for now better to silently swallow typmod, etc. 
@@ -16605,7 +16634,11 @@ func_return: */ $$ = $1; } - | func_type DETERMINISTIC + | func_type { + if (enable_plpgsql_gsdependency_guc()) { + pg_yyget_extra(yyscanner)->core_yy_extra.return_pos_end = yylloc; + } + } DETERMINISTIC { $$ = $1; } @@ -17106,6 +17139,69 @@ opt_restrict: | /* EMPTY */ ; +compile_pkg_opt: + BODY_P {$$ = COMPILE_PKG_BODY;} + | PACKAGE {$$ = COMPILE_PACKAGE;} + | SPECIFICATION {$$ = COMPILE_PKG_SPECIFICATION;} + | /* EMPTY */ {$$ = COMPILE_PACKAGE;} + ; +CompileStmt: + ALTER PROCEDURE function_with_argtypes COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = ((FuncWithArgs*)$3)->funcname; + n->funcArgs = ((FuncWithArgs*)$3)->funcargs; + n->compileItem = COMPILE_PROCEDURE; + } + $$ = (Node*)n; + } + | ALTER PROCEDURE func_name_opt_arg COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = COMPILE_PROCEDURE; + } + $$ = (Node*)n; + } + | ALTER FUNCTION function_with_argtypes COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = ((FuncWithArgs*)$3)->funcname; + n->funcArgs = ((FuncWithArgs*)$3)->funcargs; + n->compileItem = COMPILE_FUNCTION; + } + $$ = (Node*)n; + } + | ALTER FUNCTION func_name_opt_arg COMPILE + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = COMPILE_FUNCTION; + } + $$ = (Node*)n; + } + | ALTER PACKAGE pkg_name COMPILE compile_pkg_opt + { + u_sess->plsql_cxt.during_compile = true; + CompileStmt *n = makeNode(CompileStmt); + if (enable_plpgsql_gsdependency_guc()) { + n->objName = $3; + n->funcArgs = NULL; + n->compileItem = (CompileEntry)$5; + } + 
$$ = (Node*)n; + } + ; /***************************************************************************** * @@ -28953,6 +29049,7 @@ unreserved_keyword: | COMMITTED | COMPATIBLE_ILLEGAL_CHARS | COMPLETE + | COMPILE | COMPLETION | COMPRESS | CONDITION @@ -29294,6 +29391,7 @@ unreserved_keyword: | SNAPSHOT | SOURCE_P | SPACE + | SPECIFICATION | SPILL | SPLIT | SQL_P @@ -31369,7 +31467,12 @@ static char *GetTargetFuncArgTypeName(char *typeString, TypeName* t) { Type typtup; Oid toid; - typtup = LookupTypeName(NULL, t, NULL, false); + TypeDependExtend* dependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&dependExtend); + } + typtup = LookupTypeName(NULL, t, NULL, false, dependExtend); + pfree_ext(dependExtend); if (typtup) { toid = typeTypeId(typtup); @@ -31433,9 +31536,10 @@ static char *FormatFuncArgType(core_yyscan_t yyscanner, char *argsString, List* pfree(argsString); proc_header_len = proc_header_len; - yyextra->core_yy_extra.func_param_begin = 0; - yyextra->core_yy_extra.func_param_end = 0; - + if (!enable_plpgsql_gsdependency_guc()) { + yyextra->core_yy_extra.func_param_begin = 0; + yyextra->core_yy_extra.func_param_end = 0; + } return buf.data; } @@ -31468,6 +31572,32 @@ static char *ParseFunctionArgSrc(core_yyscan_t yyscanner) return proc_header_str; } +static char *ParseFuncHeadSrc(core_yyscan_t yyscanner, bool is_function) +{ + base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner); + int proc_header_src_end = 0; + char *proc_header_info = NULL; + if (is_function) { + proc_header_src_end = yyextra->core_yy_extra.return_pos_end - 1; + } else { + proc_header_src_end = yyextra->core_yy_extra.func_param_end + 1; + } + if (proc_header_src_end == 1) { + return proc_header_info; + } + yyextra->core_yy_extra.return_pos_end = 0; + yyextra->core_yy_extra.func_param_begin = 0; + yyextra->core_yy_extra.func_param_end = 0; + if (proc_header_src_end > 0) { + proc_header_info = (char*)palloc0(proc_header_src_end + 1); + errno_t rc = EOK; 
+ rc = strncpy_s(proc_header_info, (proc_header_src_end + 1), yyextra->core_yy_extra.scanbuf, proc_header_src_end); + securec_check(rc, "\0", "\0"); + proc_header_info[proc_header_src_end] = '\0'; + } + return proc_header_info; +} + static void parameter_check_execute_direct(const char* query) { #ifndef ENABLE_MULTIPLE_NODES diff --git a/contrib/whale/plugin_parser/parse_type.cpp b/contrib/whale/plugin_parser/parse_type.cpp index 52a5fe894..5141f515a 100644 --- a/contrib/whale/plugin_parser/parse_type.cpp +++ b/contrib/whale/plugin_parser/parse_type.cpp @@ -39,6 +39,9 @@ #include "utils/pl_package.h" #include "catalog/gs_collation.h" #include "plugin_parser/parse_utilcmd.h" +#include "catalog/pg_object.h" +#include "catalog/gs_dependencies_fn.h" +#include "catalog/pg_type_fn.h" static int32 typenameTypeMod(ParseState* pstate, const TypeName* typname, Type typ); @@ -71,13 +74,35 @@ Oid LookupPctTypeInPackage(RangeVar* rel, Oid pkgOid, const char* field) } } +Type LookupTypeNameSupportUndef(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice) +{ + Type typtup = NULL; + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + TypeDependExtend* dependExt = NULL; + InstanceTypeNameDependExtend(&dependExt); + typtup = LookupTypeName(pstate, typeName, typmod_p, print_notice, dependExt); + pfree_ext(dependExt); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + set_create_plsql_type(oldCreatePlsqlType); + return typtup; +} + /* * LookupTypeName * Wrapper for typical case. 
*/ -Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice) +Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_p, bool print_notice, TypeDependExtend* dependExtend) { - return LookupTypeNameExtended(pstate, typeName, typmod_p, true, print_notice); + return LookupTypeNameExtended(pstate, typeName, typmod_p, true, print_notice, dependExtend); } /* @@ -108,14 +133,16 @@ Type LookupTypeName(ParseState *pstate, const TypeName *typeName, int32 *typmod_ * pstate is only used for error location info, and may be NULL. */ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod_p, bool temp_ok, - bool print_notice) + bool print_notice, TypeDependExtend* dependExtend) { Oid typoid = InvalidOid; - HeapTuple tup; + HeapTuple tup = NULL; int32 typmod = -1; Oid pkgOid = InvalidOid; bool notPkgType = false; - + char* schemaname = NULL; + char* typeName = NULL; + char* pkgName = NULL; if (typname->names == NIL) { /* We have the OID already if it's an internally generated TypeName */ typoid = typname->typeOid; @@ -128,10 +155,12 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* char* pkgName = NULL; char* schemaName = NULL; /* deconstruct the name list */ + int typTupStatus = InvalidTypeTup; switch (list_length(typname->names)) { case 1: tup = getPLpgsqlVarTypeTup(strVal(linitial(typname->names))); - if (HeapTupleIsValid(tup)) { + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return tup; } ereport(ERROR, @@ -141,16 +170,18 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* parser_errposition(pstate, typname->location))); break; case 2: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; 
} rel->relname = strVal(linitial(typname->names)); field = strVal(lsecond(typname->names)); break; case 3: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; } pkgName = strVal(linitial(typname->names)); @@ -164,8 +195,9 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* field = strVal(lthird(typname->names)); break; case 4: - tup = FindPkgVariableType(pstate, typname, typmod_p); - if (HeapTupleIsValid(tup)) { + tup = FindPkgVariableType(pstate, typname, typmod_p, dependExtend); + typTupStatus = GetTypeTupStatus(tup); + if (typTupStatus == NormalTypeTup) { return (Type)tup; } pkgName = strVal(lsecond(typname->names)); @@ -203,14 +235,38 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid, field); } if (!OidIsValid(relid)) { - relid = RangeVarGetRelidExtended(rel, NoLock, false, false, false, true, NULL, NULL, NULL, NULL); + if (enable_plpgsql_undefined()) { + relid = RangeVarGetRelidExtended(rel, NoLock, true, false, false, true, NULL, NULL, NULL, NULL); + if (!OidIsValid(relid) && HeapTupleIsValid(tup)) { + if (NULL != dependExtend) { + dependExtend->dependUndefined = true; + } + if (GetCurrCompilePgObjStatus() && + u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("TYPE %s does not exist in type.", rel->relname))); + } + InvalidateCurrCompilePgObj(); + return tup; + } + } else { + relid = RangeVarGetRelidExtended(rel, NoLock, false, false, false, true, NULL, NULL, NULL, NULL); + } } attnum = get_attnum(relid, field); if (attnum == InvalidAttrNumber) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of 
relation \"%s\" does not exist", field, rel->relname), - parser_errposition(pstate, typname->location))); + if (enable_plpgsql_undefined()) { + if (NULL != dependExtend) { + dependExtend->dependUndefined = true; + } + return SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", field, rel->relname), + parser_errposition(pstate, typname->location))); + } } typoid = get_atttype(relid, attnum); @@ -220,6 +276,16 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* typmod = get_atttypmod(relid, attnum); } + if (enable_plpgsql_undefined() && UndefineTypeTup == typTupStatus && NULL != dependExtend) { + gsplsql_delete_unrefer_depend_obj_oid(dependExtend->undefDependObjOid, false); + dependExtend->undefDependObjOid = InvalidOid; + ReleaseSysCache(tup); + tup = NULL; + } + if (enable_plpgsql_gsdependency() && NULL != dependExtend) { + dependExtend->typeOid = get_rel_type_id(relid); + } + /* If an array reference, return the array type instead */ if (typname->arrayBounds != NIL) { typoid = get_array_type(typoid); @@ -235,10 +301,6 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* } } else { /* Normal reference to a type name */ - char* schemaname = NULL; - char* typeName = NULL; - char* pkgName = NULL; - /* Handle %ROWTYPE reference to type of an existing table. 
*/ if (typname->pct_rowtype) { RangeVar* relvar = NULL; @@ -266,14 +328,25 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* } Oid class_oid = RangeVarGetRelidExtended(relvar, NoLock, true, false, false, true, NULL, NULL); if (!OidIsValid(class_oid)) { - pfree_ext(relvar); /* if case: cursor%rowtype */ tup = getCursorTypeTup(strVal(linitial(typname->names))); if (HeapTupleIsValid(tup)) { return (Type)tup; } - - ereport(ERROR, + if (enable_plpgsql_undefined() && NULL != dependExtend) { + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(relvar); + if (OidIsValid(undefRefObjOid)) { + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + InvalidateCurrCompilePgObj(); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + if (typmod_p != NULL) { + *typmod_p = -1; + } + } + } + pfree_ext(relvar); + ereport(NULL != tup ? WARNING : ERROR, (errmodule(MOD_PARSER), errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation does not exist when parse word."), @@ -281,6 +354,7 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* NameListToString(typname->names)), errcause("incorrectly referencing relation"), erraction("check the relation name for %%ROWTYPE"))); + return (Type)tup; } char relkind = get_rel_relkind(class_oid); /* onyl table is allowed for %ROWTYPE. 
*/ @@ -328,13 +402,23 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* /* find type in current packgae first */ typoid = LookupTypeInPackage(typname->names, typeName); } - if (isPkgType) { - typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); - } - if (!OidIsValid(typoid)) { - /* Unqualified type name, so search the search path */ - typoid = TypenameGetTypidExtended(typeName, temp_ok); - notPkgType = true; /* should also track type dependency, fix when refactoring */ + if (enable_plpgsql_gsdependency_guc()) { + if (isPkgType) { + typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); + } else if (!OidIsValid(typoid)) { + /* Unqualified type name, so search the search path */ + typoid = TypenameGetTypidExtended(typeName, temp_ok); + notPkgType = true; /* should also track type dependency, fix when refactoring */ + } + } else { + if (isPkgType) { + typoid = LookupTypeInPackage(typname->names, typeName, pkgOid); + } + if (!OidIsValid(typoid)) { + /* Unqualified type name, so search the search path */ + typoid = TypenameGetTypidExtended(typeName, temp_ok); + notPkgType = true; /* should also track type dependency, fix when refactoring */ + } } } @@ -355,29 +439,58 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* if (typmod_p != NULL) { *typmod_p = -1; } - return NULL; - } - - /* Don't support the type in blacklist. 
*/ - bool is_unsupported_type = !u_sess->attr.attr_common.IsInplaceUpgrade && IsTypeInBlacklist(typoid); - if (is_unsupported_type) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type %s is not yet supported.", format_type_be(typoid)))); - } + if (enable_plpgsql_undefined() && NULL != dependExtend) { + if (NULL != schemaname && NULL == pkgName && !OidIsValid(get_namespace_oid(schemaname, true))) { + pkgName = schemaname; + schemaname = NULL; + } + GsDependObjDesc objDesc; + objDesc.schemaName = schemaname; + char* activeSchemaName = NULL; + if (schemaname == NULL) { + activeSchemaName = get_namespace_name(get_compiled_object_nspoid()); + objDesc.schemaName = activeSchemaName; + } + objDesc.packageName = pkgName; + objDesc.name = typeName; + objDesc.type = GSDEPEND_OBJECT_TYPE_TYPE; + if (u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + dependExtend->undefDependObjOid = gsplsql_flush_undef_ref_depend_obj(&objDesc); + } else { + dependExtend->undefDependObjOid = InvalidOid; + } + dependExtend->dependUndefined = true; + pfree_ext(activeSchemaName); + if (GetCurrCompilePgObjStatus() && + u_sess->plsql_cxt.functionStyleType != FUNCTION_STYLE_TYPE_REFRESH_HEAD) { + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("Type %s does not exist.", typeName))); + } + InvalidateCurrCompilePgObj(); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + } + } else { + /* Don't support the type in blacklist. 
*/ + bool is_unsupported_type = !u_sess->attr.attr_common.IsInplaceUpgrade && IsTypeInBlacklist(typoid); + if (is_unsupported_type) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type %s is not yet supported.", format_type_be(typoid)))); + } - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); - /* should not happen */ - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typoid))); - } - if (!typname->pct_type) { - typmod = typenameTypeMod(pstate, typname, (Type)tup); - } - if (typmod_p != NULL) { - *typmod_p = typmod; + /* should not happen */ + if (!HeapTupleIsValid(tup)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typoid))); + } + if (!typname->pct_type) { + typmod = typenameTypeMod(pstate, typname, (Type)tup); + } + if (typmod_p != NULL) { + *typmod_p = typmod; + } } - return (Type)tup; } @@ -388,11 +501,11 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* * a suitable error message if the type cannot be found or is not defined. * Callers of this can therefore assume the result is a fully valid type. */ -Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p) +Type typenameType(ParseState* pstate, const TypeName* typname, int32* typmod_p, TypeDependExtend* dependExtend) { Type tup; - tup = LookupTypeName(pstate, typname, typmod_p); + tup = LookupTypeName(pstate, typname, typmod_p, true, dependExtend); /* * If the type is relation, then we check @@ -447,11 +560,11 @@ Oid typenameTypeId(ParseState* pstate, const TypeName* typname) * This is equivalent to typenameType, but we only hand back the type OID * and typmod, not the syscache entry. 
*/ -void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p) +void typenameTypeIdAndMod(ParseState* pstate, const TypeName* typname, Oid* typeid_p, int32* typmod_p, TypeDependExtend* dependExtend) { Type tup; - tup = typenameType(pstate, typname, typmod_p); + tup = typenameType(pstate, typname, typmod_p, dependExtend); *typeid_p = HeapTupleGetOid(tup); ReleaseSysCache(tup); } @@ -906,7 +1019,7 @@ static void pts_error_callback(void* arg) * such as "int4" or "integer" or "character varying(32)", parse * the string and convert it to a type OID and type modifier. */ -void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) +void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p, TypeDependExtend* dependExtend) { StringInfoData buf; buf.data = NULL; @@ -970,7 +1083,7 @@ void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) goto fail; } - typenameTypeIdAndMod(NULL, typname, typeid_p, typmod_p); + typenameTypeIdAndMod(NULL, typname, typeid_p, typmod_p, dependExtend); pfree_ext(buf.data); @@ -979,7 +1092,13 @@ void parseTypeString(const char* str, Oid* typeid_p, int32* typmod_p) fail: pfree_ext(buf.data); InsertErrorMessage("invalid type name", u_sess->plsql_cxt.plpgsql_yylloc); - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + if (enable_plpgsql_undefined()) { + InvalidateCurrCompilePgObj(); + *typeid_p = UNDEFINEDOID; + ereport(WARNING, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + } else { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid type name \"%s\"", str))); + } } /* @@ -1448,8 +1567,31 @@ char* CastPackageTypeName(const char* typName, Oid objOid, bool isPackage, bool return castTypName.data; } +char* ParseTypeName(const char* typName, Oid pkgOid) +{ + if (!OidIsValid(pkgOid)) { + return NULL; + } + char* oldStr = NULL; + const int oldStrLen =12; + oldStr = (char*)palloc0(oldStrLen * 
sizeof(char)); + pg_ltoa(pkgOid, oldStr); + int len = strlen(oldStr); + char* pos = strstr((char*)typName, oldStr); + pfree_ext(oldStr); + if (NULL == pos) { + return NULL; + } + pos +=len; + if (*pos != '.') { + return NULL; + } + return pstrdup(++pos); +} + /* find if %type ref a package variable type */ -HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p) +HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32* typmod_p, + TypeDependExtend* depend_extend) { HeapTuple tup = NULL; @@ -1457,8 +1599,7 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 return tup; #else int32 typmod = -1; - - if (u_sess->plsql_cxt.curr_compile_context == NULL) { + if (!enable_plpgsql_gsdependency_guc() && u_sess->plsql_cxt.curr_compile_context == NULL) { return tup; } @@ -1469,15 +1610,14 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 } /* find package.var%TYPE second */ - if (list_length(typname->names) <= 1 || list_length(typname->names) >= 4) { + if (list_length(typname->names) <= 1) { + return tup; + } + if (list_length(typname->names) >= (enable_plpgsql_gsdependency_guc() ? 
5 :4)) { return tup; } PLpgSQL_datum* datum = GetPackageDatum(typname->names); if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_VAR) { - if (OidIsValid(((PLpgSQL_var*)datum)->datatype->tableOfIndexType)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("not support ref table of variable as procedure argument type"))); - } Oid typOid = ((PLpgSQL_var*)datum)->datatype->typoid; tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typOid)); /* should not happen */ @@ -1489,6 +1629,19 @@ HeapTuple FindPkgVariableType(ParseState* pstate, const TypeName* typname, int32 if (typmod_p != NULL) { *typmod_p = typmod; } + if (enable_plpgsql_gsdependency() && NULL != depend_extend) { + DeconstructQualifiedName(typname->names, &depend_extend->schemaName, + &depend_extend->objectName, &depend_extend->packageName); + } + } else if (enable_plpgsql_undefined() && NULL != depend_extend) { + Oid undefRefObjOid = gsplsql_try_build_exist_pkg_undef_var(typname->names); + if (OidIsValid(undefRefObjOid)) { + depend_extend->undefDependObjOid = undefRefObjOid; + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(UNDEFINEDOID)); + if (typmod_p != NULL) { + *typmod_p = -1; + } + } } return tup; #endif @@ -1535,13 +1688,18 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n /* pkgOid is invalid, try to find the type in current compile package */ if (!OidIsValid(pkgOid)) { - /* if not compiling packgae, just return invalid oid */ - if (u_sess->plsql_cxt.curr_compile_context == NULL || - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL) { - return typOid; + if (enable_plpgsql_gsdependency_guc() && + u_sess->plsql_cxt.functionStyleType == FUNCTION_STYLE_TYPE_REFRESH_HEAD && + OidIsValid(u_sess->plsql_cxt.currRefreshPkgOid)) { + pkgOid = u_sess->plsql_cxt.currRefreshPkgOid; + } else { + /* if not compiling packgae, just return invalid oid */ + if (u_sess->plsql_cxt.curr_compile_context == NULL || + 
u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL) { + return typOid; + } + pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; } - - pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; /* find public package type first */ castTypeName = CastPackageTypeName(typeName, pkgOid, true, true); typOid = TypenameGetTypidExtended(castTypeName, false); @@ -1574,7 +1732,13 @@ Oid LookupTypeInPackage(List* typeNames, const char* typeName, Oid pkgOid, Oid n pfree_ext(castTypeName); if (OidIsValid(typOid)) { - check_record_nest_tableof_index_type(NULL, typeNames); + bool pkgValid = true; + if (enable_plpgsql_gsdependency_guc()) { + pkgValid = GetPgObjectValid(pkgOid, OBJECT_TYPE_PKGSPEC); + } + if (pkgValid) { + // check_record_nest_tableof_index_type(NULL, typeNames); + } return typOid; } @@ -1639,3 +1803,11 @@ void check_type_supports_multi_charset(Oid typid, bool allow_array) errmsg("multi character set for datatype '%s' is not supported", get_typename(typid)))); } } + +TypeTupStatus GetTypeTupStatus(Type typ) +{ + if (HeapTupleIsValid(typ)) { + return (UNDEFINEDOID == HeapTupleGetOid(typ) ? UndefineTypeTup : NormalTypeTup); + } + return InvalidTypeTup; +} \ No newline at end of file diff --git a/contrib/whale/plugin_parser/scan.l b/contrib/whale/plugin_parser/scan.l index 1d419c2d4..bbba4fb9a 100644 --- a/contrib/whale/plugin_parser/scan.l +++ b/contrib/whale/plugin_parser/scan.l @@ -1368,6 +1368,7 @@ scanner_init(const char *str, yyext->include_ora_comment = false; yyext->func_param_begin = 0; yyext->func_param_end = 0; + yyext->return_pos_end = 0; /* * Make a scan buffer with special termination needed by flex. 
diff --git a/contrib/whale/plugin_pl/plpgsql/src/gram.y b/contrib/whale/plugin_pl/plpgsql/src/gram.y index 0f24b8551..4d0b78fa4 100644 --- a/contrib/whale/plugin_pl/plpgsql/src/gram.y +++ b/contrib/whale/plugin_pl/plpgsql/src/gram.y @@ -18,6 +18,7 @@ #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/gs_package.h" +#include "catalog/gs_dependencies_fn.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_synonym.h" @@ -240,7 +241,7 @@ static Oid get_table_type(PLpgSQL_datum* datum); static Node* make_columnDef_from_attr(PLpgSQL_rec_attr* attr); static TypeName* make_typename_from_datatype(PLpgSQL_type* datatype); static Oid plpgsql_build_package_record_type(const char* typname, List* list, bool add2namespace); -static void plpgsql_build_package_array_type(const char* typname, Oid elemtypoid, char arraytype); +static void plpgsql_build_package_array_type(const char* typname, Oid elemtypoid, char arraytype, TypeDependExtend* dependExtend = NULL); static void plpgsql_build_package_refcursor_type(const char* typname); int plpgsql_yylex_single(void); static void get_datum_tok_type(PLpgSQL_datum* target, int* tok_flag); @@ -1299,6 +1300,9 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no parser_errposition(@5))); } } + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_type_in_body_dependency($3); + } } pfree_ext(varname->name); } @@ -1442,7 +1446,7 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_varrayType($2->name, $2->lineno, $9, true); if (IS_PACKAGE) { - plpgsql_build_package_array_type($2->name, $9->typoid, TYPCATEGORY_ARRAY); + plpgsql_build_package_array_type($2->name, $9->typoid, TYPCATEGORY_ARRAY, $9->dependExtend); } pfree_ext($2->name); pfree($2); @@ -1596,7 +1600,7 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } plpgsql_build_tableType($2->name, $2->lineno, $6, true); if 
(IS_PACKAGE) { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF, $6->dependExtend); } pfree_ext($2->name); pfree($2); @@ -1767,9 +1771,9 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no if (IS_PACKAGE) { if ($10->typoid == VARCHAROID) { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_VARCHAR); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_VARCHAR, $6->dependExtend); } else { - plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER); + plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER, $6->dependExtend); } } pfree_ext($2->name); @@ -1914,6 +1918,11 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } if (IS_PACKAGE) { newp->typoid = plpgsql_build_package_record_type($2->name, $6, true); + } else if (enable_plpgsql_gsdependency()) { + ListCell* cell = NULL; + foreach(cell, $6) { + gsplsql_build_gs_type_in_body_dependency(((PLpgSQL_rec_attr*)lfirst(cell))->type); + } } pfree_ext($2->name); pfree($2); @@ -5968,6 +5977,9 @@ cursor_variable : T_DATUM $1.ident), parser_errposition(@1))); } + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_variable_dependency($1.idents); + } $$ = $1.dno; } | T_WORD @@ -8279,6 +8291,12 @@ static bool construct_cword(StringInfo ds, ArrayParseContext *context, int *tok, } else { yyerror("syntax error"); } + if (enable_plpgsql_gsdependency()) { + FuncCandidateList clist = FuncnameGetCandidates(idents, -1, NIL, false, false, true); + if (clist == NULL) { + gsplsql_build_gs_variable_dependency(idents); + } + } if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { int dno = -1; char *name = NameListToString(idents); @@ -8300,7 +8318,21 @@ static bool construct_cword(StringInfo ds, ArrayParseContext *context, int *tok, int curloc = yylloc; *tok 
= yylex(); plpgsql_push_back_token(*tok); - return construct_object_type(ds, context, makeTypeNameFromNameList(idents), tok, parenlevel, curloc, loc); + bool result; + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + result = construct_object_type(ds, context, makeTypeNameFromNameList(idents), tok, parenlevel, curloc, loc); + set_create_plsql_type(oldCreatePlsqlType); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + return result; } /* Convenience routine to read an expression with one possible terminator */ @@ -8879,6 +8911,9 @@ read_sql_construct6(int until, idents = yylval.wdatum.idents; int var_dno = yylval.wdatum.dno; + if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_variable_dependency(idents); + } if (type_flag == PLPGSQL_TOK_TABLE_VAR) { /* * table var name may be schema.pkg.table_var @@ -9591,16 +9626,20 @@ read_datatype(int tok) if (tok_is_keyword(tok, &yylval, K_TYPE, "type")) { + TypeDependExtend* typeDependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&typeDependExtend); + } /* find val.col%TYPE first */ HeapTuple tup = NULL; int collectionType = PLPGSQL_COLLECTION_NONE; Oid tableOfIndexType = InvalidOid; int32 typMod = -1; - tup = FindRowVarColType(dtnames, &collectionType, &tableOfIndexType, &typMod); + tup = FindRowVarColType(dtnames, &collectionType, &tableOfIndexType, &typMod, typeDependExtend); if (tup != NULL) { Oid typOid = typeTypeId(tup); ReleaseSysCache(tup); - PLpgSQL_type* type = plpgsql_build_datatype(typOid, typMod, InvalidOid); + PLpgSQL_type* type = plpgsql_build_datatype(typOid, typMod, InvalidOid, typeDependExtend); if (OidIsValid(tableOfIndexType)) { type->collectionType = collectionType; type->tableOfIndexType = tableOfIndexType; @@ -9610,22 +9649,47 @@ read_datatype(int tok) /* find pkg.var%TYPE second */ PLpgSQL_datum* datum = 
GetPackageDatum(dtnames); - if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_VAR) { - PLpgSQL_var* var = (PLpgSQL_var*)datum; - Oid typOid = var->datatype->typoid; - int32 typmod = var->datatype->atttypmod; - Oid collation = var->datatype->collation; - int collectionType = var->datatype->collectionType; - Oid tableOfIndexType = var->datatype->tableOfIndexType; - - PLpgSQL_type* type = plpgsql_build_datatype(typOid, typmod, collation); - type->collectionType = collectionType; - type->tableOfIndexType = tableOfIndexType; - return type; + if (datum != NULL) { + if (datum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)datum; + Oid typOid = var->datatype->typoid; + int32 typmod = var->datatype->atttypmod; + Oid collation = var->datatype->collation; + int collectionType = var->datatype->collectionType; + Oid tableOfIndexType = var->datatype->tableOfIndexType; + if (var->pkg != NULL && enable_plpgsql_gsdependency()) { + typeDependExtend->objectName = pstrdup(var->refname); + typeDependExtend->packageName = pstrdup(var->pkg->pkg_signature); + typeDependExtend->schemaName = get_namespace_name(var->pkg->namespaceOid); + } + PLpgSQL_type* type = plpgsql_build_datatype(typOid, typmod, collation, typeDependExtend); + type->collectionType = collectionType; + type->tableOfIndexType = tableOfIndexType; + return type; + } else if (datum->dtype == PLPGSQL_DTYPE_ROW){ + PLpgSQL_row* row = (PLpgSQL_row*)datum; + if (row->rowtupdesc && row->rowtupdesc->tdtypeid != RECORDOID && + OidIsValid(row->rowtupdesc->tdtypeid)) { + if (row->pkg != NULL && enable_plpgsql_gsdependency()) { + typeDependExtend->objectName = pstrdup(row->refname); + typeDependExtend->packageName = pstrdup(row->pkg->pkg_signature); + typeDependExtend->schemaName = get_namespace_name(row->pkg->namespaceOid); + } + return plpgsql_build_datatype(row->rowtupdesc->tdtypeid, -1, InvalidOid, typeDependExtend); + } + } } - result = plpgsql_parse_cwordtype(dtnames); + result = 
plpgsql_parse_cwordtype(dtnames, typeDependExtend); if (result) return result; + if (enable_plpgsql_undefined()) { + Oid tryUndefObjOid = gsplsql_try_build_exist_pkg_undef_var(dtnames); + if (OidIsValid(tryUndefObjOid)) { + typeDependExtend->undefDependObjOid = tryUndefObjOid; + typeDependExtend->dependUndefined = true; + return plpgsql_build_datatype(UNDEFINEDOID, -1, InvalidOid, typeDependExtend); + } + } } else if (tok_is_keyword(tok, &yylval, K_ROWTYPE, "rowtype")) @@ -9775,6 +9839,7 @@ make_execsql_stmt(int firsttoken, int location) bool insert_array_record = false; int values_end_loc = -1; int before_semi_loc = -1; + const char* err_msg = "The label name can only contain letters, digits and underscores"; PLpgSQL_row* row_data = NULL; PLpgSQL_rec* rec_data = NULL; PLpgSQL_var* array_data = NULL; @@ -9884,22 +9949,50 @@ make_execsql_stmt(int firsttoken, int location) { char* name = NULL; errno_t rc = 0; + int num = -1; int len = Min(NAMEDATALEN, lb.len - count + 1); name = (char*)palloc(len); rc = strncpy_s(name, len, lb.data, len - 1); securec_check_c(rc, "\0", "\0"); + num = strspn(pg_strtolower(name), "abcdefghijklmnopqrstuvwxyz0123456789_"); + + if(num != len - 1 || (name[0] >= '0' && name[0] <= '9')) { + pfree(name); + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg(err_msg), + parser_errposition(location + num))); + } plpgsql_ns_additem(PLPGSQL_NSTYPE_LABEL, 0, pg_strtolower(name)); pfree(name); } - else - { + else { + int valid_len = lb.len; if(lb.len >= NAMEDATALEN) { lb.data[NAMEDATALEN - 1] = '\0'; + valid_len = NAMEDATALEN - 1; } + int len = -1; + len = strspn(pg_strtolower(lb.data), "abcdefghijklmnopqrstuvwxyz0123456789_"); + if(len != valid_len) { + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg(err_msg), + parser_errposition(location + len))); + } + if(lb.data[0] >= '0' && lb.data[0] <= '9') { + pfree_ext(lb.data); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + 
errmsg(err_msg), + parser_errposition(location))); + } plpgsql_ns_additem(PLPGSQL_NSTYPE_LABEL, 0, pg_strtolower(lb.data)); } pfree_ext(lb.data); @@ -9943,22 +10036,14 @@ make_execsql_stmt(int firsttoken, int location) securec_check_c(rc, "\0", "\0"); len = strspn(pg_strtolower(name), "abcdefghijklmnopqrstuvwxyz0123456789_"); - if(len != lb.len - count) { + if(len != lb.len - count || (name[0] >= '0' && name[0] <= '9')) { pfree(name); pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), + errmsg(err_msg), parser_errposition(location + len))); } - if(name[0] >= '0' && name[0] <= '9') { - pfree(name); - pfree_ext(lb.data); - ereport(errstate, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), - parser_errposition(location))); - } if(lb.len-count >= NAMEDATALEN) { @@ -9983,14 +10068,14 @@ make_execsql_stmt(int firsttoken, int location) pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), + errmsg(err_msg), parser_errposition(location + len))); } if(lb.data[0] >= '0' && lb.data[0] <= '9') { pfree_ext(lb.data); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("The label name is invalid"), + errmsg(err_msg), parser_errposition(location))); } if(lb.len >= NAMEDATALEN) @@ -10064,6 +10149,7 @@ make_execsql_stmt(int firsttoken, int location) PLpgSQL_nsitem* ns = plpgsql_ns_lookup(plpgsql_ns_top(), false, yylval.word.ident, NULL, NULL, NULL); if (ns == NULL) { yyerror("insert an nonexistent variable."); + continue; } PLpgSQL_datum* datum = u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[ns->itemno]; @@ -11281,8 +11367,16 @@ read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict, int firstto errmsg("record or row variable cannot be part of multiple-item INTO list"), parser_errposition(yylloc))); } - if (tok == T_DATUM || tok == T_VARRAY_VAR - || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE) { + if (tok == '.') { 
+ const char* message = "Improper use of '.*'. The '.*' operator cannot be used with a row type variable."; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("Improper use of '.*'. The '.*' operator cannot be used with a row type variable."), + parser_errposition(yylloc))); + } + if (!DB_IS_CMPT(PG_FORMAT) && (tok == T_DATUM || tok == T_VARRAY_VAR + || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE)) { const char* message = "syntax error, expected \",\""; InsertErrorMessage(message, plpgsql_yylloc); ereport(errstate, @@ -11305,8 +11399,8 @@ read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict, int firstto errmsg("record or row variable cannot be part of multiple-item INTO list"), parser_errposition(yylloc))); } - if (tok == T_DATUM || tok == T_VARRAY_VAR - || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE) { + if (!DB_IS_CMPT(PG_FORMAT) && (tok == T_DATUM || tok == T_VARRAY_VAR + || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE)) { const char* message = "syntax error, expected \",\""; InsertErrorMessage(message, plpgsql_yylloc); ereport(errstate, @@ -11733,8 +11827,8 @@ read_into_array_table_scalar_list(char *initial_name, } } - if (tok == T_DATUM || tok == T_VARRAY_VAR - || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE) { + if (!DB_IS_CMPT(PG_FORMAT) && (tok == T_DATUM || tok == T_VARRAY_VAR + || tok == T_TABLE_VAR || tok == T_PACKAGE_VARIABLE)) { const char* message = "syntax error, expected \",\""; InsertErrorMessage(message, plpgsql_yylloc); ereport(errstate, @@ -11921,7 +12015,7 @@ check_sql_expr(const char *stmt, int location, int leaderlen) u_sess->plsql_cxt.plpgsql_yylloc = plpgsql_yylloc; RawParserHook parser_hook= raw_parser; #if (!defined(ENABLE_MULTIPLE_NODES)) && (!defined(ENABLE_PRIVATEGAUSS)) - if (u_sess->attr.attr_sql.whale || u_sess->attr.attr_sql.whale) { + if (u_sess->attr.attr_sql.whale || u_sess->attr.attr_sql.dolphin) { int id = GetCustomParserId(); if (id >= 0 
&& g_instance.raw_parser_hook[id] != NULL) { parser_hook = (RawParserHook)g_instance.raw_parser_hook[id]; @@ -12001,7 +12095,25 @@ parse_datatype(const char *string, int location) u_sess->plsql_cxt.plpgsql_yylloc = plpgsql_yylloc; /* Let the main parser try to parse it under standard SQL rules */ - parseTypeString(string, &type_id, &typmod); + TypeDependExtend* typeDependExtend = NULL; + if (enable_plpgsql_gsdependency()) { + InstanceTypeNameDependExtend(&typeDependExtend); + CreatePlsqlType oldCreatePlsqlType = u_sess->plsql_cxt.createPlsqlType; + PG_TRY(); + { + set_create_plsql_type_not_check_nsp_oid(); + parseTypeString(string, &type_id, &typmod, typeDependExtend); + set_create_plsql_type(oldCreatePlsqlType); + } + PG_CATCH(); + { + set_create_plsql_type(oldCreatePlsqlType); + PG_RE_THROW(); + } + PG_END_TRY(); + } else { + parseTypeString(string, &type_id, &typmod, typeDependExtend); + } (void)MemoryContextSwitchTo(oldCxt); @@ -12011,11 +12123,11 @@ parse_datatype(const char *string, int location) /* Okay, build a PLpgSQL_type data structure for it */ if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { - return plpgsql_build_datatype(type_id, typmod, 0); + return plpgsql_build_datatype(type_id, typmod, 0, typeDependExtend); } return plpgsql_build_datatype(type_id, typmod, - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_input_collation); + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_input_collation, typeDependExtend); } /* Build a arrary_type by elem_type. 
*/ @@ -12390,6 +12502,9 @@ static PLpgSQL_type* build_type_from_record_var(int dno, int location) /* already build one, just use it */ if(IsPackageDependType(oldtypeoid, pkgoid)) { newtypeoid = oldtypeoid; + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } else { ereport(errstate, (errmodule(MOD_PLSQL), @@ -12429,6 +12544,9 @@ static PLpgSQL_type* build_type_from_record_var(int dno, int location) /* build dependency on created composite type. */ buildDependencyForCompositeType(newtypeoid); + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } /* build datatype of the created composite type. */ @@ -12466,6 +12584,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo Oid oldtypeoid = InvalidOid; Oid newtypeoid = InvalidOid; char* schamaName = NULL; + Oid pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; Oid pkgNamespaceOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->namespaceOid; if (OidIsValid(pkgNamespaceOid)) { schamaName = get_namespace_name(pkgNamespaceOid); @@ -12473,7 +12592,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo pkgNamespaceOid = getCurrentNamespace(); } char* casttypename = CastPackageTypeName(typname, - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid, true, + pkgOid, true, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->is_spec_compiling); if (strlen(casttypename) >= NAMEDATALEN ) { ereport(errstate, @@ -12488,10 +12607,14 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo } oldtypeoid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(casttypename), ObjectIdGetDatum(pkgNamespaceOid)); - if (OidIsValid(oldtypeoid)) { + bool oldTypeOidIsValid = OidIsValid(oldtypeoid); + if (oldTypeOidIsValid) { /* already 
build on, just use it */ - if(IsPackageDependType(oldtypeoid, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid)) { + if(IsPackageDependType(oldtypeoid, pkgOid)) { newtypeoid = oldtypeoid; + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(newtypeoid); + } } else { ereport(errstate, (errmodule(MOD_PLSQL), @@ -12538,6 +12661,9 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo CommandCounterIncrement(); pfree_ext(r); list_free_deep(codeflist); + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + gsplsql_build_ref_type_dependency(newtypeoid); + } } PLpgSQL_type *newtype = NULL; @@ -12553,7 +12679,7 @@ static Oid plpgsql_build_package_record_type(const char* typname, List* list, bo return newtypeoid; } -static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid, char arraytype) +static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid, char arraytype, TypeDependExtend* dependExtend) { char typtyp; ObjectAddress myself, referenced; @@ -12574,8 +12700,25 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid pkgNamespaceOid = getCurrentNamespace(); } + Oid pkgOid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; Oid oldtypeoid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(casttypename), ObjectIdGetDatum(pkgNamespaceOid)); + bool oldtypeoidIsValid = OidIsValid(oldtypeoid); + if (enable_plpgsql_gsdependency() && u_sess->plsql_cxt.need_create_depend) { + char* schemaName = get_namespace_name(pkgNamespaceOid); + char* packageName = GetPackageName(pkgOid); + bool dependUndef = gsplsql_check_type_depend_undefined(schemaName, packageName, typname); + pfree_ext(schemaName); + pfree_ext(packageName); + if (dependUndef) { + ObjectAddress address; + address.classId = TypeRelationId; + address.objectId = oldtypeoid; + address.objectSubId = 0; + performDeletion(&address, 
DROP_CASCADE, PERFORM_DELETION_INTERNAL); + oldtypeoidIsValid = false; + } + } if (OidIsValid(oldtypeoid)) { /* alread build one, just return */ if(IsPackageDependType(oldtypeoid, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid)) { @@ -12594,7 +12737,9 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid if (arraytype == TYPCATEGORY_TABLEOF || arraytype == TYPCATEGORY_TABLEOF_VARCHAR || arraytype == TYPCATEGORY_TABLEOF_INTEGER) { - elemtypoid = get_array_type(elemtypoid); + if (UNDEFINEDOID != elemtypoid) { + elemtypoid = get_array_type(elemtypoid); + } typtyp = TYPTYPE_TABLEOF; } else { typtyp = TYPTYPE_BASE; @@ -12635,7 +12780,8 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid -1, /* typmod */ 0, /* array dimensions for typBaseType */ false, /* Type NOT NULL */ - get_typcollation(elemtypoid)); + get_typcollation(elemtypoid), + dependExtend); CommandCounterIncrement(); @@ -12648,6 +12794,7 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid pfree_ext(casttypename); } + static void plpgsql_build_package_refcursor_type(const char* typname) { CreateSynonymStmt stmt; @@ -12759,7 +12906,7 @@ static Node* make_columnDef_from_attr(PLpgSQL_rec_attr* attr) static TypeName* make_typename_from_datatype(PLpgSQL_type* datatype) { - return makeTypeNameFromOid(datatype->typoid, datatype->atttypmod); + return makeTypeNameFromOid(datatype->typoid, datatype->atttypmod, datatype->dependExtend); } /* @@ -13085,11 +13232,20 @@ static void read_signal_sqlstate(PLpgSQL_stmt_signal *newp, int tok) yyerror("unexpected end of function definition"); } - if (strcmp(yylval.str, "value") == 0) { - if (yylex() != SCONST) { - yyerror("syntax error, the expected value is a string."); + if (tok != SCONST && tok != T_WORD) { + yyerror("syntax error, the expected value is a string."); + } + + if (tok == T_WORD) { + if (strcmp(yylval.str, "value") == 0) { + if (yylex() != 
SCONST) { + yyerror("syntax error, the expected value is a string."); + } + } else { + yyerror("syntax error, the expected word is value."); } } + sqlstate_value = yylval.str; if (strlen(sqlstate_value) != 5 || diff --git a/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp index bca20e221..ccbe17579 100644 --- a/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp @@ -15,6 +15,7 @@ #include "plugin_utils/plpgsql.h" #include "utils/pl_package.h" +#include "utils/plpgsql.h" #include @@ -23,6 +24,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_proc_fn.h" #include "catalog/gs_package.h" +#include "catalog/gs_package_fn.h" #include "catalog/pg_type.h" #include "commands/sqladvisor.h" #include "executor/spi.h" @@ -47,7 +49,9 @@ #include "miscadmin.h" #include "tcop/tcopprot.h" #include "commands/event_trigger.h" - +#include "catalog/gs_dependencies_fn.h" +#include "catalog/pg_object.h" +#include "catalog/pg_type_fn.h" /* functions reference other modules */ extern THR_LOCAL List* baseSearchPath; @@ -77,7 +81,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, extern void plpgsql_compile_error_callback(void* arg); static void add_parameter_name(int item_type, int item_no, const char* name); static void add_dummy_return(PLpgSQL_function* func); -static Node* a_plpgsql_pre_column_ref(ParseState* pstate, ColumnRef* cref); +static Node* plpgsql_pre_column_ref(ParseState* pstate, ColumnRef* cref); static Node* plpgsql_post_column_ref(ParseState* pstate, ColumnRef* cref, Node* var); static Node* plpgsql_param_ref(ParseState* pstate, ParamRef* pref); static Node* resolve_column_ref(ParseState* pstate, PLpgSQL_expr* expr, ColumnRef* cref, bool error_if_no_field); @@ -103,12 +107,6 @@ extern bool is_func_need_cache(Oid funcid, const char* func_name); extern bool plpgsql_check_insert_colocate( Query* query, List* qry_part_attr_num, List* 
trig_part_attr_num, PLpgSQL_function* func); -#ifdef WHALE -static void ReplaceWhaleProBegin(char *proc_source); -#define BEGIN_P_STR " BEGIN_B_PROC " /* used in whale type proc body*/ -#define BEGIN_P_LEN 14 -#define BEGIN_N_STR " BEGIN " /* BEGIN_P_STR to same length*/ -#endif /* ---------- * plpgsql_compile Make an execution tree for a PL/pgSQL function. * @@ -129,13 +127,14 @@ static bool plpgsql_check_search_path(PLpgSQL_function* func, HeapTuple proc_tup return check_search_path_interface(func->fn_searchpath->schemas, proc_tup); } -PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator) +PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator, bool isRecompile) { Oid func_oid = fcinfo->flinfo->fn_oid; PLpgSQL_func_hashkey hashkey; bool function_valid = false; bool hashkey_valid = false; bool isnull = false; + bool func_valid = true; /* * Lookup the pg_proc tuple by Oid; we'll need it in any case */ @@ -154,7 +153,25 @@ PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool for_validator) Datum pkgoiddatum = SysCacheGetAttr(PROCOID, proc_tup, Anum_pg_proc_packageid, &isnull); Oid packageOid = DatumGetObjectId(pkgoiddatum); Oid old_value = saveCallFromPkgOid(packageOid); - + if (enable_plpgsql_gsdependency_guc()) { + if (func == NULL) { + /* Compute hashkey using function signature and actual arg types */ + compute_function_hashkey(proc_tup, fcinfo, proc_struct, &hashkey, for_validator); + hashkey_valid = true; + /* And do the lookup */ + func = plpgsql_HashTableLookup(&hashkey); + } + /** + * only check for func need recompile or not, + */ + if (func_oid >= FirstNormalObjectId) { + func_valid = GetPgObjectValid(func_oid, OBJECT_TYPE_PROC); + } + if (!func_valid) { + fcinfo->flinfo->fn_extra = NULL; + } + } + recheck: if (func == NULL) { /* Compute hashkey using function signature and actual arg types */ @@ -165,10 +182,16 @@ recheck: func = plpgsql_HashTableLookup(&hashkey); } + if (!func_valid && func != 
NULL && !u_sess->plsql_cxt.need_create_depend && + !isRecompile && u_sess->SPI_cxt._connected >= 0 && !u_sess->plsql_cxt.during_compile) { + func->is_need_recompile = true; + } + if (func != NULL) { /* We have a compiled function, but is it still valid? */ if (func->fn_xmin == HeapTupleGetRawXmin(proc_tup) && - ItemPointerEquals(&func->fn_tid, &proc_tup->t_self) && plpgsql_check_search_path(func, proc_tup)) { + ItemPointerEquals(&func->fn_tid, &proc_tup->t_self) && plpgsql_check_search_path(func, proc_tup) && + !isRecompile && !func->is_need_recompile) { function_valid = true; } else { /* @@ -235,9 +258,14 @@ recheck: */ PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; int save_compile_status = getCompileStatus(); + bool save_curr_status = GetCurrCompilePgObjStatus(); PG_TRY(); { + List* ref_obj_list = gsplsql_prepare_recompile_func(func_oid, proc_struct->pronamespace, packageOid, isRecompile); + SetCurrCompilePgObjStatus(true); func = do_compile(fcinfo, proc_tup, func, &hashkey, for_validator); + UpdateCurrCompilePgObjStatus(save_curr_status); + gsplsql_complete_recompile_func(ref_obj_list); (void)CompileStatusSwtichTo(save_compile_status); } PG_CATCH(); @@ -250,6 +278,7 @@ recheck: InsertError(func_oid); } #endif + SetCurrCompilePgObjStatus(save_compile_status); popToOldCompileContext(save_compile_context); (void)CompileStatusSwtichTo(save_compile_status); PG_RE_THROW(); @@ -595,6 +624,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, int* in_arg_varnos = NULL; PLpgSQL_variable** out_arg_variables; Oid pkgoid = InvalidOid; + Oid namespaceOid = InvalidOid; Oid* saved_pseudo_current_userId = NULL; char* signature = NULL; @@ -636,6 +666,8 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, /* Null prokind items are created when there is no procedure */ isFunc = true; } + Datum pronamespaceDatum = SysCacheGetAttr(PROCOID, proc_tup, Anum_pg_proc_pronamespace, 
&isnull); + namespaceOid = DatumGetObjectId(pronamespaceDatum); /* * Setup error traceback support for ereport() */ @@ -699,10 +731,6 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, } u_sess->plsql_cxt.curr_compile_context = curr_compile; pushCompileContext(); -#ifdef WHALE - /* PreCheck And replace BEGIN_B_PROC INTO BEGIN if NEEDED*/ - ReplaceWhaleProBegin(proc_source); -#endif plpgsql_scanner_init(proc_source); curr_compile->plpgsql_curr_compile = func; curr_compile->plpgsql_error_funcname = pstrdup(NameStr(proc_struct->proname)); @@ -724,6 +752,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(curr_compile->compile_cxt); func->fn_signature = pstrdup(signature); func->is_private = BoolGetDatum(proisprivatedatum); + func->namespaceOid = namespaceOid; /* * if function belong to a package, it will use package search path. */ @@ -1205,8 +1234,63 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, * Now parse the function's text */ bool saved_flag = u_sess->plsql_cxt.have_error; - u_sess->plsql_cxt.have_error = false; - parse_rc = plpgsql_yyparse(); + ResourceOwnerData* oldowner = NULL; + int64 stackId = 0; + MemoryContext oldcxt; + volatile bool has_error = false; + if (enable_plpgsql_gsdependency_guc() && u_sess->plsql_cxt.isCreateFunction && !IsInitdb) { + oldowner = t_thrd.utils_cxt.CurrentResourceOwner; + oldcxt = CurrentMemoryContext; + SPI_savepoint_create("createFunction"); + stackId = u_sess->plsql_cxt.nextStackEntryId; + MemoryContextSwitchTo(oldcxt); + bool save_isPerform = u_sess->parser_cxt.isPerform; + PG_TRY(); + { + u_sess->parser_cxt.isPerform = false; + parse_rc = plpgsql_yyparse(); + u_sess->parser_cxt.isPerform = save_isPerform; + SPI_savepoint_release("createFunction"); + stp_cleanup_subxact_resource(stackId); + MemoryContextSwitchTo(oldcxt); + t_thrd.utils_cxt.CurrentResourceOwner = 
oldowner; + } + PG_CATCH(); + { + u_sess->parser_cxt.isPerform = save_isPerform; + SPI_savepoint_rollbackAndRelease("createFunction", InvalidTransactionId); + stp_cleanup_subxact_resource(stackId); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; + MemoryContextSwitchTo(oldcxt); + has_error = true; + ErrorData* edata = &t_thrd.log_cxt.errordata[t_thrd.log_cxt.errordata_stack_depth]; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s", edata->message), + errdetail("N/A"), + errcause("compile package or procedure error."), + erraction("check package or procedure error and redefine"))); + if (edata->sqlerrcode == ERRCODE_OUT_OF_LOGICAL_MEMORY) { + PG_RE_THROW(); + } + FlushErrorState(); + } + PG_END_TRY(); + }else { + bool save_isPerform = u_sess->parser_cxt.isPerform; + u_sess->parser_cxt.isPerform = false; + parse_rc = plpgsql_yyparse(); + u_sess->parser_cxt.isPerform = save_isPerform; + } + if (enable_plpgsql_gsdependency_guc() && has_error) { + plpgsql_scanner_finish(); + pfree_ext(proc_source); + PopOverrideSearchPath(); + u_sess->plsql_cxt.curr_compile_context = popCompileContext(); + clearCompileContext(curr_compile); + return NULL; + } #ifndef ENABLE_MULTIPLE_NODES if (u_sess->plsql_cxt.have_error && u_sess->attr.attr_common.plsql_show_all_error) { u_sess->plsql_cxt.have_error = false; @@ -1332,6 +1416,31 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, if (curr_compile->plpgsql_DumpExecTree) { plpgsql_dumptree(func); } + + if (enable_plpgsql_gsdependency_guc()) { + bool curr_compile_status = GetCurrCompilePgObjStatus(); + if (curr_compile_status) { + bool is_undefined = gsplsql_is_undefined_func(func->fn_oid); + func->isValid = !is_undefined; + + if (!func->isValid && u_sess->plsql_cxt.createPlsqlType == CREATE_PLSQL_TYPE_RECOMPILE) { + GsDependObjDesc obj = gsplsql_construct_func_head_obj(func->fn_oid, func->namespaceOid, func->pkg_oid); + obj.type = 
GSDEPEND_OBJECT_TYPE_PROCHEAD; + gsplsql_do_refresh_proc_header(&obj, &is_undefined); + } + + if (is_undefined && !u_sess->plsql_cxt.compile_has_warning_info) { + u_sess->plsql_cxt.compile_has_warning_info = true; + ereport(WARNING, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The header information of function %s is not defined.", NameStr(proc_struct->proname)))); + } + UpdateCurrCompilePgObjStatus(!is_undefined); + } else { + func->isValid = GetCurrCompilePgObjStatus(); + } + } else { + func->isValid = true; + } /* * add it to the hash table except specified function. */ @@ -1621,9 +1730,9 @@ static void add_dummy_return(PLpgSQL_function* func) * when we are ready to evaluate a SQL query or expression that has not * previously been parsed and planned. */ -void a_plpgsql_parser_setup(struct ParseState* pstate, PLpgSQL_expr* expr) +void plpgsql_parser_setup(struct ParseState* pstate, PLpgSQL_expr* expr) { - pstate->p_pre_columnref_hook = a_plpgsql_pre_column_ref; + pstate->p_pre_columnref_hook = plpgsql_pre_column_ref; pstate->p_post_columnref_hook = plpgsql_post_column_ref; pstate->p_paramref_hook = plpgsql_param_ref; /* no need to use p_coerce_param_hook */ @@ -1633,7 +1742,7 @@ void a_plpgsql_parser_setup(struct ParseState* pstate, PLpgSQL_expr* expr) /* * plpgsql_pre_column_ref parser callback before parsing a ColumnRef */ -static Node* a_plpgsql_pre_column_ref(ParseState* pstate, ColumnRef* cref) +static Node* plpgsql_pre_column_ref(ParseState* pstate, ColumnRef* cref) { PLpgSQL_expr* expr = (PLpgSQL_expr*)pstate->p_ref_hook_state; @@ -1833,14 +1942,7 @@ static Node* resolve_column_ref(ParseState* pstate, PLpgSQL_expr* expr, ColumnRe return NULL; } -#ifdef WHALE - name1 = pg_strtolower(pstrdup(name1)); - name2 = pg_strtolower(pstrdup(name2)); - name3 = pg_strtolower(pstrdup(name3)); - nse = plpgsql_ns_lookup(expr->ns, false, name1, name2, name3, &nnames); -#else nse = plpgsql_ns_lookup(expr->ns, false, name1, name2, name3, &nnames); 
-#endif if (nse == NULL) { return NULL; /* name not known to plpgsql */ @@ -1902,11 +2004,7 @@ static Node* resolve_column_ref(ParseState* pstate, PLpgSQL_expr* expr, ColumnRe fld = (PLpgSQL_recfield*)estate->datums[i]; } if (fld->dtype == PLPGSQL_DTYPE_RECFIELD && fld->recparentno == nse->itemno && -#ifdef WHALE - strcasecmp(fld->fieldname, colname) == 0) { -#else strcmp(fld->fieldname, colname) == 0) { -#endif return make_datum_param(expr, i, cref->location); } } @@ -1939,11 +2037,7 @@ static Node* resolve_column_ref(ParseState* pstate, PLpgSQL_expr* expr, ColumnRe row = (PLpgSQL_row*)estate->datums[nse->itemno]; } for (i = 0; i < row->nfields; i++) { -#ifdef WHALE - if (row->fieldnames[i] && strcasecmp(row->fieldnames[i], colname) == 0) { -#else if (row->fieldnames[i] && strcmp(row->fieldnames[i], colname) == 0) { -#endif switch (nnames_wholerow) { case 2: /* row.col */ return make_datum_param(expr, row->varnos[i], cref->location); @@ -2099,7 +2193,7 @@ void getTableofTypeFromVar(PLpgSQL_var* var, int* collectionType, Oid* tableofIn } } -HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofIndexType, int32* typMod) +HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofIndexType, int32* typMod, TypeDependExtend* dependExtend) { if (u_sess->plsql_cxt.curr_compile_context == NULL) { return NULL; @@ -2107,28 +2201,31 @@ HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofInd PLpgSQL_datum* datum = NULL; char* field = NULL; - + char* schemaName = NULL; + char* packageName = NULL; + char* objectName = NULL; /* find row var and field first */ switch (list_length(nameList)) { case 2: { + objectName = strVal(linitial(nameList)); datum = plpgsql_lookup_datum(false, strVal(linitial(nameList)), NULL, NULL, NULL); field = strVal(lsecond(nameList)); break; } case 3: { - char* word1 = strVal(linitial(nameList)); - char* word2 = strVal(lsecond(nameList)); - List *names2 = list_make2(makeString(word1), 
makeString(word2)); + packageName = strVal(linitial(nameList)); + objectName = strVal(lsecond(nameList)); + List *names2 = list_make2(makeString(packageName), makeString(objectName)); datum = GetPackageDatum(names2); list_free_ext(names2); field = strVal(lthird(nameList)); break; } case 4: { - char* word1 = strVal(linitial(nameList)); - char* word2 = strVal(lsecond(nameList)); - char* word3 = strVal(lthird(nameList)); - List *names3 = list_make3(makeString(word1), makeString(word2), makeString(word3)); + schemaName = strVal(linitial(nameList)); + packageName = strVal(lsecond(nameList)); + objectName = strVal(lthird(nameList)); + List *names3 = list_make3(makeString(schemaName), makeString(packageName), makeString(objectName)); datum = GetPackageDatum(names3); list_free_ext(names3); field = strVal(lfourth(nameList)); @@ -2178,6 +2275,12 @@ HeapTuple FindRowVarColType(List* nameList, int* collectionType, Oid* tableofInd return NULL; } + if (enable_plpgsql_gsdependency() && NULL != dependExtend) { + dependExtend->schemaName = schemaName; + dependExtend->packageName = packageName; + dependExtend->objectName = objectName; + } + HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typOid)); /* should not happen */ if (!HeapTupleIsValid(tup)) { @@ -3059,11 +3162,26 @@ PLpgSQL_type* plpgsql_parse_wordtype(char* ident) return NULL; } +static PLpgSQL_type* gsplsql_make_type_for_pkg_var_ref_type(GsDependObjDesc* obj, PLpgSQL_datum* datum, + TypeDependExtend* dependExtend) +{ + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->schemaName = pstrdup(obj->schemaName); + dependExtend->packageName = pstrdup(obj->packageName); + dependExtend->objectName = pstrdup(obj->name); + PLpgSQL_var* var = (PLpgSQL_var*)datum; + PLpgSQL_type* type = plpgsql_build_datatype(var->datatype->typoid, var->datatype->atttypmod, + var->datatype->collation, dependExtend); + type->collectionType = var->datatype->collectionType; + type->tableOfIndexType = var->datatype->tableOfIndexType; + 
return type; +} + /* ---------- * plpgsql_parse_cwordtype Same lookup for compositeword%TYPE * ---------- */ -PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) +PLpgSQL_type* plpgsql_parse_cwordtype(List* idents, TypeDependExtend* dependExtend) { PLpgSQL_type* dtype = NULL; PLpgSQL_nsitem* nse = NULL; @@ -3143,6 +3261,21 @@ PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) goto done; } fldname = strVal(lthird(idents)); + } else if (enable_plpgsql_gsdependency_guc()) { + GsDependObjDesc objDesc; + Oid schemaOId = gsplsql_parse_pkg_var_obj4(&objDesc, idents); + if (!OidIsValid(schemaOId) || !OidIsValid(PackageNameGetOid(objDesc.packageName, schemaOId))) { + goto done; + } + List* new_var_name = list_make3(makeString(objDesc.schemaName), makeString(objDesc.packageName), + makeString(objDesc.name)); + PLpgSQL_datum* datum = GetPackageDatum(new_var_name); + list_free_ext(new_var_name); + if (datum != NULL && datum->dtype == PLPGSQL_DTYPE_VAR) { + MemoryContextSwitchTo(old_cxt); + return gsplsql_make_type_for_pkg_var_ref_type(&objDesc, datum, dependExtend); + } + goto done; } else { goto done; } @@ -3184,6 +3317,13 @@ PLpgSQL_type* plpgsql_parse_cwordtype(List* idents) */ MemoryContextSwitchTo(old_cxt); dtype = build_datatype(type_tup, attr_struct->atttypmod, attr_struct->attcollation); + if (enable_plpgsql_gsdependency() && NULL != dtype) { + Oid typ_oid = get_rel_type_id(class_oid); + AssertEreport(InvalidOid != typ_oid, MOD_PLSQL, "all relation must have type"); + dtype->dependExtend = dependExtend; + InstanceTypeNameDependExtend(&dtype->dependExtend); + dtype->dependExtend->typeOid = typ_oid; + } MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); done: @@ -3215,20 +3355,45 @@ PLpgSQL_type* plpgsql_parse_wordrowtype(char* ident) * but no need to collect more errdetails. 
*/ (void)RelnameGetRelidExtended(ident, &class_oid); - + Oid typ_oid = InvalidOid; + TypeDependExtend* dependExtend = NULL; if (!OidIsValid(class_oid)) { char message[MAXSTRLEN]; errno_t rc = 0; rc = sprintf_s(message, MAXSTRLEN, "relation \"%s\" does not exist when parse word.", ident); securec_check_ss(rc, "", ""); InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc, true); - ereport(ERROR, - (errmodule(MOD_PLSQL), - errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("relation \"%s\" does not exist when parse word.", ident))); + if (enable_plpgsql_undefined()) { + RangeVar rangvar; + rangvar.schemaname = NULL; + rangvar.relname = ident; + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(&rangvar); + if (OidIsValid(undefRefObjOid)) { + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + typ_oid = UNDEFINEDOID; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } else { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } + } else { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation \"%s\" does not exist when parse word.", ident))); + } + } else { + typ_oid = get_rel_type_id(class_oid); } /* Build and return the row type struct */ - return plpgsql_build_datatype(get_rel_type_id(class_oid), -1, InvalidOid); + return plpgsql_build_datatype(typ_oid, -1, InvalidOid, dependExtend); } /* ---------- @@ -3242,23 +3407,64 @@ PLpgSQL_type* plpgsql_parse_cwordrowtype(List* idents) RangeVar* relvar = NULL; MemoryContext old_cxt = NULL; - if (list_length(idents) != 2) { + if (!enable_plpgsql_gsdependency_guc() && list_length(idents) != 2) { return NULL; } + switch (list_length(idents)) + { + case 1: + relvar = makeRangeVar(NULL, 
strVal(linitial(idents)), -1); + break; + case 2: + relvar = makeRangeVar(strVal(linitial(idents)), strVal(lsecond(idents)), -1); + break; + case 3: + relvar = makeRangeVar(strVal(lsecond(idents)), strVal(lthird(idents)), -1); + relvar->catalogname = strVal(linitial(idents)); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("improper %%ROWTYPE reference"))); + break; + } /* Avoid memory leaks in long-term function context */ old_cxt = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); /* Look up relation name. Can't lock it - we might not have privileges. */ - relvar = makeRangeVar(strVal(linitial(idents)), strVal(lsecond(idents)), -1); - + Oid typ_oid = InvalidOid; + TypeDependExtend* dependExtend = NULL; /* Here relvar is allowed to be a synonym object. */ - class_oid = RangeVarGetRelidExtended(relvar, NoLock, false, false, false, true, NULL, NULL); + if (!enable_plpgsql_undefined()) { + class_oid = RangeVarGetRelidExtended(relvar, NoLock, false, false, false, true, NULL, NULL); + pfree_ext(relvar); + typ_oid = get_rel_type_id(class_oid); + } else { + class_oid = RangeVarGetRelidExtended(relvar, NoLock, true, false, false, true, NULL, NULL); + typ_oid = get_rel_type_id(class_oid); + if (!OidIsValid(typ_oid) && enable_plpgsql_undefined()) { + Oid undefRefObjOid = gsplsql_try_build_exist_schema_undef_table(relvar); + pfree_ext(relvar); + if (OidIsValid(undefRefObjOid)) { + InstanceTypeNameDependExtend(&dependExtend); + dependExtend->undefDependObjOid = undefRefObjOid; + dependExtend->dependUndefined = true; + typ_oid = UNDEFINEDOID; + } + } + if (!OidIsValid(typ_oid) || UNDEFINEDOID == typ_oid) { + ereport((typ_oid == UNDEFINEDOID ? 
WARNING : ERROR), + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("relation does not exist when parse word."))); + } + } + MemoryContextSwitchTo(old_cxt); /* Build and return the row type struct */ - return plpgsql_build_datatype(get_rel_type_id(class_oid), -1, InvalidOid); + return plpgsql_build_datatype(typ_oid, -1, InvalidOid, dependExtend); } /* cursor generate a composite type, find its col type */ @@ -3994,7 +4200,7 @@ PLpgSQL_row* build_row_from_rec_type(const char* rowname, int lineno, PLpgSQL_re * If collation is not InvalidOid then it overrides the type's default * collation. But collation is ignored if the datatype is non-collatable. */ -PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation) +PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation, TypeDependExtend* type_depend_extend) { HeapTuple type_tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); if (!HeapTupleIsValid(type_tup)) { @@ -4027,7 +4233,11 @@ PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation) typ = build_datatype(type_tup, typmod, collation); } ReleaseSysCache(type_tup); - + if (enable_plpgsql_gsdependency() && NULL != typ) { + InstanceTypeNameDependExtend(&type_depend_extend); + typ->dependExtend = type_depend_extend; + typ->dependExtend->typeOid = typeOid; + } return typ; } @@ -4069,6 +4279,7 @@ PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation) case TYPTYPE_DOMAIN: case TYPTYPE_ENUM: case TYPTYPE_RANGE: + case TYPTYPE_UNDEFINE: typ->ttype = PLPGSQL_TTYPE_SCALAR; break; case TYPTYPE_COMPOSITE: @@ -4099,6 +4310,7 @@ PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation) typ->typrelid = type_struct->typrelid; typ->typioparam = getTypeIOParam(type_tup); typ->collation = type_struct->typcollation; + typ->dependExtend = NULL; if (OidIsValid(collation) && OidIsValid(typ->collation)) { typ->collation = collation; } @@ -4460,7 +4672,7 @@ PLpgSQL_condition* 
plpgsql_parse_err_condition_b(const char* condname) if (prev == NULL) { prev = plpgsql_parse_err_condition_b_signal(condname); } - + return prev; } @@ -4958,6 +5170,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP expr->func->datums = u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums; expr->func->ndatums = u_sess->plsql_cxt.curr_compile_context->plpgsql_nDatums; TupleDesc tupleDesc = NULL; + NodeTag old_node_tag = t_thrd.postgres_cxt.cur_command_tag; PG_TRY(); { List* parsetreeList = pg_parse_query(expr->query); @@ -4969,6 +5182,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP List* queryList = NIL; foreach(cell, parsetreeList) { Node *parsetree = (Node *)lfirst(cell); + t_thrd.postgres_cxt.cur_command_tag = transform_node_tag(parsetree); if (nodeTag(parsetree) == T_SelectStmt) { if (checkSelectIntoParse((SelectStmt*)parsetree)) { list_free_deep(parsetreeList); @@ -4992,7 +5206,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP } } queryList = pg_analyze_and_rewrite_params(parsetree, expr->query, - (ParserSetupHook)a_plpgsql_parser_setup, (void*)expr); + (ParserSetupHook)plpgsql_parser_setup, (void*)expr); } if (queryList == NULL) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -5015,6 +5229,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP } PG_CATCH(); { + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; /* Save error info */ MemoryContext ecxt = MemoryContextSwitchTo(current_context); ErrorData* edata = CopyErrorData(); @@ -5035,6 +5250,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP } PG_END_TRY(); + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; return tupleDesc; } static int get_inner_type_ind(Oid typeoid) @@ -5060,7 +5276,7 @@ Node* plpgsql_check_match_var(Node* node, ParseState* pstate, ColumnRef* cref) { Node* ans = NULL; if (node != NULL && IsA(node, Var) && 
- pstate->p_pre_columnref_hook == a_plpgsql_pre_column_ref && + pstate->p_pre_columnref_hook == plpgsql_pre_column_ref && cref->indnum > 0) { Var* colvar = (Var*)node; if (get_inner_type_ind(colvar->vartype) -1 != cref->indnum) { @@ -5069,17 +5285,3 @@ Node* plpgsql_check_match_var(Node* node, ParseState* pstate, ColumnRef* cref) } return ans; } - -#ifdef WHALE -void ReplaceWhaleProBegin(char *proc_source) -{ - errno_t rc = EOK; - if (pg_strncasecmp(proc_source, BEGIN_P_STR, BEGIN_P_LEN) == 0) { - GetSessionContext()->is_a_declare = true; - rc = memcpy_s(proc_source, strlen(proc_source), BEGIN_N_STR, BEGIN_P_LEN); - securec_check(rc, "\0", "\0"); - } else { - GetSessionContext()->is_a_declare = false; - } -} -#endif diff --git a/contrib/whale/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/whale/plugin_pl/plpgsql/src/pl_handler.cpp index f84bfac54..ee0fdc51e 100644 --- a/contrib/whale/plugin_pl/plpgsql/src/pl_handler.cpp +++ b/contrib/whale/plugin_pl/plpgsql/src/pl_handler.cpp @@ -43,6 +43,8 @@ #include "executor/spi_priv.h" #include "distributelayer/streamMain.h" #include "commands/event_trigger.h" +#include "catalog/pg_object.h" +#include "catalog/gs_dependencies_fn.h" #ifdef STREAMPLAN #include "optimizer/streamplan.h" @@ -56,6 +58,8 @@ PG_MODULE_MAGIC; #endif #define MAXSTRLEN ((1 << 11) - 1) +static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, int oldCompileStatus, + PLpgSQL_compile_context *curr_compile, List *temp_tableof_index, MemoryContext oldcxt); static void auditExecPLpgSQLFunction(PLpgSQL_function* func, AuditResult result) { char details[PGAUDIT_MAXLENGTH]; @@ -779,6 +783,8 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) #endif int connect = SPI_connectid(); Oid firstLevelPkgOid = InvalidOid; + bool save_need_create_depend = u_sess->plsql_cxt.need_create_depend; + bool save_curr_status = GetCurrCompilePgObjStatus(); PG_TRY(); { PGSTAT_START_PLSQL_TIME_RECORD(); @@ -789,7 +795,33 @@ Datum 
a_plpgsql_call_handler(PG_FUNCTION_ARGS) bool saved_current_stp_with_exception = plpgsql_get_current_value_stp_with_exception(); /* Find or compile the function */ if (func == NULL) { + u_sess->plsql_cxt.compile_has_warning_info = false; + SetCurrCompilePgObjStatus(true); + if (enable_plpgsql_gsdependency_guc()) { + if (gsplsql_is_undefined_func(func_oid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), + (errmsg("\"%s\" header is undefined, you can try to recreate", get_func_name(func_oid))))); + } + if (GetPgObjectValid(func_oid, OBJECT_TYPE_PROC)) { + u_sess->plsql_cxt.need_create_depend = false; + } else { + u_sess->plsql_cxt.need_create_depend = true; + } + } func = plpgsql_compile(fcinfo, false); + if (func == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_FUNCTION_PROVIDED), errmodule(MOD_PLSQL), + errmsg("compile function error."), + errdetail("It may be because the compilation encountered an error and the exception was caught."), + errcause("compile procedure error."), + erraction("compile function result is null, it has error"))); + } + if (enable_plpgsql_gsdependency_guc()) { + if (!OidIsValid(func->pkg_oid)) { + SetPgObjectValid(func_oid, OBJECT_TYPE_PROC, true); + } + } + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; } if (func->fn_readonly) { stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_IMMUTABLE); @@ -818,6 +850,7 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) /* Must save and restore prior value of cur_estate and debug_info */ save_cur_estate = func->cur_estate; save_debug_info = func->debug; + NodeTag old_node_tag = t_thrd.postgres_cxt.cur_command_tag; // set the procedure's search_path as the current search_path validate_search_path(func); @@ -889,7 +922,7 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) u_sess->plsql_cxt.cur_exception_cxt = NULL; t_thrd.log_cxt.call_stack = saveplcallstack; - + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; #ifndef ENABLE_MULTIPLE_NODES /* for restore 
parent session and automn session package var values */ @@ -967,6 +1000,7 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) DecreasePackageUseCount(func); func->cur_estate = save_cur_estate; func->debug = save_debug_info; + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; // resume the search_path when the procedure has executed PopOverrideSearchPath(); @@ -979,6 +1013,8 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) } PG_CATCH(); { + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; + SetCurrCompilePgObjStatus(save_curr_status); /* clean stp save pointer if the outermost function is end. */ if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; @@ -1000,7 +1036,7 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) /* destory all the SPI connect created in this PL function. */ SPI_disconnect(connect); - + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; /* re-throw the original error messages */ ReThrowError(edata); } @@ -1030,6 +1066,7 @@ Datum a_plpgsql_call_handler(PG_FUNCTION_ARGS) u_sess->opt_cxt.is_stream_support = outer_is_stream_support; } #endif + UpdateCurrCompilePgObjStatus(save_curr_status); if (has_switch) { SetUserIdAndSecContext(old_user, save_sec_context); u_sess->exec_cxt.cast_owner = InvalidOid; @@ -1136,6 +1173,8 @@ Datum a_plpgsql_inline_handler(PG_FUNCTION_ARGS) save_compile_context = u_sess->plsql_cxt.curr_compile_context; int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); int save_compile_status = u_sess->plsql_cxt.compile_status; + DebugInfo* save_debug_info = func->debug; + NodeTag old_node_tag = t_thrd.postgres_cxt.cur_command_tag; FormatCallStack* saveplcallstack = t_thrd.log_cxt.call_stack; PG_TRY(); { @@ -1157,11 +1196,22 @@ Datum a_plpgsql_inline_handler(PG_FUNCTION_ARGS) DecreasePackageUseCount(func); #ifndef ENABLE_MULTIPLE_NODES + /* debug finished, close debug resource */ + if (func->debug) { + /* if debuger is waiting for end msg, send 
end */ + server_send_end_msg(func->debug); + /* pass opt to upper debug function */ + server_pass_upper_debug_opt(func->debug); + clean_up_debug_server(func->debug, false, true); + delete_debug_func(InvalidOid); + } + func->debug = save_debug_info; /* for restore parent session and automn session package var values */ (void)processAutonmSessionPkgsInException(func); dopControl.ResetSmp(); #endif + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), errmsg("%s clear curr_compile_context because of error.", __func__))); /* reset nest plpgsql compile */ @@ -1177,6 +1227,19 @@ Datum a_plpgsql_inline_handler(PG_FUNCTION_ARGS) PG_RE_THROW(); } PG_END_TRY(); +#ifndef ENABLE_MULTIPLE_NODES + /* debug finished, close debug resource */ + if (func->debug) { + /* if debuger is waiting for end msg, send end */ + server_send_end_msg(func->debug); + /* pass opt to upper debug function */ + server_pass_upper_debug_opt(func->debug); + clean_up_debug_server(func->debug, false, true); + delete_debug_func(InvalidOid); + } + func->debug = save_debug_info; +#endif + t_thrd.postgres_cxt.cur_command_tag = old_node_tag; if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; } @@ -1314,6 +1377,7 @@ Datum a_plpgsql_validator(PG_FUNCTION_ARGS) } ReleaseSysCache(tuple); + bool save_curr_status = GetCurrCompilePgObjStatus(); /* Postpone body checks if !u_sess->attr.attr_sql.check_function_bodies */ if (u_sess->attr.attr_sql.check_function_bodies) { FunctionCallInfoData fake_fcinfo; @@ -1351,12 +1415,14 @@ Datum a_plpgsql_validator(PG_FUNCTION_ARGS) /* Test-compile the function */ PG_TRY(); { + SetCurrCompilePgObjStatus(true); u_sess->parser_cxt.isCreateFuncOrProc = true; func = plpgsql_compile(&fake_fcinfo, true); u_sess->parser_cxt.isCreateFuncOrProc = false; } PG_CATCH(); { + SetCurrCompilePgObjStatus(save_curr_status); #ifndef ENABLE_MULTIPLE_NODES u_sess->parser_cxt.isPerform = false; bool 
insertError = (u_sess->attr.attr_common.plsql_show_all_error || @@ -1415,6 +1481,7 @@ Datum a_plpgsql_validator(PG_FUNCTION_ARGS) pl_validate_function_sql(func, replace); u_sess->ClientAuthInProgress = saved_client_auth; } + UpdateCurrCompilePgObjStatus(save_curr_status); } #ifndef ENABLE_MULTIPLE_NODES if (!IsInitdb && u_sess->plsql_cxt.isCreateFunction) { @@ -1554,7 +1621,7 @@ void FunctionInPackageCompile(PLpgSQL_package* pkg) * ---------- */ #ifndef ENABLE_MULTIPLE_NODES -void PackageInit(PLpgSQL_package* pkg, bool isCreate) +void PackageInit(PLpgSQL_package* pkg, bool isCreate, bool isSpec, bool isNeedCompileFunc) { if (likely(pkg != NULL)) { if (likely(pkg->isInit)) { @@ -1567,23 +1634,23 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) PushOverrideSearchPath(pkg->pkg_searchpath); ListCell* cell = NULL; int oldCompileStatus = getCompileStatus(); - if (isCreate) { - CompileStatusSwtichTo(COMPILIE_PKG); - } + CompileStatusSwtichTo(COMPILIE_PKG); PLpgSQL_compile_context* curr_compile = createCompileContext("PL/pgSQL package context"); SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt); MemoryContext temp = NULL; - if (u_sess->plsql_cxt.curr_compile_context != NULL) { + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt != NULL) { temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); } u_sess->plsql_cxt.curr_compile_context = curr_compile; pushCompileContext(); curr_compile->plpgsql_curr_compile_package = pkg; checkCompileMemoryContext(pkg->pkg_cxt); + MemoryContext oldcxt = MemoryContextSwitchTo(pkg->pkg_cxt); if (isCreate) { int exception_num = 0; - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + curr_compile->compile_tmp_cxt = oldcxt; processPackageProcList(pkg); foreach(cell, pkg->proc_list) { @@ -1598,9 +1665,11 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) } PG_CATCH(); { + set_create_plsql_type_end(); if 
(u_sess->plsql_cxt.create_func_error) { u_sess->plsql_cxt.create_func_error = false; exception_num += 1; + FlushErrorState(); } else { PG_RE_THROW(); } @@ -1620,16 +1689,15 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) errcause("compile procedure error."), erraction("check procedure error and redefine procedure"))); } - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); } else { - if (pkg->is_bodycompiled) { + if (pkg->is_bodycompiled && !isSpec && isNeedCompileFunc) { (void)CompileStatusSwtichTo(COMPILIE_PKG_FUNC); - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + curr_compile->compile_tmp_cxt = oldcxt; FunctionInPackageCompile(pkg); - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); (void)CompileStatusSwtichTo(oldCompileStatus); } } + (void*)MemoryContextSwitchTo(oldcxt); if (u_sess->attr.attr_common.plsql_show_all_error) { PopOverrideSearchPath(); ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), @@ -1651,71 +1719,43 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); int save_compile_status = u_sess->plsql_cxt.compile_status; List* temp_tableof_index = NULL; + bool save_is_package_instantiation = u_sess->plsql_cxt.is_package_instantiation; + bool needExecDoStmt = true; + if (enable_plpgsql_undefined()) { + needExecDoStmt = GetCurrCompilePgObjStatus(); + } + ResourceOwnerData* oldowner = NULL; + int64 stackId = 0; + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + oldowner = t_thrd.utils_cxt.CurrentResourceOwner; + SPI_savepoint_create("PackageInit"); + stackId = u_sess->plsql_cxt.nextStackEntryId; + } + bool save_isPerform = u_sess->parser_cxt.isPerform; PG_TRY(); { u_sess->plsql_cxt.is_package_instantiation = true; - foreach(cell, pkg->proc_list) { - if (IsA(lfirst(cell), DoStmt)) { - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); - DoStmt* doStmt = 
(DoStmt*)lfirst(cell); - if (!isCreate) { - if (!doStmt->isExecuted) { - (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); - temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; - u_sess->plsql_cxt.func_tableof_index = NULL; - if (u_sess->SPI_cxt._connected > -1 && - u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { - SPI_STACK_LOG("begin", NULL, NULL); - _SPI_begin_call(false); - ExecuteDoStmt(doStmt, true); - SPI_STACK_LOG("end", NULL, NULL); - _SPI_end_call(false); - } else { - ExecuteDoStmt(doStmt, true); - } - if (!doStmt->isSpec) { - pkg->isInit = true; - - } - free_func_tableof_index(); - u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; - (void)CompileStatusSwtichTo(oldCompileStatus); - doStmt->isExecuted = true; - } - } else { - if (doStmt->isSpec && !doStmt->isExecuted) { - (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); - temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; - u_sess->plsql_cxt.func_tableof_index = NULL; - if (u_sess->SPI_cxt._connected > -1 && - u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { - SPI_STACK_LOG("begin", NULL, NULL); - _SPI_begin_call(false); - ExecuteDoStmt(doStmt, true); - SPI_STACK_LOG("end", NULL, NULL); - _SPI_end_call(false); - } else if (!doStmt->isExecuted) { - ExecuteDoStmt(doStmt, true); - } - free_func_tableof_index(); - u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; - (void)CompileStatusSwtichTo(oldCompileStatus); - doStmt->isExecuted = true; - } - } - (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); - } + if (needExecDoStmt) { + init_do_stmt(pkg, isCreate, cell, oldCompileStatus, curr_compile, temp_tableof_index, oldcxt); + } + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + SPI_savepoint_release("PackageInit"); + stp_cleanup_subxact_resource(stackId); + MemoryContextSwitchTo(oldcxt); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; } stp_reset_xact_state_and_err_msg(oldStatus, needResetErrMsg); - 
u_sess->plsql_cxt.is_package_instantiation = false; + u_sess->plsql_cxt.is_package_instantiation = save_is_package_instantiation; ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), errmsg("%s finish compile, level: %d", __func__, list_length(u_sess->plsql_cxt.compile_context_list)))); u_sess->plsql_cxt.curr_compile_context = popCompileContext(); CompileStatusSwtichTo(oldCompileStatus); clearCompileContext(curr_compile); + PopOverrideSearchPath(); } PG_CATCH(); { + u_sess->parser_cxt.isPerform = save_isPerform; stp_reset_xact_state_and_err_msg(oldStatus, needResetErrMsg); u_sess->plsql_cxt.is_package_instantiation = false; free_temp_func_tableof_index(temp_tableof_index); @@ -1726,10 +1766,35 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate) u_sess->plsql_cxt.curr_compile_context = save_compile_context; u_sess->plsql_cxt.compile_status = save_compile_status; clearCompileContextList(save_compile_list_length); - PG_RE_THROW(); + u_sess->plsql_cxt.curr_compile_context = popCompileContext(); + /*avoid memeory leak*/ + clearCompileContext(curr_compile); + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + SPI_savepoint_rollbackAndRelease("PackageInit", InvalidTransactionId); + stp_cleanup_subxact_resource(stackId); + if (likely(u_sess->SPI_cxt._curid >= 0)) { + if (likely(u_sess->SPI_cxt._current == &(u_sess->SPI_cxt._stack[u_sess->SPI_cxt._curid]))) { + _SPI_end_call(true); + } + } + SPI_finish(); + t_thrd.utils_cxt.CurrentResourceOwner = oldowner; + MemoryContextSwitchTo(oldcxt); + ErrorData* edata = &t_thrd.log_cxt.errordata[t_thrd.log_cxt.errordata_stack_depth]; + ereport(WARNING, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s", edata->message), + errdetail("N/A"), + errcause("compile package or procedure error."), + erraction("check package or procedure error and redefine"))); + FlushErrorState(); + } else { + PG_RE_THROW(); + } } PG_END_TRY(); - PopOverrideSearchPath(); + 
MemoryContextSwitchTo(oldcxt); restoreCallFromPkgOid(old_value); } #endif @@ -1849,3 +1914,62 @@ void DecreasePackageUseCount(PLpgSQL_function* func) } } +static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, int oldCompileStatus, + PLpgSQL_compile_context *curr_compile, List *temp_tableof_index, MemoryContext oldcxt) +{ + foreach(cell, pkg->proc_list) { + if (IsA(lfirst(cell), DoStmt)) { + curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + DoStmt* doStmt = (DoStmt*)lfirst(cell); + if (!isCreate) { + if (!doStmt->isExecuted) { + (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); + temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; + u_sess->plsql_cxt.func_tableof_index = NULL; + if (u_sess->SPI_cxt._connected > -1 && + u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { + SPI_STACK_LOG("begin", NULL, NULL); + _SPI_begin_call(false); + ExecuteDoStmt(doStmt, true); + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(false); + } else { + ExecuteDoStmt(doStmt, true); + } + if (!doStmt->isSpec) { + pkg->isInit = true; + + } + free_func_tableof_index(); + u_sess->plsql_cxt.func_tableof_index = temp_tableof_index; + (void)CompileStatusSwtichTo(oldCompileStatus); + doStmt->isExecuted = true; + } + } else { + if (isCreate && enable_plpgsql_gsdependency_guc() && !IsInitdb) { + MemoryContextSwitchTo(oldcxt); + } + if (doStmt->isSpec && !doStmt->isExecuted) { + (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK); + temp_tableof_index = u_sess->plsql_cxt.func_tableof_index; + u_sess->plsql_cxt.func_tableof_index = NULL; + if (u_sess->SPI_cxt._connected > -1 && + u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) { + SPI_STACK_LOG("begin", NULL, NULL); + _SPI_begin_call(false); + ExecuteDoStmt(doStmt, true); + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(false); + } else if (!doStmt->isExecuted) { + ExecuteDoStmt(doStmt, true); + } + free_func_tableof_index(); + u_sess->plsql_cxt.func_tableof_index = 
temp_tableof_index; + (void)CompileStatusSwtichTo(oldCompileStatus); + doStmt->isExecuted = true; + } + } + (void*)MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); + } + } +} diff --git a/contrib/whale/plugin_utility.cpp b/contrib/whale/plugin_utility.cpp index d18b64987..206d4496e 100644 --- a/contrib/whale/plugin_utility.cpp +++ b/contrib/whale/plugin_utility.cpp @@ -470,6 +470,7 @@ static void check_xact_readonly(Node* parse_tree) case T_AlterDatabaseSetStmt: case T_AlterDomainStmt: case T_AlterFunctionStmt: + case T_CompileStmt: case T_AlterRoleSetStmt: case T_AlterObjectSchemaStmt: case T_AlterOwnerStmt: @@ -3382,13 +3383,21 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, #endif PG_TRY(); { + set_create_plsql_type_start(); + u_sess->plsql_cxt.isCreatePkg = true; CreatePackageCommand((CreatePackageStmt*)parse_tree, query_string); + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; } PG_CATCH(); { if (u_sess->plsql_cxt.debug_query_string) { pfree_ext(u_sess->plsql_cxt.debug_query_string); } + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; PG_RE_THROW(); } PG_END_TRY(); @@ -3402,13 +3411,21 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, #endif PG_TRY(); { + set_create_plsql_type_start(); + u_sess->plsql_cxt.isCreatePkg = true; CreatePackageBodyCommand((CreatePackageBodyStmt*)parse_tree, query_string); + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; } PG_CATCH(); { if (u_sess->plsql_cxt.debug_query_string) { pfree_ext(u_sess->plsql_cxt.debug_query_string); } + set_create_plsql_type_end(); + set_function_style_none(); + u_sess->plsql_cxt.isCreatePkg = false; PG_RE_THROW(); } PG_END_TRY(); @@ -5719,10 +5736,15 @@ ProcessUtilitySlow(Node *parse_tree, { PG_TRY(); { + set_create_plsql_type_start(); address = CreateFunction((CreateFunctionStmt*)parse_tree, 
query_string, InvalidOid); + set_create_plsql_type_end(); + set_function_style_none(); } PG_CATCH(); { + set_create_plsql_type_end(); + set_function_style_none(); #ifndef ENABLE_MULTIPLE_NODES CreateFunctionStmt* stmt = (CreateFunctionStmt*)parse_tree; char* schemaname = NULL; @@ -5827,6 +5849,29 @@ ProcessUtilitySlow(Node *parse_tree, #endif } break; + case T_CompileStmt: + { + if (u_sess->SPI_cxt._connected == -1) { + plpgsql_hashtable_clear_invalid_obj(true); + } + u_sess->plsql_cxt.during_compile = true; + u_sess->plsql_cxt.isCreateFunction = true; + if (!enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.during_compile = false; + ereport(ERROR, (errmsg("This operation is not supported."))); + break; + } + u_sess->plsql_cxt.is_alter_compile_stmt = true; + CompileStmt* tmpStmt = (CompileStmt*)parse_tree; + if (tmpStmt->compileItem == COMPILE_FUNCTION || tmpStmt->compileItem == COMPILE_PROCEDURE) { + RecompileFunction(tmpStmt); + } else { + RecompilePackage(tmpStmt); + } + u_sess->plsql_cxt.during_compile = false; + u_sess->plsql_cxt.is_alter_compile_stmt = false; + } break; + case T_IndexStmt: /* CREATE INDEX */ { IndexStmt* stmt = (IndexStmt*)parse_tree; @@ -8452,6 +8497,29 @@ static const char* AlterObjectTypeCommandTag(ObjectType obj_type) return tag; } +static const char* CompileTag(CompileEntry compileItem) +{ + const char* tag = NULL; + switch (compileItem) { + case COMPILE_PROCEDURE: + tag = "ALTER PROCEDURE"; + break; + case COMPILE_FUNCTION: + tag = "ALTER FUNCTION"; + break; + case COMPILE_PACKAGE: + tag = "ALTER PACKAGE"; + break; + case COMPILE_PKG_SPECIFICATION: + tag = "ALTER PACKAGE SPECIFICATION"; + break; + case COMPILE_PKG_BODY: + tag = "ALTER PACKAGE BODY"; + break; + } + return tag; +} + /* * CreateCommandTag * utility to get a string representation of the command operation, @@ -8870,9 +8938,11 @@ const char* CreateCommandTag(Node* parse_tree) tag = "COPY"; break; - case T_RenameStmt: - tag = 
AlterObjectTypeCommandTag(((RenameStmt*)parse_tree)->renameType); + case T_RenameStmt: { + ObjectType RenameType = ((RenameStmt*)parse_tree)->renameType == OBJECT_COLUMN ? ((RenameStmt*)parse_tree)->relationType:((RenameStmt*)parse_tree)->renameType; + tag = AlterObjectTypeCommandTag(RenameType); break; + } case T_AlterObjectSchemaStmt: tag = AlterObjectTypeCommandTag(((AlterObjectSchemaStmt*)parse_tree)->objectType); @@ -8894,6 +8964,17 @@ const char* CreateCommandTag(Node* parse_tree) tag = "ALTER FUNCTION"; break; + case T_CompileStmt: { + u_sess->plsql_cxt.during_compile = true; + if (!enable_plpgsql_gsdependency_guc()) { + u_sess->plsql_cxt.during_compile = false; + ereport(ERROR, (errmsg("This operation is not supported."))); + break; + } + CompileStmt* stmt = (CompileStmt*)parse_tree; + tag = CompileTag(stmt->compileItem); + } break; + case T_GrantStmt: { GrantStmt* stmt = (GrantStmt*)parse_tree; @@ -9552,6 +9633,9 @@ const char* CreateCommandTag(Node* parse_tree) case T_GetDiagStmt: tag = "GET DIAGNOSTICS"; break; + case T_DolphinCallStmt: + tag = "CALL"; + break; default: elog(WARNING, "unrecognized node type: %d", (int)nodeTag(parse_tree)); tag = "?\?\?"; @@ -10048,6 +10132,7 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree) break; case T_AlterFunctionStmt: + case T_CompileStmt: case T_CreateEventStmt: case T_AlterEventStmt: case T_DropEventStmt: diff --git a/contrib/whale/plugin_utils/adt/ruleutils.cpp b/contrib/whale/plugin_utils/adt/ruleutils.cpp index 13536231b..87113f898 100644 --- a/contrib/whale/plugin_utils/adt/ruleutils.cpp +++ b/contrib/whale/plugin_utils/adt/ruleutils.cpp @@ -4564,7 +4564,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) int oldlen; char* p = NULL; bool isOraFunc = false; - NameData* pkgname = NULL; + char* pkgname = NULL; initStringInfo(&buf); /* Look up the function */ @@ -4610,7 +4610,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) if (proIsProcedure) { if (pkgname != NULL) { 
appendStringInfo(&buf, "CREATE OR REPLACE PROCEDURE %s(", - quote_qualified_identifier(nsp, pkgname->data, name)); + quote_qualified_identifier(nsp, pkgname, name)); } else if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { appendStringInfo(&buf, "CREATE DEFINER = %s PROCEDURE %s(", GetUserNameFromId(proc->proowner), quote_qualified_identifier(nsp, name)); @@ -4622,7 +4622,7 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) } else { if (pkgname != NULL) { appendStringInfo(&buf, "CREATE OR REPLACE FUNCTION %s(", - quote_qualified_identifier(nsp, pkgname->data, name)); + quote_qualified_identifier(nsp, pkgname, name)); } else if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { appendStringInfo(&buf, "CREATE DEFINER = %s FUNCTION %s(", GetUserNameFromId(proc->proowner), quote_qualified_identifier(nsp, name)); @@ -5204,6 +5204,10 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) */ if (IsA(ps, AppendState)) dpns->outer_planstate = ((AppendState*)ps)->appendplans[0]; +#ifdef USE_SPQ + else if (IsA(ps, SequenceState)) + dpns->outer_planstate = ((SequenceState *) ps)->subplans[1]; +#endif else if (IsA(ps, VecAppendState)) dpns->outer_planstate = ((VecAppendState*)ps)->appendplans[0]; else if (IsA(ps, MergeAppendState)) @@ -5231,6 +5235,10 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) */ if (IsA(ps, SubqueryScanState)) dpns->inner_planstate = ((SubqueryScanState*)ps)->subplan; +#ifdef USE_SPQ + else if (IsA(ps, SequenceState)) + dpns->inner_planstate = ((SequenceState *) ps)->subplans[0]; +#endif else if (IsA(ps, VecSubqueryScanState)) dpns->inner_planstate = ((VecSubqueryScanState*)ps)->subplan; else if (IsA(ps, CteScanState)) @@ -10591,7 +10599,7 @@ static void get_agg_expr(Aggref* aggref, deparse_context* context) aggform = (Form_pg_aggregate)GETSTRUCT(aggTuple); if (OidIsValid(aggform->aggfinalfn)) { - appendStringInfo(buf, "%s(", generate_function_name(aggform->aggfinalfn, 0, NULL, 
NULL, NULL, NULL)); + appendStringInfo(buf, "%s(", generate_function_name(aggform->aggfinalfn, 0, NULL, NULL, false, NULL)); added_finalfn = true; } ReleaseSysCache(aggTuple); @@ -12222,7 +12230,7 @@ static char* generate_function_name( int p_nvargs; Oid* p_true_typeids = NULL; Oid p_vatype; - NameData* pkgname = NULL; + char* pkgname = NULL; Datum pkgOiddatum; Oid pkgOid = InvalidOid; bool isnull = true; @@ -12278,7 +12286,7 @@ static char* generate_function_name( else nspname = get_namespace_name(procform->pronamespace); if (OidIsValid(pkgOid)) { - result = quote_qualified_identifier(nspname, pkgname->data, proname); + result = quote_qualified_identifier(nspname, pkgname, proname); } else { result = quote_qualified_identifier(nspname, proname); } diff --git a/contrib/whale/plugin_utils/adt/varlena.cpp b/contrib/whale/plugin_utils/adt/varlena.cpp index 7dc806b62..2b3d50522 100644 --- a/contrib/whale/plugin_utils/adt/varlena.cpp +++ b/contrib/whale/plugin_utils/adt/varlena.cpp @@ -141,6 +141,8 @@ static void text_format_append_string(StringInfo buf, const char* str, int flags // adapt A db's substrb static text* get_substring_really(Datum str, int32 start, int32 length, bool length_not_specified); +static text* get_result_of_concat(text* result, FunctionCallInfo fcinfo); + /***************************************************************************** * CONVERSION ROUTINES EXPORTED FOR USE BY C CODE * *****************************************************************************/ @@ -804,6 +806,26 @@ Datum unknownsend(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } +Datum undefinedin(PG_FUNCTION_ARGS) +{ + return unknownin(fcinfo); +} + +Datum undefinedout(PG_FUNCTION_ARGS) +{ + return unknownout(fcinfo); +} + +Datum undefinedrecv(PG_FUNCTION_ARGS) +{ + return unknownrecv(fcinfo); +} + +Datum undefinedsend(PG_FUNCTION_ARGS) +{ + return unknownsend(fcinfo); +} + static Datum text_length_huge(Datum str) { if (pg_database_encoding_max_length() == 1) { @@ 
-3315,7 +3337,7 @@ Datum bytea_substr_orclcompat(PG_FUNCTION_ARGS) total = toast_raw_datum_size(str) - VARHDRSZ; if ((length < 0) || (start > total) || (start + total < 0)) { - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT || + if ((u_sess->attr.attr_sql.sql_compatibility == A_FORMAT&& !ACCEPT_EMPTY_STR) || u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) PG_RETURN_NULL(); else { @@ -3329,10 +3351,12 @@ Datum bytea_substr_orclcompat(PG_FUNCTION_ARGS) */ result = bytea_substring_orclcompat(str, start, length, false); - if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (NULL == result || (0 == VARSIZE_ANY_EXHDR(result) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR)) { PG_RETURN_NULL(); - else - PG_RETURN_BYTEA_P(result); + } + + PG_RETURN_BYTEA_P(result); } // adapt A db's substr(bytea x,integer y) @@ -3347,7 +3371,7 @@ Datum bytea_substr_no_len_orclcompat(PG_FUNCTION_ARGS) total = toast_raw_datum_size(str) - VARHDRSZ; if ((start > total) || (start + total < 0)) { - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) PG_RETURN_NULL(); else { result = PG_STR_GET_BYTEA(""); @@ -3360,10 +3384,12 @@ Datum bytea_substr_no_len_orclcompat(PG_FUNCTION_ARGS) */ result = bytea_substring_orclcompat(str, start, -1, true); - if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (NULL == result || (0 == VARSIZE_ANY_EXHDR(result) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR)) { PG_RETURN_NULL(); - else - PG_RETURN_BYTEA_P(result); + } + + PG_RETURN_BYTEA_P(result); } // Does the real work for bytea_substr_orclcompat() and bytea_substr_no_len_orclcompat(). 
@@ -4321,14 +4347,20 @@ Datum replace_text_with_two_args(PG_FUNCTION_ARGS) { if (PG_ARGISNULL(0)) PG_RETURN_NULL(); - if (PG_ARGISNULL(1)) PG_RETURN_TEXT_P(PG_GETARG_TEXT_PP(0)); - - return DirectFunctionCall3(replace_text, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1), - CStringGetTextDatum("\0")); + FunctionCallInfoData locfcinfo; + Datum result; + InitFunctionCallInfoData(locfcinfo, NULL, 3, InvalidOid, NULL, NULL); + locfcinfo.arg[0] = PG_GETARG_DATUM(0); + locfcinfo.arg[1] = PG_GETARG_DATUM(1); + locfcinfo.arg[2] = CStringGetTextDatum("\0"); + locfcinfo.argnull[0] = false; + locfcinfo.argnull[1] = false; + locfcinfo.argnull[2] = false; + result = (*replace_text)(&locfcinfo); + fcinfo->isnull = locfcinfo.isnull; + return result; } /* @@ -4428,10 +4460,12 @@ Datum replace_text(PG_FUNCTION_ARGS) ret_text = cstring_to_text_with_len(str.data, str.len); pfree_ext(str.data); - if (VARHDRSZ == VARSIZE(ret_text) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (VARHDRSZ == VARSIZE(ret_text) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); - else - PG_RETURN_TEXT_P(ret_text); + } + + PG_RETURN_TEXT_P(ret_text); } /* @@ -4719,7 +4753,7 @@ Datum split_text(PG_FUNCTION_ARGS) if (inputstring_len < 1) { text_position_cleanup(&state); - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !RETURN_NS) { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR && !RETURN_NS) { PG_RETURN_NULL(); } @@ -4734,7 +4768,7 @@ Datum split_text(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(inputstring); } - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !RETURN_NS) { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR && !RETURN_NS) { PG_RETURN_NULL(); } @@ -4753,7 +4787,7 @@ Datum split_text(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(inputstring); } - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && 
!ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } @@ -4780,7 +4814,7 @@ Datum split_text(PG_FUNCTION_ARGS) result_text = text_substring(PointerGetDatum(inputstring), start_posn, end_posn - start_posn, false); } - if (TEXTISORANULL(result_text) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (TEXTISORANULL(result_text) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } @@ -4972,7 +5006,7 @@ Datum array_to_text(PG_FUNCTION_ARGS) result = array_to_text_internal(fcinfo, v, fldsep, NULL); /* To A db, empty string need return NULL.*/ - if (0 == VARSIZE_ANY_EXHDR(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (0 == VARSIZE_ANY_EXHDR(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } else { PG_RETURN_TEXT_P(result); @@ -5009,7 +5043,7 @@ Datum array_to_text_null(PG_FUNCTION_ARGS) result = array_to_text_internal(fcinfo, v, fldsep, null_string); /* To A db, empty string need return NULL.*/ - if (0 == VARSIZE_ANY_EXHDR(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (0 == VARSIZE_ANY_EXHDR(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } else { PG_RETURN_TEXT_P(result); @@ -6339,11 +6373,25 @@ static text* concat_internal(const char* sepstr, int seplen, int argidx, Functio result = cstring_to_text_with_len(str.data, str.len); pfree_ext(str.data); - if ((result == NULL || (0 == VARSIZE_ANY_EXHDR(result) && !DB_IS_CMPT(B_FORMAT | PG_FORMAT))) && - (CONCAT_VARIADIC || DB_IS_CMPT(A_FORMAT))) + return get_result_of_concat(result, fcinfo); +} + +static text* get_result_of_concat(text* result, FunctionCallInfo fcinfo) +{ + if (result == NULL) { PG_RETURN_NULL(); - else + } + + if (VARSIZE_ANY_EXHDR(result) > 0 || + DB_IS_CMPT(B_FORMAT | PG_FORMAT) || + (DB_IS_CMPT(A_FORMAT) && ACCEPT_EMPTY_STR)) { return result; + } + + if (DB_IS_CMPT(A_FORMAT) || CONCAT_VARIADIC) { + 
PG_RETURN_NULL(); + } + return result; } /* @@ -6462,7 +6510,7 @@ Datum text_left(PG_FUNCTION_ARGS) } rlen = pg_mbcharcliplen(p, len, part_off); - if (0 == rlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (0 == rlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } @@ -6499,7 +6547,7 @@ Datum text_right(PG_FUNCTION_ARGS) } } off = pg_mbcharcliplen(p, len, part_off); - if (0 == (len - off) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + if (0 == (len - off) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); } @@ -6798,7 +6846,8 @@ Datum text_format(PG_FUNCTION_ARGS) result = cstring_to_text_with_len(str.data, str.len); pfree_ext(str.data); - if ((result == NULL || VARSIZE_ANY_EXHDR(result) == 0) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (result == NULL || + (VARSIZE_ANY_EXHDR(result) == 0 && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR)) PG_RETURN_NULL(); else PG_RETURN_TEXT_P(result); @@ -7164,7 +7213,7 @@ Datum substrb_with_lenth(PG_FUNCTION_ARGS) int32 total = 0; total = toast_raw_datum_size(str) - VARHDRSZ; if ((length < 0) || (total == 0) || (start > total) || (start + total < 0)) { - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) PG_RETURN_NULL(); else { result = cstring_to_text(""); @@ -7173,10 +7222,10 @@ Datum substrb_with_lenth(PG_FUNCTION_ARGS) } result = get_substring_really(str, start, length, false); -#ifndef WHALE - if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); -#endif + } PG_RETURN_TEXT_P(result); } @@ -7191,7 +7240,7 @@ Datum substrb_without_lenth(PG_FUNCTION_ARGS) int32 
total = 0; total = toast_raw_datum_size(str) - VARHDRSZ; if ((total == 0) || (start > total) || (start + total < 0)) { - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) PG_RETURN_NULL(); else { result = cstring_to_text(""); @@ -7200,10 +7249,10 @@ Datum substrb_without_lenth(PG_FUNCTION_ARGS) } result = get_substring_really(str, start, -1, true); -#ifndef WHALE - if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) + if ((NULL == result || 0 == VARSIZE_ANY_EXHDR(result)) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !ACCEPT_EMPTY_STR) { PG_RETURN_NULL(); -#endif + } PG_RETURN_TEXT_P(result); } @@ -7338,3 +7387,11 @@ Datum btvarstrequalimage(PG_FUNCTION_ARGS) else PG_RETURN_BOOL(false); } + +Datum text_interval(PG_FUNCTION_ARGS) +{ + char* input = TextDatumGetCString(PG_GETARG_TEXT_P(0)); + int32 typmod = PG_GETARG_INT32(1); + return DirectFunctionCall3(interval_in, CStringGetDatum(input), ObjectIdGetDatum(InvalidOid), + Int32GetDatum(typmod)); +} diff --git a/contrib/whale/tablecmds.cpp b/contrib/whale/tablecmds.cpp index 736434ff2..091582f32 100644 --- a/contrib/whale/tablecmds.cpp +++ b/contrib/whale/tablecmds.cpp @@ -176,6 +176,7 @@ #include "pgstat.h" #include "postmaster/rbcleaner.h" #include "catalog/gs_collation.h" +#include "catalog/gs_dependencies_fn.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/utils/ts_relcache.h" #include "tsdb/common/ts_tablecmds.h" @@ -2067,6 +2068,7 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object bool relhasuids = false; Oid nspdefcoll = InvalidOid; Oid rel_coll_oid = InvalidOid; + List* depend_extend = NIL; /* * isalter is true, change the owner of the objects as the owner of the @@ -2603,6 +2605,14 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object } else ofTypeId = InvalidOid; + if 
(enable_plpgsql_gsdependency()) { + ListCell* cell = NULL; + foreach(cell, schema) { + ColumnDef* col_def = (ColumnDef*)lfirst(cell); + depend_extend = lappend(depend_extend, col_def->typname->dependExtend); + } + } + /* * Look up inheritance ancestors and generate relation schema, including * inherited attributes. @@ -2922,7 +2932,8 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object ceLst, storage_type, AccessShareLock, - typaddress); + typaddress, + depend_extend); if (bucketinfo != NULL) { pfree_ext(bucketinfo->bucketcol); pfree_ext(bucketinfo->bucketlist); @@ -3066,7 +3077,11 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object relation_close(rel, NoLock); list_free_ext(rawDefaults); list_free_ext(ceLst); - + if (enable_plpgsql_gsdependency_guc() && relkind != RELKIND_TOASTVALUE) { + if (CompileWhich() == PLPGSQL_COMPILE_NULL) { + (void)gsplsql_build_ref_type_dependency(get_rel_type_id(relationId)); + } + } return address; } @@ -5981,7 +5996,33 @@ ObjectAddress renameatt(RenameStmt* stmt) } TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); - + if (enable_plpgsql_gsdependency_guc()) { + Oid type_oid = get_rel_type_id(relid); + if (OidIsValid(type_oid)) { + GsDependObjDesc obj; + gsplsql_get_depend_obj_by_typ_id(&obj, type_oid, InvalidOid); + HeapTuple obj_tup = gsplsql_search_object(&obj, false); + if (HeapTupleIsValid(obj_tup)) { + heap_freetuple(obj_tup); + pfree_ext(obj.schemaName); + pfree_ext(obj.packageName); + pfree_ext(obj.name); + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("cannot rename attribute of the type because it is dependent on another object."))); + } + obj.refPosType = GSDEPEND_REFOBJ_POS_IN_TYPE; + bool exist_dep = gsplsql_exist_dependency(&obj); + pfree_ext(obj.schemaName); + pfree_ext(obj.packageName); + pfree_ext(obj.name); + if (exist_dep) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + 
errmsg("cannot rename attribute of the type because it is dependent on another object."))); + } + } + } // Check relations's internal mask Relation rel = relation_open(relid, AccessShareLock); if ((((uint32)RelationGetInternalMask(rel)) & INTERNAL_MASK_DALTER)) @@ -6562,7 +6603,19 @@ ObjectAddress RenameRelation(RenameStmt* stmt) errdetail("%s table doesn't support this ALTER yet.", ISMLOG(relname) ? "mlog" : "matviewmap")))); } ReleaseSysCache(tuple); - + if (enable_plpgsql_gsdependency_guc()) { + bool exist_dep = false; + char rel_kind = get_rel_relkind(relid); + if (RELKIND_RELATION == rel_kind) { + exist_dep = gsplsql_is_object_depend(get_rel_type_id(relid), GSDEPEND_OBJECT_TYPE_TYPE); + } + if (exist_dep) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator on %s is not allowed, " + "because it is dependent on another object.", stmt->relation->relname))); + } + } TrForbidAccessRbObject(RelationRelationId, relid, stmt->relation->relname); /* If table has history table, we need rename corresponding history table */ if (is_ledger_usertable(relid)) { @@ -7811,6 +7864,9 @@ void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) if (stmt->cmds != NIL) { /* process 'ALTER TABLE' cmd */ ATController(stmt, rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), lockmode); + if (enable_plpgsql_gsdependency_guc()) { + (void)gsplsql_build_ref_type_dependency(get_rel_type_id(relid)); + } } else { /* if do not call ATController, close the relation in here, but keep lock until commit */ relation_close(rel, NoLock); -- Gitee From 6450f73cdbf8d64ca68febb7cc57e00ecc3f195c Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Tue, 10 Oct 2023 09:23:58 +0800 Subject: [PATCH 031/434] =?UTF-8?q?=E4=B8=80=E9=98=B6=E6=AE=B5=E4=BA=A4?= =?UTF-8?q?=E4=BB=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/src/CMakeLists.txt | 2 - 
.../src/bgw/launcher_interface.cpp | 10 +- contrib/timescaledb/src/cache.cpp | 17 +- contrib/timescaledb/src/cache.h | 36 + contrib/timescaledb/src/cache_invalidate.cpp | 2 + contrib/timescaledb/src/catalog.cpp | 12 +- .../timescaledb/src/chunk_append/planner.cpp | 5 +- contrib/timescaledb/src/compat.h | 34 +- .../src/constraint_aware_append.cpp | 36 +- contrib/timescaledb/src/func_cache.cpp | 2 +- contrib/timescaledb/src/guc.cpp | 12 +- contrib/timescaledb/src/hypertable_cache.cpp | 10 +- contrib/timescaledb/src/hypertable_cache.h | 6 + contrib/timescaledb/src/indexing.cpp | 2 +- contrib/timescaledb/src/init.cpp | 98 +- contrib/timescaledb/src/license_guc.cpp | 5 +- contrib/timescaledb/src/loader/CMakeLists.txt | 2 - .../timescaledb/src/loader/bgw_launcher.cpp | 5 +- contrib/timescaledb/src/loader/loader.cpp | 4 +- contrib/timescaledb/src/net/conn_plain.cpp | 2 +- contrib/timescaledb/src/net/conn_ssl.cpp | 2 +- contrib/timescaledb/src/planner.cpp | 3 +- contrib/timescaledb/src/process_utility.cpp | 8 +- contrib/timescaledb/src/tsdb.cpp | 38 +- contrib/timescaledb/src/tsdb.h | 21 +- .../timescaledb/src/tsdb_event_trigger.cpp | 2217 -------------- contrib/timescaledb/src/tsdb_event_trigger.h | 192 -- contrib/timescaledb/src/tsdb_extension.cpp | 10 +- contrib/timescaledb/src/tsdb_get_obj.cpp | 2553 ----------------- contrib/timescaledb/src/tsdb_get_obj.h | 66 - contrib/timescaledb/src/tsdb_head.h | 29 +- contrib/timescaledb/src/tsdb_static.cpp | 178 +- contrib/timescaledb/src/utils.cpp | 12 +- contrib/timescaledb/test/src/CMakeLists.txt | 7 + .../timescaledb/test/src/bgw/CMakeLists.txt | 8 +- contrib/timescaledb/test/src/bgw/log.cpp | 2 +- contrib/timescaledb/test/src/bgw/params.cpp | 11 +- .../test/src/bgw/test_job_refresh.cpp | 3 +- .../test/src/loader/CMakeLists.txt | 6 + contrib/timescaledb/test/src/loader/init.cpp | 8 +- .../timescaledb/test/src/net/CMakeLists.txt | 6 + .../test/src/telemetry/CMakeLists.txt | 6 + contrib/timescaledb/tsl/CMakeLists.txt | 6 + 
contrib/timescaledb/tsl/src/CMakeLists.txt | 2 - .../tsl/src/compression/create.cpp | 2 +- .../tsl/src/continuous_aggs/create.cpp | 2 +- contrib/timescaledb/tsl/src/tsdb_tsl.cpp | 4 +- .../tsl/test/isolation/CMakeLists.txt | 6 + .../timescaledb/tsl/test/src/CMakeLists.txt | 6 + .../tsl/test/src/test_auto_policy.cpp | 2 +- .../tsl/test/src/test_chunk_stats.cpp | 2 +- .../tsl/test/src/test_compression.cpp | 20 +- .../tsl/test/src/test_ddl_hook.cpp | 4 +- 53 files changed, 384 insertions(+), 5360 deletions(-) delete mode 100644 contrib/timescaledb/src/tsdb_event_trigger.cpp delete mode 100644 contrib/timescaledb/src/tsdb_event_trigger.h delete mode 100644 contrib/timescaledb/src/tsdb_get_obj.cpp delete mode 100644 contrib/timescaledb/src/tsdb_get_obj.h diff --git a/contrib/timescaledb/src/CMakeLists.txt b/contrib/timescaledb/src/CMakeLists.txt index bf9a1db28..04dd8a914 100644 --- a/contrib/timescaledb/src/CMakeLists.txt +++ b/contrib/timescaledb/src/CMakeLists.txt @@ -3,8 +3,6 @@ agg_bookend.cpp base64_compat.cpp tsdb_dsm.cpp tsdb_shm.cpp -tsdb_event_trigger.cpp -tsdb_get_obj.cpp tsdb_extension.cpp tsdb_head.cpp tsdb_static.cpp diff --git a/contrib/timescaledb/src/bgw/launcher_interface.cpp b/contrib/timescaledb/src/bgw/launcher_interface.cpp index a0bd9f800..f5e3d400c 100644 --- a/contrib/timescaledb/src/bgw/launcher_interface.cpp +++ b/contrib/timescaledb/src/bgw/launcher_interface.cpp @@ -16,7 +16,8 @@ extern bool ts_bgw_worker_reserve(void) { - PGFunction reserve = load_external_function(EXTENSION_SO, "ts_bgw_worker_reserve", true, NULL,0); + CFunInfo temp_for_tsdb = load_external_function(EXTENSION_SO, "ts_bgw_worker_reserve", true, NULL); + PGFunction reserve = temp_for_tsdb.user_fn; return DatumGetBool( DirectFunctionCall1(reserve, BoolGetDatum(false))); /* no function call zero */ @@ -25,7 +26,8 @@ ts_bgw_worker_reserve(void) extern void ts_bgw_worker_release(void) { - PGFunction release = load_external_function(EXTENSION_SO, "ts_bgw_worker_release", true, 
NULL,0); + CFunInfo temp_for_tsdb = load_external_function(EXTENSION_SO, "ts_bgw_worker_reserve", true, NULL); + PGFunction release = temp_for_tsdb.user_fn; DirectFunctionCall1(release, BoolGetDatum(false)); /* no function call zero */ } @@ -33,8 +35,8 @@ ts_bgw_worker_release(void) extern int ts_bgw_num_unreserved(void) { - PGFunction unreserved = - load_external_function(EXTENSION_SO, "ts_bgw_num_unreserved", true, NULL,0); + CFunInfo temp_for_tsdb = load_external_function(EXTENSION_SO, "ts_bgw_num_unreserved", true, NULL); + PGFunction unreserved =temp_for_tsdb.user_fn; return DatumGetInt32( DirectFunctionCall1(unreserved, BoolGetDatum(false))); /* no function call zero */ diff --git a/contrib/timescaledb/src/cache.cpp b/contrib/timescaledb/src/cache.cpp index 1eeff86e5..eefae56da 100644 --- a/contrib/timescaledb/src/cache.cpp +++ b/contrib/timescaledb/src/cache.cpp @@ -6,13 +6,16 @@ #include #include + #include "cache.h" #include "compat.h" + + + /* List of pinned caches. A cache occurs once in this list for every pin * taken */ -static List *pinned_caches = NIL; -static MemoryContext pinned_caches_mctx = NULL; + typedef struct CachePin @@ -28,7 +31,7 @@ cache_reset_pinned_caches(void) MemoryContextDelete(pinned_caches_mctx); pinned_caches_mctx = - AllocSetContextCreate(u_sess->cache_mem_cxt, "Cache pins", ALLOCSET_DEFAULT_SIZES); + AllocSetContextCreate(TopMemoryContext, "Cache pins", ALLOCSET_DEFAULT_SIZES); pinned_caches = NIL; } @@ -91,12 +94,15 @@ ts_cache_invalidate(Cache *cache) extern Cache * ts_cache_pin(Cache *cache) { + + MemoryContext old = MemoryContextSwitchTo(pinned_caches_mctx); CachePin *cp = (CachePin*)palloc(sizeof(CachePin)); cp->cache = cache; cp->subtxnid = GetCurrentSubTransactionId(); pinned_caches = lappend(pinned_caches, cp); + MemoryContextSwitchTo(old); cache->refcount++; return cache; @@ -343,6 +349,9 @@ cache_subxact_abort(SubXactEvent event, SubTransactionId subtxn_id, SubTransacti } } + + + void _cache_init(void) { @@ -360,3 
+369,5 @@ _cache_fini(void) UnregisterXactCallback(cache_xact_end, NULL); UnregisterSubXactCallback(cache_subxact_abort, NULL); } + + diff --git a/contrib/timescaledb/src/cache.h b/contrib/timescaledb/src/cache.h index 08c1223ed..05d9afa7a 100644 --- a/contrib/timescaledb/src/cache.h +++ b/contrib/timescaledb/src/cache.h @@ -10,7 +10,16 @@ #include #include +#include "tsdb_head.h" #include "export.h" +#include "commands/extension.h" + + +typedef enum TelemetryLevel +{ + TELEMETRY_OFF, + TELEMETRY_BASIC, +} TelemetryLevel; typedef enum CacheQueryFlags { @@ -56,6 +65,26 @@ typedef struct Cache * VACUUM */ } Cache; +typedef struct tsdb_session_context { + List *tsdb_pinned_caches; + MemoryContext tsdb_pinned_caches_mctx; + Cache *tsdb_hypertable_cache_current; + List *tsdb_planner_hcaches; + const char *tsdb_TS_CTE_EXPAND; + ExtensiblePathMethods tsdb_constraint_aware_append_path_methods; + ExtensiblePlanMethods tsdb_constraint_aware_append_plan_methods; + ExtensibleExecMethods tsdb_constraint_aware_append_state_methods; + ExtensiblePlanMethods tsdb_chunk_append_plan_methods; + bool tsdb_expect_chunk_modification; + + + struct config_enum_entry tsdb_telemetry_level_options[3]; + TelemetryLevel tsdb_on_level; + bool tsdb_first_start; +} tsdb_session_context; + + + extern void ts_cache_init(Cache *cache); extern void ts_cache_invalidate(Cache *cache); extern void *ts_cache_fetch(Cache *cache, CacheQuery *query); @@ -66,7 +95,14 @@ extern MemoryContext ts_cache_memory_ctx(Cache *cache); extern Cache *ts_cache_pin(Cache *cache); extern TSDLLEXPORT int ts_cache_release(Cache *cache); + + extern void _cache_init(void); extern void _cache_fini(void); +extern "C" void set_extension_index(uint32 index); +extern "C" void init_session_vars(void); + +extern tsdb_session_context* get_session_context(bool is_from_PG_init=false); + #endif /* TIMESCALEDB_CACHE_H */ diff --git a/contrib/timescaledb/src/cache_invalidate.cpp b/contrib/timescaledb/src/cache_invalidate.cpp index 
4a6627cbe..9b9966dda 100644 --- a/contrib/timescaledb/src/cache_invalidate.cpp +++ b/contrib/timescaledb/src/cache_invalidate.cpp @@ -139,6 +139,7 @@ _cache_invalidate_init(void) RegisterXactCallback(cache_invalidate_xact_end, NULL); RegisterSubXactCallback(cache_invalidate_subxact_end, NULL); CacheRegisterThreadRelcacheCallback(cache_invalidate_callback, PointerGetDatum(NULL)); + tsdb_first_start = true; } void @@ -146,5 +147,6 @@ _cache_invalidate_fini(void) { UnregisterXactCallback(cache_invalidate_xact_end, NULL); UnregisterSubXactCallback(cache_invalidate_subxact_end, NULL); + tsdb_first_start = false; /* No way to unregister relcache callback */ } diff --git a/contrib/timescaledb/src/catalog.cpp b/contrib/timescaledb/src/catalog.cpp index a3e1d69c2..b495a2a1f 100644 --- a/contrib/timescaledb/src/catalog.cpp +++ b/contrib/timescaledb/src/catalog.cpp @@ -20,6 +20,7 @@ #include "compat.h" #include "catalog.h" #include "extension.h" +#include "utils/numeric.h" #if !PG96 #include @@ -550,14 +551,19 @@ TSDLLEXPORT int64 ts_catalog_table_next_seq_id(Catalog *catalog, CatalogTable table) { Oid relid = catalog->tables[table].serial_relid; - + NumericVar x; + int64 val; if (!OidIsValid(relid)) elog(ERROR, "no serial ID column for table \"%s.%s\"", catalog_table_names[table].schema_name, catalog_table_name(table)); - - return DatumGetInt64(DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(relid))); + Datum temp = (DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(relid))); + Numeric num = (Numeric)temp; + + init_var_from_num(num, &x); + numericvar_to_int64(&x,&val); + return val; } Oid diff --git a/contrib/timescaledb/src/chunk_append/planner.cpp b/contrib/timescaledb/src/chunk_append/planner.cpp index c09fdedde..b574055ef 100644 --- a/contrib/timescaledb/src/chunk_append/planner.cpp +++ b/contrib/timescaledb/src/chunk_append/planner.cpp @@ -38,10 +38,7 @@ static Sort *make_sort(Plan *lefttree, int numCols, AttrNumber *sortColIdx, Oid static Plan 
*adjust_childscan(PlannerInfo *root, Plan *plan, Path *path, List *pathkeys, List *tlist, AttrNumber *sortColIdx); -static ExtensiblePlanMethods chunk_append_plan_methods = { - .ExtensibleName = "ChunkAppend", - .CreateExtensiblePlanState = ts_chunk_append_state_create, -}; + void _chunk_append_init(void) diff --git a/contrib/timescaledb/src/compat.h b/contrib/timescaledb/src/compat.h index 75662b87b..5f2274d41 100644 --- a/contrib/timescaledb/src/compat.h +++ b/contrib/timescaledb/src/compat.h @@ -7,6 +7,7 @@ #ifndef TIMESCALEDB_COMPAT_H #define TIMESCALEDB_COMPAT_H #include "c.h" +#include "postgres.h" #include #include #include @@ -52,6 +53,19 @@ #error "Unsupported PostgreSQL version" #endif +#define pinned_caches (get_session_context()->tsdb_pinned_caches) +#define pinned_caches_mctx (get_session_context()->tsdb_pinned_caches_mctx) +#define hypertable_cache_current (get_session_context()->tsdb_hypertable_cache_current) +#define planner_hcaches (get_session_context()->tsdb_planner_hcaches) +#define TS_CTE_EXPAND (get_session_context()->tsdb_TS_CTE_EXPAND) +#define constraint_aware_append_path_methods (get_session_context()->tsdb_constraint_aware_append_path_methods) +#define constraint_aware_append_plan_methods (get_session_context()->tsdb_constraint_aware_append_plan_methods) +#define constraint_aware_append_state_methods (get_session_context()->tsdb_constraint_aware_append_state_methods) +#define chunk_append_plan_methods (get_session_context()->tsdb_chunk_append_plan_methods) +#define expect_chunk_modification (get_session_context()->tsdb_expect_chunk_modification) +#define on_level (get_session_context()->tsdb_on_level) +#define telemetry_level_options (get_session_context()->tsdb_telemetry_level_options) +#define tsdb_first_start (get_session_context(true)->tsdb_first_start) /* * The following are compatibility functions for different versions of * PostgreSQL. 
Each compatibility function (or group) has its own logic for @@ -190,8 +204,24 @@ * partitioned tables, InvalidOid otherwise. * The PG96 interface is used for compatibility. */ +#ifdef OG30 +#define DefineIndexCompat(relationId, \ + stmt, \ + indexRelationId, \ + is_alter_table, \ + check_rights, \ + skip_build, \ + quiet) \ + DefineIndex(relationId, \ + stmt, \ + indexRelationId, \ + is_alter_table, \ + check_rights, \ + skip_build, \ + quiet) +#else #if PG96 -#define DefineIndexCompat DefineIndex_tsdb +#define DefineIndexCompat DefineIndex #elif PG10 #define DefineIndexCompat(relationId, \ stmt, \ @@ -227,7 +257,7 @@ skip_build, \ quiet) #endif - +#endif #if PG96 #define DefineRelationCompat(stmt, relkind, ownerid, typaddress, queryString) \ DefineRelation(stmt, relkind, ownerid, typaddress,0) diff --git a/contrib/timescaledb/src/constraint_aware_append.cpp b/contrib/timescaledb/src/constraint_aware_append.cpp index efecf5b01..a64dfb071 100644 --- a/contrib/timescaledb/src/constraint_aware_append.cpp +++ b/contrib/timescaledb/src/constraint_aware_append.cpp @@ -420,14 +420,7 @@ ca_append_explain(ExtensiblePlanState *node, List *ancestors, ExplainState *es) es); } -static ExtensibleExecMethods constraint_aware_append_state_methods = { - .ExtensibleName = "", - .BeginExtensiblePlan = ca_append_begin, - .ExecExtensiblePlan = ca_append_exec, - .EndExtensiblePlan = ca_append_end, - .ReScanExtensiblePlan = ca_append_rescan, - .ExplainExtensiblePlan = ca_append_explain, -}; + static Node * constraint_aware_append_state_create(ExtensiblePlan *cscan) @@ -443,10 +436,6 @@ constraint_aware_append_state_create(ExtensiblePlan *cscan) return (Node *) state; } -static ExtensiblePlanMethods constraint_aware_append_plan_methods = { - .ExtensibleName = "ConstraintAwareAppend", - .CreateExtensiblePlanState = constraint_aware_append_state_create, -}; static Plan * constraint_aware_append_plan_create(PlannerInfo *root, RelOptInfo *rel, ExtensiblePath *path, @@ -560,10 +549,7 @@ 
constraint_aware_append_plan_create(PlannerInfo *root, RelOptInfo *rel, Extensib return &cscan->scan.plan; } -static ExtensiblePathMethods constraint_aware_append_path_methods = { - .ExtensibleName = "ConstraintAwareAppend", - .PlanExtensiblePath = constraint_aware_append_plan_create, -}; + Path * ts_constraint_aware_append_path_create(PlannerInfo *root, Hypertable *ht, Path *subpath) @@ -654,4 +640,22 @@ void _constraint_aware_append_init(void) { RegisterCustomScanMethods(&constraint_aware_append_plan_methods); + constraint_aware_append_path_methods = { + .ExtensibleName = "ConstraintAwareAppend", + .PlanExtensiblePath = constraint_aware_append_plan_create, + }; + + constraint_aware_append_plan_methods = { + .ExtensibleName = "ConstraintAwareAppend", + .CreateExtensiblePlanState = constraint_aware_append_state_create, + }; + + constraint_aware_append_state_methods = { + .ExtensibleName = "", + .BeginExtensiblePlan = ca_append_begin, + .ExecExtensiblePlan = ca_append_exec, + .EndExtensiblePlan = ca_append_end, + .ReScanExtensiblePlan = ca_append_rescan, + .ExplainExtensiblePlan = ca_append_explain, + }; } diff --git a/contrib/timescaledb/src/func_cache.cpp b/contrib/timescaledb/src/func_cache.cpp index 451120975..cd3554793 100644 --- a/contrib/timescaledb/src/func_cache.cpp +++ b/contrib/timescaledb/src/func_cache.cpp @@ -326,7 +326,7 @@ initialize_func_info() .keycopy = 0, .alloc = 0, .dealloc = 0, - .hcxt = u_sess->cache_mem_cxt, + .hcxt = LocalMyDBCacheMemCxt(), .hctl = NULL, }; Oid extension_nsp = get_namespace_oid(ts_extension_schema_name(), false); diff --git a/contrib/timescaledb/src/guc.cpp b/contrib/timescaledb/src/guc.cpp index 9c3cf7981..a772eab5a 100644 --- a/contrib/timescaledb/src/guc.cpp +++ b/contrib/timescaledb/src/guc.cpp @@ -13,16 +13,12 @@ #include "hypertable_cache.h" #include "telemetry/telemetry.h" -typedef enum TelemetryLevel -{ - TELEMETRY_OFF, - TELEMETRY_BASIC, -} TelemetryLevel; + /* Define which level means on. 
We use this object to have at least one object * of type TelemetryLevel in the code, otherwise pgindent won't work for the * type */ -static const TelemetryLevel on_level = TELEMETRY_BASIC; + bool ts_telemetry_on() @@ -30,9 +26,7 @@ ts_telemetry_on() return ts_guc_telemetry_level == on_level; } -static const struct config_enum_entry telemetry_level_options[] = { - { "off", TELEMETRY_OFF, false }, { "basic", TELEMETRY_BASIC, false }, { NULL, 0, false } -}; + bool ts_guc_disable_optimizations = false; bool ts_guc_optimize_non_hypertables = false; diff --git a/contrib/timescaledb/src/hypertable_cache.cpp b/contrib/timescaledb/src/hypertable_cache.cpp index ef1cd7c9e..7e70260b2 100644 --- a/contrib/timescaledb/src/hypertable_cache.cpp +++ b/contrib/timescaledb/src/hypertable_cache.cpp @@ -18,6 +18,7 @@ #include "dimension.h" #include "tablespace.h" + static void *hypertable_cache_create_entry(Cache *cache, CacheQuery *query); static void hypertable_cache_missing_error(const Cache *cache, const CacheQuery *query); @@ -49,11 +50,11 @@ hypertable_cache_valid_result(const void *result) return ((HypertableCacheEntry *) result)->hypertable != NULL; } -static Cache * +Cache * hypertable_cache_create() { MemoryContext ctx = - AllocSetContextCreate(u_sess->cache_mem_cxt, "Hypertable cache", ALLOCSET_DEFAULT_SIZES); + AllocSetContextCreate(LocalMyDBCacheMemCxt(), "Hypertable cache", ALLOCSET_DEFAULT_SIZES); Cache *cache =(Cache *) MemoryContextAlloc(ctx, sizeof(Cache)); Cache templatee = @@ -97,7 +98,7 @@ hypertable_cache_create() return cache; } -static Cache *hypertable_cache_current = NULL; + static ScanTupleResult hypertable_tuple_found(TupleInfo *ti, void *data) @@ -231,13 +232,14 @@ ts_hypertable_cache_get_entry_with_table(Cache *cache, const Oid relid, const ch extern TSDLLEXPORT Cache * ts_hypertable_cache_pin() { + return ts_cache_pin(hypertable_cache_current); } void _hypertable_cache_init(void) { - CreateCacheMemoryContext(); + hypertable_cache_current = 
hypertable_cache_create(); } diff --git a/contrib/timescaledb/src/hypertable_cache.h b/contrib/timescaledb/src/hypertable_cache.h index f7f7dd1a4..07ae888c6 100644 --- a/contrib/timescaledb/src/hypertable_cache.h +++ b/contrib/timescaledb/src/hypertable_cache.h @@ -28,7 +28,13 @@ extern void ts_hypertable_cache_invalidate_callback(void); extern TSDLLEXPORT Cache *ts_hypertable_cache_pin(void); + +extern Cache *hypertable_cache_create(); + extern void _hypertable_cache_init(void); extern void _hypertable_cache_fini(void); + + + #endif /* TIMESCALEDB_HYPERTABLE_CACHE_H */ diff --git a/contrib/timescaledb/src/indexing.cpp b/contrib/timescaledb/src/indexing.cpp index 852d82dcb..721a78c98 100644 --- a/contrib/timescaledb/src/indexing.cpp +++ b/contrib/timescaledb/src/indexing.cpp @@ -140,7 +140,7 @@ create_default_index(Hypertable *ht, List *indexelems) IndexStmt stmt = { .type = T_IndexStmt, .missing_ok= false, - .schemaname = "", + .schemaname = "public", .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, diff --git a/contrib/timescaledb/src/init.cpp b/contrib/timescaledb/src/init.cpp index 8198d01f2..281ecd727 100644 --- a/contrib/timescaledb/src/init.cpp +++ b/contrib/timescaledb/src/init.cpp @@ -20,11 +20,21 @@ #include "config.h" #include "license_guc.h" #include "constraint_aware_append.h" +#include "hypertable_cache.h" + +#include "cache.h" +#include "chunk_append/exec.h" + + +static uint32 tsdb_index; #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif + + + extern void _hypertable_cache_init(void); extern void _hypertable_cache_fini(void); @@ -56,8 +66,15 @@ extern void _chunk_append_init(); extern void TSDLLEXPORT _PG_init(void); extern void TSDLLEXPORT _PG_fini(void); +extern "C" void set_extension_index(uint32 index); +extern "C" void init_session_vars(void); + +extern tsdb_session_context* get_session_context(bool is_from_PG_init); + TS_FUNCTION_INFO_V1(ts_post_load_init); + 
+ void _PG_init(void) { @@ -69,18 +86,27 @@ _PG_init(void) ts_extension_check_server_version(); ts_bgw_check_loader_api_version(); + + + + if (!tsdb_first_start) + { _cache_init(); _hypertable_cache_init(); + _constraint_aware_append_init(); _cache_invalidate_init(); _planner_init(); - _constraint_aware_append_init(); _chunk_append_init(); _process_utility_init(); _guc_init(); _conn_plain_init(); -#ifdef TS_USE_OPENSSL - _conn_ssl_init(); -#endif + } + // + + + + + } @@ -116,3 +142,67 @@ ts_post_load_init(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + + + +void set_extension_index(uint32 index) { + tsdb_index = index; +} + + + + +tsdb_session_context* get_session_context(bool is_from_PG_init) +{ + if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] == NULL && !is_from_PG_init) { + init_session_vars(); + } + return (tsdb_session_context*)u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]; +} + +void init_session_vars(void) +{ + if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]!=NULL) + return + + RepallocSessionVarsArrayIfNecessary(); + tsdb_session_context* psc = (tsdb_session_context*)MemoryContextAllocZero(u_sess->self_mem_cxt, sizeof(tsdb_session_context)); + u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] = psc; + psc->tsdb_pinned_caches = NIL; + psc->tsdb_pinned_caches_mctx = NULL; + psc->tsdb_hypertable_cache_current = NULL; + psc->tsdb_planner_hcaches = NIL; + psc->tsdb_TS_CTE_EXPAND= "ts_expand"; + + + psc->tsdb_chunk_append_plan_methods = { + .ExtensibleName = "ChunkAppend", + .CreateExtensiblePlanState = ts_chunk_append_state_create, + }; + + psc->tsdb_telemetry_level_options[0] = { "off", TELEMETRY_OFF, false }; + psc->tsdb_telemetry_level_options[1] = { "basic", TELEMETRY_BASIC, false }; + psc->tsdb_telemetry_level_options[2] = { NULL, 0, false }; + psc->tsdb_on_level = TELEMETRY_BASIC; + + + psc->tsdb_expect_chunk_modification = false; + _constraint_aware_append_init(); + + + if 
(!ts_extension_is_loaded()) + return; + + + if (!tsdb_first_start) + { + _cache_init(); + _hypertable_cache_init(); + _cache_invalidate_init(); + + _planner_init(); + _chunk_append_init(); + _process_utility_init(); + } + +} \ No newline at end of file diff --git a/contrib/timescaledb/src/license_guc.cpp b/contrib/timescaledb/src/license_guc.cpp index 9b4801edb..449c076c9 100644 --- a/contrib/timescaledb/src/license_guc.cpp +++ b/contrib/timescaledb/src/license_guc.cpp @@ -243,7 +243,7 @@ static bool load_tsl(void) { char soname[MAX_SO_NAME_LEN] = { 0 }; - + CFunInfo temp_for_tsdb; if (tsl_handle != NULL) { Assert(tsl_startup_fn != NULL); @@ -258,11 +258,12 @@ load_tsl(void) snprintf(soname, MAX_SO_NAME_LEN, TS_LIBDIR "%s-%s", TSL_LIBRARY_NAME, TIMESCALEDB_VERSION_MOD); - tsl_startup_fn = load_external_function( + temp_for_tsdb = load_external_function( /* filename= */ soname, /* funcname= */ "ts_module_init", /* signalNotFound= */ false, /* filehandle= */ &tsl_handle); + tsl_startup_fn = temp_for_tsdb.user_fn; if (tsl_handle == NULL || tsl_startup_fn == NULL) goto loading_failed; diff --git a/contrib/timescaledb/src/loader/CMakeLists.txt b/contrib/timescaledb/src/loader/CMakeLists.txt index f0e2290a5..2ab11d7c1 100644 --- a/contrib/timescaledb/src/loader/CMakeLists.txt +++ b/contrib/timescaledb/src/loader/CMakeLists.txt @@ -8,8 +8,6 @@ set(SOURCES tsdb_loader.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_dsm.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_shm.cpp - ${PROJECT_SOURCE_DIR}/src/tsdb_event_trigger.cpp - ${PROJECT_SOURCE_DIR}/src/tsdb_get_obj.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_extension.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_head.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_static.cpp diff --git a/contrib/timescaledb/src/loader/bgw_launcher.cpp b/contrib/timescaledb/src/loader/bgw_launcher.cpp index 8b9ea554a..a9bfb03e2 100644 --- a/contrib/timescaledb/src/loader/bgw_launcher.cpp +++ b/contrib/timescaledb/src/loader/bgw_launcher.cpp @@ -940,10 +940,11 @@ 
ts_bgw_db_scheduler_entrypoint(PG_FUNCTION_ARGS) { char soname[MAX_SO_NAME_LEN]; PGFunction versioned_scheduler_main; + CFunInfo temp_for_tsdb; snprintf(soname, MAX_SO_NAME_LEN, "%s-%s", EXTENSION_SO, version); - versioned_scheduler_main = - load_external_function(soname, BGW_DB_SCHEDULER_FUNCNAME, false, NULL); + temp_for_tsdb = load_external_function(soname, BGW_DB_SCHEDULER_FUNCNAME, false, NULL); + versioned_scheduler_main = temp_for_tsdb.user_fn; if (versioned_scheduler_main == NULL) ereport(LOG, (errmsg("TimescaleDB version %s does not have a background worker, exiting", diff --git a/contrib/timescaledb/src/loader/loader.cpp b/contrib/timescaledb/src/loader/loader.cpp index 173f924d2..3f72601c8 100644 --- a/contrib/timescaledb/src/loader/loader.cpp +++ b/contrib/timescaledb/src/loader/loader.cpp @@ -602,8 +602,8 @@ static void inline do_load() */ PG_TRY(); { - PGFunction ts_post_load_init = - load_external_function(soname, POST_LOAD_INIT_FN, false, NULL); + CFunInfo temp_for_tsdb = load_external_function(soname, POST_LOAD_INIT_FN, false, NULL); + PGFunction ts_post_load_init = temp_for_tsdb.user_fn; if (ts_post_load_init != NULL) DirectFunctionCall1(ts_post_load_init, CharGetDatum(0)); diff --git a/contrib/timescaledb/src/net/conn_plain.cpp b/contrib/timescaledb/src/net/conn_plain.cpp index e46d2dc85..0b95b4c37 100644 --- a/contrib/timescaledb/src/net/conn_plain.cpp +++ b/contrib/timescaledb/src/net/conn_plain.cpp @@ -255,7 +255,7 @@ ts_plain_errmsg(Connection *conn) return errmsg; } -static ConnOps plain_ops = { +static THR_LOCAL ConnOps plain_ops = { .size = sizeof(Connection), .init = NULL, .connect = ts_plain_connect, diff --git a/contrib/timescaledb/src/net/conn_ssl.cpp b/contrib/timescaledb/src/net/conn_ssl.cpp index 77b0d6609..97c1ea1e8 100644 --- a/contrib/timescaledb/src/net/conn_ssl.cpp +++ b/contrib/timescaledb/src/net/conn_ssl.cpp @@ -242,7 +242,7 @@ ssl_errmsg(Connection *conn) return errbuf; } -static ConnOps ssl_ops = { +static THR_LOCAL 
ConnOps ssl_ops = { .size = sizeof(SSLConnection), .init = NULL, .connect = ssl_connect, diff --git a/contrib/timescaledb/src/planner.cpp b/contrib/timescaledb/src/planner.cpp index 80afc4445..0d809f017 100644 --- a/contrib/timescaledb/src/planner.cpp +++ b/contrib/timescaledb/src/planner.cpp @@ -92,7 +92,7 @@ static void cagg_reorder_groupby_clause(RangeTblEntry *subq_rte, int rtno, List * expansion, we also have to account for the case when our custom expansion * is turned off with a GUC. */ -static const char *TS_CTE_EXPAND = "ts_expand"; + static void rte_mark_for_expansion(RangeTblEntry *rte) @@ -125,7 +125,6 @@ rte_is_marked_for_expansion(const RangeTblEntry *rte) * holds the objects it was warmed with. Since the planner can be invoked * recursively, we also need to stack and pop cache objects. */ -static List *planner_hcaches = NIL; static Cache * planner_hcache_push(void) diff --git a/contrib/timescaledb/src/process_utility.cpp b/contrib/timescaledb/src/process_utility.cpp index 9257289ce..3d875b494 100644 --- a/contrib/timescaledb/src/process_utility.cpp +++ b/contrib/timescaledb/src/process_utility.cpp @@ -70,7 +70,7 @@ void _process_utility_init(void); void _process_utility_fini(void); static ProcessUtility_hook_type prev_ProcessUtility_hook; -static bool expect_chunk_modification = false; + static bool process_altertable_set_options(AlterTableCmd *cmd, Hypertable *ht); static bool process_altertable_reset_options(AlterTableCmd *cmd, Hypertable *ht); @@ -3441,6 +3441,12 @@ process_ddl_sql_drop(EventTriggerDropObject *obj) } } +/* + * ProcessUtility hook for DDL commands that have not yet been processed by + * PostgreSQL. 
+*/ + + static void timescaledb_ddl_command_start( processutility_context* processutility_cxt, diff --git a/contrib/timescaledb/src/tsdb.cpp b/contrib/timescaledb/src/tsdb.cpp index 06e58307f..1561b9607 100644 --- a/contrib/timescaledb/src/tsdb.cpp +++ b/contrib/timescaledb/src/tsdb.cpp @@ -49,6 +49,7 @@ #include "plan_agg_bookend.h" + #include "tsdb_dsm.h" #include "tsdb_static.cpp" @@ -86,12 +87,7 @@ void heap_endscan(HeapScanDescData*& scan) { } -PGFunction load_external_function(char *filename, char *funcname, - bool signalNotFound, void **filehandle,int tsdb) -{ - PGFunction p; - return p; -} + List *make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist) @@ -154,18 +150,7 @@ bool reindex_relation(Oid relid, int flags, int options) { return false; } -ObjectAddress -DefineIndex_tsdb(Oid relationId, - IndexStmt *stmt, - Oid indexRelationId, - bool is_alter_table, - bool check_rights, - bool skip_build, - bool quiet) - { - ObjectAddress o; - return o; - } + void cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) { @@ -1547,18 +1532,7 @@ array_create_iterator(ArrayType *arr, int slice_ndim, ArrayMetaState *mstate) return iterator; } -void -CreateCacheMemoryContext(void) -{ - /* - * Purely for paranoia, check that context doesn't exist; caller probably - * did so already. 
- */ - if (u_sess->cache_mem_cxt == nullptr ) { - u_sess->cache_mem_cxt = - AllocSetContextCreate(u_sess->top_mem_cxt, "SessionCacheMemoryContext", ALLOCSET_DEFAULT_SIZES); - } -} + @@ -3507,4 +3481,6 @@ ts_preprocess_first_last_aggregates(PlannerInfo *root, List *tlist) /* Let's replace Aggref node since we will use subquery we've generated */ replace_aggref_in_tlist(minmaxagg_path); add_path(grouped_rel, (Path *) minmaxagg_path); -} \ No newline at end of file +} + + diff --git a/contrib/timescaledb/src/tsdb.h b/contrib/timescaledb/src/tsdb.h index 1ed859711..c6e040e86 100644 --- a/contrib/timescaledb/src/tsdb.h +++ b/contrib/timescaledb/src/tsdb.h @@ -25,6 +25,7 @@ #include "commands/vacuum.h" #include "utils/evtcache.h" #include "tsdb_head.h" +#include "cache.h" extern CopyState BeginCopyFrom(Relation rel, const char *filename, bool is_program, List *attnamelist, List *options); @@ -34,8 +35,7 @@ extern ObjectAddress CreateTrigger(CreateTrigStmt *stmt, const char *queryString extern ObjectAddress DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, ObjectAddress *typaddress, int tsdb=0); extern void heap_endscan(HeapScanDescData*& scan); -extern PGFunction load_external_function(char *filename, char *funcname, - bool signalNotFound, void **filehandle,int tsdb=0); + extern List *make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist); @@ -81,21 +81,14 @@ extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid, EState *estate, bool noDupErr, bool *specConflict, List *arbiterIndexes); -extern void CreateCacheMemoryContext(void); + ArrayIterator array_create_iterator(ArrayType *arr, int slice_ndim, ArrayMetaState *mstate); extern Oid RangeVarGetRelidExtended_tsdb(const RangeVar *relation, LOCKMODE lockmode, bool missing_ok, bool nowait, RangeVarGetRelidCallback_tsdb callback, void *callback_arg,int tsdb = 0); -extern ObjectAddress -DefineIndex_tsdb(Oid relationId, - IndexStmt *stmt, - Oid indexRelationId, - bool 
is_alter_table, - bool check_rights, - bool skip_build, - bool quiet); + void @@ -198,6 +191,8 @@ extern void build_aggregate_finalfn_expr(Oid *agg_input_types, extern bool reindex_relation(Oid relid, int flags, int options); extern ColumnDef *makeColumnDef(const char *colname, Oid typeOid, int32 typmod, Oid collOid); +// extern void *MemoryContextAllocExtended(MemoryContext context, +// Size size, int flags); extern ArrayBuildStateArr *initArrayResultArr(Oid array_type, Oid element_type, MemoryContext rcontext, bool subcontext); extern ArrayBuildState *initArrayResult(Oid element_type, @@ -226,6 +221,8 @@ extern SortPath *create_sort_path(PlannerInfo *root, double limit_tuples); extern bool IsInParallelMode(void); +// extern void RunObjectPostCreateHook(Oid classId, Oid objectId, int subId, +// bool is_internal); extern void InitPostgres(const char *in_dbname, Oid dboid, const char *username, Oid useroid, char *out_dbname); @@ -270,4 +267,6 @@ extern bool check_functions_in_node(Node *node, check_function_callback checker, void *context); extern char func_parallel(Oid funcid); extern Relids find_childrel_parents(PlannerInfo *root, RelOptInfo *rel); + + #endif \ No newline at end of file diff --git a/contrib/timescaledb/src/tsdb_event_trigger.cpp b/contrib/timescaledb/src/tsdb_event_trigger.cpp deleted file mode 100644 index 75fe0d4cb..000000000 --- a/contrib/timescaledb/src/tsdb_event_trigger.cpp +++ /dev/null @@ -1,2217 +0,0 @@ -/*------------------------------------------------------------------------- - * - * event_trigger.c - * PostgreSQL EVENT TRIGGER support code. 
- * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * IDENTIFICATION - * src/backend/commands/event_trigger.c - * - *------------------------------------------------------------------------- - */ -#include "postgres.h" - -#include "access/htup.h" -#include "access/xact.h" -#include "catalog/dependency.h" -#include "catalog/indexing.h" -#include "catalog/objectaccess.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_opfamily.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_ts_config.h" -#include "catalog/pg_type.h" -#include "commands/dbcommands.h" -#include "commands/extension.h" -#include "commands/trigger.h" -#include "funcapi.h" -#include "parser/parse_func.h" -#include "pgstat.h" -#include "lib/ilist.h" -#include "miscadmin.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/rel.h" -#include "utils/syscache.h" -#include "tcop/utility.h" -#include "utils/evtcache.h" - - -#include "tsdb_get_obj.h" -#include "tsdb_event_trigger.h" -#include "tsdb_head.h" - - -THR_LOCAL PGDLLIMPORT int SessionReplicationRole; - -static EventTriggerQueryState *currentEventTriggerState = NULL; - -typedef struct -{ - const char *obtypename; - bool supported; -} event_trigger_support_data; - -typedef enum -{ - EVENT_TRIGGER_COMMAND_TAG_OK, - EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED, - EVENT_TRIGGER_COMMAND_TAG_NOT_RECOGNIZED -} event_trigger_command_tag_check_result; - -/* XXX merge this with ObjectTypeMap? 
*/ -static event_trigger_support_data event_trigger_support[] = { - {"ACCESS METHOD", true}, - {"AGGREGATE", true}, - {"CAST", true}, - {"CONSTRAINT", true}, - {"COLLATION", true}, - {"CONVERSION", true}, - {"DATABASE", false}, - {"DOMAIN", true}, - {"EXTENSION", true}, - {"EVENT TRIGGER", false}, - {"FOREIGN DATA WRAPPER", true}, - {"FOREIGN TABLE", true}, - {"FUNCTION", true}, - {"INDEX", true}, - {"LANGUAGE", true}, - {"MATERIALIZED VIEW", true}, - {"OPERATOR", true}, - {"OPERATOR CLASS", true}, - {"OPERATOR FAMILY", true}, - {"POLICY", true}, - {"ROLE", false}, - {"RULE", true}, - {"SCHEMA", true}, - {"SEQUENCE", true}, - {"SERVER", true}, - {"TABLE", true}, - {"TABLESPACE", false}, - {"TRANSFORM", true}, - {"TRIGGER", true}, - {"TEXT SEARCH CONFIGURATION", true}, - {"TEXT SEARCH DICTIONARY", true}, - {"TEXT SEARCH PARSER", true}, - {"TEXT SEARCH TEMPLATE", true}, - {"TYPE", true}, - {"USER MAPPING", true}, - {"VIEW", true}, - {NULL, false} -}; - -/* Support for dropped objects */ -typedef struct SQLDropObject -{ - ObjectAddress address; - const char *schemaname; - const char *objname; - const char *objidentity; - const char *objecttype; - List *addrnames; - List *addrargs; - bool original; - bool normal; - bool istemp; - slist_node next; -} SQLDropObject; - -static void AlterEventTriggerOwner_internal(Relation rel, - HeapTuple tup, - Oid newOwnerId); -static event_trigger_command_tag_check_result check_ddl_tag(const char *tag); -static event_trigger_command_tag_check_result check_table_rewrite_ddl_tag( - const char *tag); -static void error_duplicate_filter_variable(const char *defname); -static Datum filter_list_to_array(List *filterlist); -static Oid insert_event_trigger_tuple(char *trigname, char *eventname, - Oid evtOwner, Oid funcoid, List *tags); -static void validate_ddl_tags(const char *filtervar, List *taglist); -static void validate_table_rewrite_tags(const char *filtervar, List *taglist); -static void EventTriggerInvoke(List *fn_oid_list, 
EventTriggerData *trigdata); -static const char *stringify_grantobjtype(GrantObjectType objtype); -static const char *stringify_adefprivs_objtype(GrantObjectType objtype); - -/* - * Create an event trigger. - */ -Oid -CreateEventTrigger(CreateEventTrigStmt *stmt) -{ - HeapTuple tuple; - Oid funcoid; - Oid funcrettype; - Oid fargtypes[1]; /* dummy */ - Oid evtowner = GetUserId(); - ListCell *lc; - List *tags = NULL; - - /* - * It would be nice to allow database owners or even regular users to do - * this, but there are obvious privilege escalation risks which would have - * to somehow be plugged first. - */ - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to create event trigger \"%s\"", - stmt->trigname), - errhint("Must be superuser to create an event trigger."))); - - /* Validate event name. */ - if (strcmp(stmt->eventname, "ddl_command_start") != 0 && - strcmp(stmt->eventname, "ddl_command_end") != 0 && - strcmp(stmt->eventname, "sql_drop") != 0 && - strcmp(stmt->eventname, "table_rewrite") != 0) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unrecognized event name \"%s\"", - stmt->eventname))); - - /* Validate filter conditions. */ - foreach(lc, stmt->whenclause) - { - DefElem *def = (DefElem *) lfirst(lc); - - if (strcmp(def->defname, "tag") == 0) - { - if (tags != NULL) - error_duplicate_filter_variable(def->defname); - tags = (List *) def->arg; - } - else - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unrecognized filter variable \"%s\"", def->defname))); - } - - /* Validate tag list, if any. 
*/ - if ((strcmp(stmt->eventname, "ddl_command_start") == 0 || - strcmp(stmt->eventname, "ddl_command_end") == 0 || - strcmp(stmt->eventname, "sql_drop") == 0) - && tags != NULL) - validate_ddl_tags("tag", tags); - else if (strcmp(stmt->eventname, "table_rewrite") == 0 - && tags != NULL) - validate_table_rewrite_tags("tag", tags); - - /* - * Give user a nice error message if an event trigger of the same name - * already exists. - */ - tuple = SearchSysCache1(EVENTTRIGGERNAME, CStringGetDatum(stmt->trigname)); - if (HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("event trigger \"%s\" already exists", - stmt->trigname))); - - /* Find and validate the trigger function. */ - funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false); - funcrettype = get_func_rettype(funcoid); - if (funcrettype != EVTTRIGGEROID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("function %s must return type %s", - NameListToString(stmt->funcname), "event_trigger"))); - - /* Insert catalog entries. */ - return insert_event_trigger_tuple(stmt->trigname, stmt->eventname, - evtowner, funcoid, tags); -} - -/* - * Validate DDL command tags. 
- */ -static void -validate_ddl_tags(const char *filtervar, List *taglist) -{ - ListCell *lc; - - foreach(lc, taglist) - { - const char *tag = strVal(lfirst(lc)); - event_trigger_command_tag_check_result result; - - result = check_ddl_tag(tag); - if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_RECOGNIZED) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("filter value \"%s\" not recognized for filter variable \"%s\"", - tag, filtervar))); - if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /* translator: %s represents an SQL statement name */ - errmsg("event triggers are not supported for %s", - tag))); - } -} - -static event_trigger_command_tag_check_result -check_ddl_tag(const char *tag) -{ - const char *obtypename; - event_trigger_support_data *etsd; - - /* - * Handle some idiosyncratic special cases. - */ - if (pg_strcasecmp(tag, "CREATE TABLE AS") == 0 || - pg_strcasecmp(tag, "SELECT INTO") == 0 || - pg_strcasecmp(tag, "REFRESH MATERIALIZED VIEW") == 0 || - pg_strcasecmp(tag, "ALTER DEFAULT PRIVILEGES") == 0 || - pg_strcasecmp(tag, "ALTER LARGE OBJECT") == 0 || - pg_strcasecmp(tag, "COMMENT") == 0 || - pg_strcasecmp(tag, "GRANT") == 0 || - pg_strcasecmp(tag, "REVOKE") == 0 || - pg_strcasecmp(tag, "DROP OWNED") == 0 || - pg_strcasecmp(tag, "IMPORT FOREIGN SCHEMA") == 0 || - pg_strcasecmp(tag, "SECURITY LABEL") == 0) - return EVENT_TRIGGER_COMMAND_TAG_OK; - - /* - * Otherwise, command should be CREATE, ALTER, or DROP. - */ - if (pg_strncasecmp(tag, "CREATE ", 7) == 0) - obtypename = tag + 7; - else if (pg_strncasecmp(tag, "ALTER ", 6) == 0) - obtypename = tag + 6; - else if (pg_strncasecmp(tag, "DROP ", 5) == 0) - obtypename = tag + 5; - else - return EVENT_TRIGGER_COMMAND_TAG_NOT_RECOGNIZED; - - /* - * ...and the object type should be something recognizable. 
- */ - for (etsd = event_trigger_support; etsd->obtypename != NULL; etsd++) - if (pg_strcasecmp(etsd->obtypename, obtypename) == 0) - break; - if (etsd->obtypename == NULL) - return EVENT_TRIGGER_COMMAND_TAG_NOT_RECOGNIZED; - if (!etsd->supported) - return EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED; - return EVENT_TRIGGER_COMMAND_TAG_OK; -} - -/* - * Validate DDL command tags for event table_rewrite. - */ -static void -validate_table_rewrite_tags(const char *filtervar, List *taglist) -{ - ListCell *lc; - - foreach(lc, taglist) - { - const char *tag = strVal(lfirst(lc)); - event_trigger_command_tag_check_result result; - - result = check_table_rewrite_ddl_tag(tag); - if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /* translator: %s represents an SQL statement name */ - errmsg("event triggers are not supported for %s", - tag))); - } -} - -static event_trigger_command_tag_check_result -check_table_rewrite_ddl_tag(const char *tag) -{ - if (pg_strcasecmp(tag, "ALTER TABLE") == 0 || - pg_strcasecmp(tag, "ALTER TYPE") == 0) - return EVENT_TRIGGER_COMMAND_TAG_OK; - - return EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED; -} - -/* - * Complain about a duplicate filter variable. - */ -static void -error_duplicate_filter_variable(const char *defname) -{ - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("filter variable \"%s\" specified more than once", - defname))); -} - -/* - * Insert the new pg_event_trigger row and record dependencies. - */ -static Oid -insert_event_trigger_tuple(char *trigname, char *eventname, Oid evtOwner, - Oid funcoid, List *taglist) -{ - Relation tgrel; - Oid trigoid; - HeapTuple tuple; - Datum values[Natts_pg_trigger]; - bool nulls[Natts_pg_trigger]; - NameData evtnamedata, - evteventdata; - ObjectAddress myself, - referenced; - - /* Open pg_event_trigger. */ - tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock); - - /* Build the new pg_trigger tuple. 
*/ - memset(nulls, false, sizeof(nulls)); - namestrcpy(&evtnamedata, trigname); - values[Anum_pg_event_trigger_evtname - 1] = NameGetDatum(&evtnamedata); - namestrcpy(&evteventdata, eventname); - values[Anum_pg_event_trigger_evtevent - 1] = NameGetDatum(&evteventdata); - values[Anum_pg_event_trigger_evtowner - 1] = ObjectIdGetDatum(evtOwner); - values[Anum_pg_event_trigger_evtfoid - 1] = ObjectIdGetDatum(funcoid); - values[Anum_pg_event_trigger_evtenabled - 1] = - CharGetDatum(TRIGGER_FIRES_ON_ORIGIN); - if (taglist == NIL) - nulls[Anum_pg_event_trigger_evttags - 1] = true; - else - values[Anum_pg_event_trigger_evttags - 1] = - filter_list_to_array(taglist); - - /* Insert heap tuple. */ - tuple = heap_form_tuple(tgrel->rd_att, values, nulls); - trigoid = simple_heap_insert(tgrel, tuple); - CatalogUpdateIndexes(tgrel, tuple); - heap_freetuple(tuple); - - /* Depend on owner. */ - recordDependencyOnOwner(EventTriggerRelationId, trigoid, evtOwner); - - /* Depend on event trigger function. */ - myself.classId = EventTriggerRelationId; - myself.objectId = trigoid; - myself.objectSubId = 0; - referenced.classId = ProcedureRelationId; - referenced.objectId = funcoid; - referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); - - /* Depend on extension, if any. */ - recordDependencyOnCurrentExtension(&myself, false); - - /* Post creation hook for new event trigger */ - InvokeObjectPostCreateHook(EventTriggerRelationId, trigoid, 0); - - /* Close pg_event_trigger. */ - heap_close(tgrel, RowExclusiveLock); - - return trigoid; -} - -/* - * In the parser, a clause like WHEN tag IN ('cmd1', 'cmd2') is represented - * by a DefElem whose value is a List of String nodes; in the catalog, we - * store the list of strings as a text array. This function transforms the - * former representation into the latter one. - * - * For cleanliness, we store command tags in the catalog as text. 
It's - * possible (although not currently anticipated) that we might have - * a case-sensitive filter variable in the future, in which case this would - * need some further adjustment. - */ -static Datum -filter_list_to_array(List *filterlist) -{ - ListCell *lc; - Datum *data; - int i = 0, - l = list_length(filterlist); - - data = (Datum *) palloc(l * sizeof(Datum)); - - foreach(lc, filterlist) - { - const char *value = strVal(lfirst(lc)); - char *result, - *p; - - result = pstrdup(value); - for (p = result; *p; p++) - *p = pg_ascii_toupper((unsigned char) *p); - data[i++] = PointerGetDatum(cstring_to_text(result)); - pfree(result); - } - - return PointerGetDatum(construct_array(data, l, TEXTOID, -1, false, 'i')); -} - -/* - * Guts of event trigger deletion. - */ -void -RemoveEventTriggerById(Oid trigOid) -{ - Relation tgrel; - HeapTuple tup; - - tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock); - - tup = SearchSysCache1(EVENTTRIGGEROID, ObjectIdGetDatum(trigOid)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for event trigger %u", trigOid); - - simple_heap_delete(tgrel, &tup->t_self); - - ReleaseSysCache(tup); - - heap_close(tgrel, RowExclusiveLock); -} - -/* - * ALTER EVENT TRIGGER foo ENABLE|DISABLE|ENABLE ALWAYS|REPLICA - */ -Oid -AlterEventTrigger(AlterEventTrigStmt *stmt) -{ - Relation tgrel; - HeapTuple tup; - Oid trigoid; - Form_pg_event_trigger evtForm; - char tgenabled = stmt->tgenabled; - - tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock); - - tup = SearchSysCacheCopy1(EVENTTRIGGERNAME, - CStringGetDatum(stmt->trigname)); - if (!HeapTupleIsValid(tup)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("event trigger \"%s\" does not exist", - stmt->trigname))); - - trigoid = HeapTupleGetOid(tup); - - if (!pg_event_trigger_ownercheck(trigoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER,(AclObjectKind) 19, - stmt->trigname); - - /* tuple is a copy, so we can modify it below */ - evtForm = 
(Form_pg_event_trigger) GETSTRUCT(tup); - evtForm->evtenabled = tgenabled; - - simple_heap_update(tgrel, &tup->t_self, tup); - CatalogUpdateIndexes(tgrel, tup); - - InvokeObjectPostAlterHook(EventTriggerRelationId, - trigoid, 0); - - /* clean up */ - heap_freetuple(tup); - heap_close(tgrel, RowExclusiveLock); - - return trigoid; -} - -/* - * Change event trigger's owner -- by name - */ -ObjectAddress -AlterEventTriggerOwner(const char *name, Oid newOwnerId) -{ - Oid evtOid; - HeapTuple tup; - Relation rel; - ObjectAddress address; - - rel = heap_open(EventTriggerRelationId, RowExclusiveLock); - - tup = SearchSysCacheCopy1(EVENTTRIGGERNAME, CStringGetDatum(name)); - - if (!HeapTupleIsValid(tup)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("event trigger \"%s\" does not exist", name))); - - evtOid = HeapTupleGetOid(tup); - - AlterEventTriggerOwner_internal(rel, tup, newOwnerId); - - ObjectAddressSet(address, EventTriggerRelationId, evtOid); - - heap_freetuple(tup); - - heap_close(rel, RowExclusiveLock); - - return address; -} - -/* - * Change event trigger owner, by OID - */ -void -AlterEventTriggerOwner_oid(Oid trigOid, Oid newOwnerId) -{ - HeapTuple tup; - Relation rel; - - rel = heap_open(EventTriggerRelationId, RowExclusiveLock); - - tup = SearchSysCacheCopy1(EVENTTRIGGEROID, ObjectIdGetDatum(trigOid)); - - if (!HeapTupleIsValid(tup)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("event trigger with OID %u does not exist", trigOid))); - - AlterEventTriggerOwner_internal(rel, tup, newOwnerId); - - heap_freetuple(tup); - - heap_close(rel, RowExclusiveLock); -} - -/* - * Internal workhorse for changing an event trigger's owner - */ -static void -AlterEventTriggerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) -{ - Form_pg_event_trigger form; - - form = (Form_pg_event_trigger) GETSTRUCT(tup); - - if (form->evtowner == newOwnerId) - return; - - if (!pg_event_trigger_ownercheck(HeapTupleGetOid(tup), GetUserId())) - 
aclcheck_error(ACLCHECK_NOT_OWNER,(AclObjectKind) 19, - NameStr(form->evtname)); - - /* New owner must be a superuser */ - if (!superuser_arg(newOwnerId)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to change owner of event trigger \"%s\"", - NameStr(form->evtname)), - errhint("The owner of an event trigger must be a superuser."))); - - form->evtowner = newOwnerId; - simple_heap_update(rel, &tup->t_self, tup); - CatalogUpdateIndexes(rel, tup); - - /* Update owner dependency reference */ - changeDependencyOnOwner(EventTriggerRelationId, - HeapTupleGetOid(tup), - newOwnerId); - - InvokeObjectPostAlterHook(EventTriggerRelationId, - HeapTupleGetOid(tup), 0); -} - -/* - * get_event_trigger_oid - Look up an event trigger by name to find its OID. - * - * If missing_ok is false, throw an error if trigger not found. If - * true, just return InvalidOid. - */ -Oid -get_event_trigger_oid(const char *trigname, bool missing_ok) -{ - Oid oid; - - oid = GetSysCacheOid1(EVENTTRIGGERNAME, CStringGetDatum(trigname)); - if (!OidIsValid(oid) && !missing_ok) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("event trigger \"%s\" does not exist", trigname))); - return oid; -} - -/* - * Return true when we want to fire given Event Trigger and false otherwise, - * filtering on the session replication role and the event trigger registered - * tags matching. - */ -static bool -filter_event_trigger(const char **tag, EventTriggerCacheItem *item) -{ - /* - * Filter by session replication role, knowing that we never see disabled - * items down here. - */ - if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA) - { - if (item->enabled == TRIGGER_FIRES_ON_ORIGIN) - return false; - } - else - { - if (item->enabled == TRIGGER_FIRES_ON_REPLICA) - return false; - } - - /* Filter by tags, if any were specified. 
*/ - if (item->ntags != 0 && bsearch(tag, item->tag, - item->ntags, sizeof(char *), - pg_qsort_strcmp) == NULL) - return false; - - /* if we reach that point, we're not filtering out this item */ - return true; -} - -/* - * Setup for running triggers for the given event. Return value is an OID list - * of functions to run; if there are any, trigdata is filled with an - * appropriate EventTriggerData for them to receive. - */ -static List * -EventTriggerCommonSetup(Node *parsetree, - EventTriggerEvent event, const char *eventstr, - EventTriggerData *trigdata) -{ - const char *tag; - List *cachelist; - ListCell *lc; - List *runlist = NIL; - - /* - * We want the list of command tags for which this procedure is actually - * invoked to match up exactly with the list that CREATE EVENT TRIGGER - * accepts. This debugging cross-check will throw an error if this - * function is invoked for a command tag that CREATE EVENT TRIGGER won't - * accept. (Unfortunately, there doesn't seem to be any simple, automated - * way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that - * never reaches this control point.) - * - * If this cross-check fails for you, you probably need to either adjust - * standard_ProcessUtility() not to invoke event triggers for the command - * type in question, or you need to adjust check_ddl_tag to accept the - * relevant command tag. - */ -#ifdef USE_ASSERT_CHECKING - { - const char *dbgtag; - - dbgtag = CreateCommandTag(parsetree); - if (event == EVT_DDLCommandStart || - event == EVT_DDLCommandEnd || - event == EVT_SQLDrop) - { - if (check_ddl_tag(dbgtag) != EVENT_TRIGGER_COMMAND_TAG_OK) - elog(ERROR, "unexpected command tag \"%s\"", dbgtag); - } - else if (event == EVT_TableRewrite) - { - if (check_table_rewrite_ddl_tag(dbgtag) != EVENT_TRIGGER_COMMAND_TAG_OK) - elog(ERROR, "unexpected command tag \"%s\"", dbgtag); - } - } -#endif - - /* Use cache to find triggers for this event; fast exit if none. 
*/ - cachelist = EventCacheLookup(event); - if (cachelist == NIL) - return NIL; - - /* Get the command tag. */ - tag = CreateCommandTag(parsetree); - - /* - * Filter list of event triggers by command tag, and copy them into our - * memory context. Once we start running the command trigers, or indeed - * once we do anything at all that touches the catalogs, an invalidation - * might leave cachelist pointing at garbage, so we must do this before we - * can do much else. - */ - foreach(lc, cachelist) - { - EventTriggerCacheItem *item =(EventTriggerCacheItem *) lfirst(lc); - - if (filter_event_trigger(&tag, item)) - { - /* We must plan to fire this trigger. */ - runlist = lappend_oid(runlist, item->fnoid); - } - } - - /* don't spend any more time on this if no functions to run */ - if (runlist == NIL) - return NIL; - - trigdata->event = eventstr; - trigdata->parsetree = parsetree; - trigdata->tag = tag; - - return runlist; -} - -/* - * Fire ddl_command_start triggers. - */ -void -EventTriggerDDLCommandStart(Node *parsetree) -{ - List *runlist; - EventTriggerData trigdata; - - /* - * Event Triggers are completely disabled in standalone mode. There are - * (at least) two reasons for this: - * - * 1. A sufficiently broken event trigger might not only render the - * database unusable, but prevent disabling itself to fix the situation. - * In this scenario, restarting in standalone mode provides an escape - * hatch. - * - * 2. BuildEventTriggerCache relies on systable_beginscan_ordered, and - * therefore will malfunction if pg_event_trigger's indexes are damaged. - * To allow recovery from a damaged index, we need some operating mode - * wherein event triggers are disabled. (Or we could implement - * heapscan-and-sort logic for that case, but having disaster recovery - * scenarios depend on code that's otherwise untested isn't appetizing.) 
- */ - if (!IsUnderPostmaster) - return; - - runlist = EventTriggerCommonSetup(parsetree, - EVT_DDLCommandStart, - "ddl_command_start", - &trigdata); - if (runlist == NIL) - return; - - /* Run the triggers. */ - EventTriggerInvoke(runlist, &trigdata); - - /* Cleanup. */ - list_free(runlist); - - /* - * Make sure anything the event triggers did will be visible to the main - * command. - */ - CommandCounterIncrement(); -} - -/* - * Fire ddl_command_end triggers. - */ -void -EventTriggerDDLCommandEnd(Node *parsetree) -{ - List *runlist; - EventTriggerData trigdata; - - /* - * See EventTriggerDDLCommandStart for a discussion about why event - * triggers are disabled in single user mode. - */ - if (!IsUnderPostmaster) - return; - - runlist = EventTriggerCommonSetup(parsetree, - EVT_DDLCommandEnd, "ddl_command_end", - &trigdata); - if (runlist == NIL) - return; - - /* - * Make sure anything the main command did will be visible to the event - * triggers. - */ - CommandCounterIncrement(); - - /* Run the triggers. */ - EventTriggerInvoke(runlist, &trigdata); - - /* Cleanup. */ - list_free(runlist); -} - -/* - * Fire sql_drop triggers. - */ -void -EventTriggerSQLDrop(Node *parsetree) -{ - List *runlist; - EventTriggerData trigdata; - - /* - * See EventTriggerDDLCommandStart for a discussion about why event - * triggers are disabled in single user mode. - */ - if (!IsUnderPostmaster) - return; - - /* - * Use current state to determine whether this event fires at all. If - * there are no triggers for the sql_drop event, then we don't have - * anything to do here. Note that dropped object collection is disabled - * if this is the case, so even if we were to try to run, the list would - * be empty. - */ - if (!currentEventTriggerState || - slist_is_empty(¤tEventTriggerState->SQLDropList)) - return; - - runlist = EventTriggerCommonSetup(parsetree, - EVT_SQLDrop, "sql_drop", - &trigdata); - - /* - * Nothing to do if run list is empty. 
Note this shouldn't happen, - * because if there are no sql_drop events, then objects-to-drop wouldn't - * have been collected in the first place and we would have quit above. - */ - if (runlist == NIL) - return; - - /* - * Make sure anything the main command did will be visible to the event - * triggers. - */ - CommandCounterIncrement(); - - /* - * Make sure pg_event_trigger_dropped_objects only works when running - * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when - * one trigger fails. (This is perhaps not necessary, as the currentState - * variable will be removed shortly by our caller, but it seems better to - * play safe.) - */ - currentEventTriggerState->in_sql_drop = true; - - /* Run the triggers. */ - PG_TRY(); - { - EventTriggerInvoke(runlist, &trigdata); - } - PG_CATCH(); - { - currentEventTriggerState->in_sql_drop = false; - PG_RE_THROW(); - } - PG_END_TRY(); - currentEventTriggerState->in_sql_drop = false; - - /* Cleanup. */ - list_free(runlist); -} - - -/* - * Fire table_rewrite triggers. - */ -void -EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason) -{ - List *runlist; - EventTriggerData trigdata; - - elog(DEBUG1, "EventTriggerTableRewrite(%u)", tableOid); - - /* - * Event Triggers are completely disabled in standalone mode. There are - * (at least) two reasons for this: - * - * 1. A sufficiently broken event trigger might not only render the - * database unusable, but prevent disabling itself to fix the situation. - * In this scenario, restarting in standalone mode provides an escape - * hatch. - * - * 2. BuildEventTriggerCache relies on systable_beginscan_ordered, and - * therefore will malfunction if pg_event_trigger's indexes are damaged. - * To allow recovery from a damaged index, we need some operating mode - * wherein event triggers are disabled. 
(Or we could implement - * heapscan-and-sort logic for that case, but having disaster recovery - * scenarios depend on code that's otherwise untested isn't appetizing.) - */ - if (!IsUnderPostmaster) - return; - - runlist = EventTriggerCommonSetup(parsetree, - EVT_TableRewrite, - "table_rewrite", - &trigdata); - if (runlist == NIL) - return; - - /* - * Make sure pg_event_trigger_table_rewrite_oid only works when running - * these triggers. Use PG_TRY to ensure table_rewrite_oid is reset even - * when one trigger fails. (This is perhaps not necessary, as the - * currentState variable will be removed shortly by our caller, but it - * seems better to play safe.) - */ - currentEventTriggerState->table_rewrite_oid = tableOid; - currentEventTriggerState->table_rewrite_reason = reason; - - /* Run the triggers. */ - PG_TRY(); - { - EventTriggerInvoke(runlist, &trigdata); - } - PG_CATCH(); - { - currentEventTriggerState->table_rewrite_oid = InvalidOid; - currentEventTriggerState->table_rewrite_reason = 0; - PG_RE_THROW(); - } - PG_END_TRY(); - - currentEventTriggerState->table_rewrite_oid = InvalidOid; - currentEventTriggerState->table_rewrite_reason = 0; - - /* Cleanup. */ - list_free(runlist); - - /* - * Make sure anything the event triggers did will be visible to the main - * command. - */ - CommandCounterIncrement(); -} - -/* - * Invoke each event trigger in a list of event triggers. - */ -static void -EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata) -{ - MemoryContext context; - MemoryContext oldcontext; - ListCell *lc; - bool first = true; - - /* Guard against stack overflow due to recursive event trigger */ - check_stack_depth(); - - /* - * Let's evaluate event triggers in their own memory context, so that any - * leaks get cleaned up promptly. - */ - context = AllocSetContextCreate(CurrentMemoryContext, - "event trigger context", - ALLOCSET_DEFAULT_SIZES); - oldcontext = MemoryContextSwitchTo(context); - - /* Call each event trigger. 
*/ - foreach(lc, fn_oid_list) - { - Oid fnoid = lfirst_oid(lc); - FmgrInfo flinfo; - FunctionCallInfoData fcinfo; - PgStat_FunctionCallUsage fcusage; - - elog(DEBUG1, "EventTriggerInvoke %u", fnoid); - - /* - * We want each event trigger to be able to see the results of the - * previous event trigger's action. Caller is responsible for any - * command-counter increment that is needed between the event trigger - * and anything else in the transaction. - */ - if (first) - first = false; - else - CommandCounterIncrement(); - - /* Look up the function */ - fmgr_info(fnoid, &flinfo); - - /* Call the function, passing no arguments but setting a context. */ - InitFunctionCallInfoData(fcinfo, &flinfo, 0, - InvalidOid, (Node *) trigdata, NULL); - pgstat_init_function_usage(&fcinfo, &fcusage); - FunctionCallInvoke(&fcinfo); - pgstat_end_function_usage(&fcusage, true); - - /* Reclaim memory. */ - MemoryContextReset(context); - } - - /* Restore old memory context and delete the temporary one. */ - MemoryContextSwitchTo(oldcontext); - MemoryContextDelete(context); -} - -/* - * Do event triggers support this object type? 
- */ -bool -EventTriggerSupportsObjectType(ObjectType obtype) -{ - switch (obtype) - { - case OBJECT_DATABASE: - case OBJECT_TABLESPACE: - case OBJECT_ROLE: - /* no support for global objects */ - return false; - case OBJECT_AGGREGATE: - case OBJECT_ATTRIBUTE: - case OBJECT_CAST: - case OBJECT_COLUMN: - case OBJECT_COLLATION: - case OBJECT_CONVERSION: - case OBJECT_DOMAIN: - case OBJECT_EXTENSION: - case OBJECT_FDW: - case OBJECT_FOREIGN_SERVER: - case OBJECT_FOREIGN_TABLE: - case OBJECT_FUNCTION: - case OBJECT_INDEX: - case OBJECT_LANGUAGE: - case OBJECT_LARGEOBJECT: - case OBJECT_MATVIEW: - case OBJECT_OPCLASS: - case OBJECT_OPERATOR: - case OBJECT_OPFAMILY: - case OBJECT_RULE: - case OBJECT_SCHEMA: - case OBJECT_SEQUENCE: - case OBJECT_TABLE: - case OBJECT_TRIGGER: - case OBJECT_TSCONFIGURATION: - case OBJECT_TSDICTIONARY: - case OBJECT_TSPARSER: - case OBJECT_TSTEMPLATE: - case OBJECT_TYPE: - case OBJECT_VIEW: - return true; - } - return true; -} - -/* - * Do event triggers support this object class? 
- */ -bool -EventTriggerSupportsObjectClass(ObjectClass objclass) -{ - switch (objclass) - { - case OCLASS_DATABASE: - case OCLASS_TBLSPACE: - case OCLASS_ROLE: - /* no support for global objects */ - return false; - case OCLASS_CLASS: - case OCLASS_PROC: - case OCLASS_TYPE: - case OCLASS_CAST: - case OCLASS_COLLATION: - case OCLASS_CONSTRAINT: - case OCLASS_CONVERSION: - case OCLASS_DEFAULT: - case OCLASS_LANGUAGE: - case OCLASS_LARGEOBJECT: - case OCLASS_OPERATOR: - case OCLASS_OPCLASS: - case OCLASS_OPFAMILY: - case OCLASS_AMOP: - case OCLASS_AMPROC: - case OCLASS_REWRITE: - case OCLASS_TRIGGER: - case OCLASS_SCHEMA: - case OCLASS_TSPARSER: - case OCLASS_TSDICT: - case OCLASS_TSTEMPLATE: - case OCLASS_TSCONFIG: - case OCLASS_FDW: - case OCLASS_FOREIGN_SERVER: - case OCLASS_USER_MAPPING: - case OCLASS_DEFACL: - case OCLASS_EXTENSION: - return true; - } - - return true; -} - -bool -EventTriggerSupportsGrantObjectType(GrantObjectType objtype) -{ - switch (objtype) - { - case ACL_OBJECT_DATABASE: - case ACL_OBJECT_TABLESPACE: - /* no support for global objects */ - return false; - - case ACL_OBJECT_COLUMN: - case ACL_OBJECT_RELATION: - case ACL_OBJECT_SEQUENCE: - case ACL_OBJECT_DOMAIN: - case ACL_OBJECT_FDW: - case ACL_OBJECT_FOREIGN_SERVER: - case ACL_OBJECT_FUNCTION: - case ACL_OBJECT_LANGUAGE: - case ACL_OBJECT_LARGEOBJECT: - case ACL_OBJECT_NAMESPACE: - case ACL_OBJECT_TYPE: - return true; - default: - Assert(false); - return true; - } -} - -/* - * Prepare event trigger state for a new complete query to run, if necessary; - * returns whether this was done. If it was, EventTriggerEndCompleteQuery must - * be called when the query is done, regardless of whether it succeeds or fails - * -- so use of a PG_TRY block is mandatory. 
- */ -bool -EventTriggerBeginCompleteQuery(void) -{ - EventTriggerQueryState *state; - MemoryContext cxt; - - /* - * Currently, sql_drop, table_rewrite, ddl_command_end events are the only - * reason to have event trigger state at all; so if there are none, don't - * install one. - */ - if (!trackDroppedObjectsNeeded()) - return false; - - cxt = AllocSetContextCreate(TopMemoryContext, - "event trigger state", - ALLOCSET_DEFAULT_SIZES); - state =(EventTriggerQueryState*) MemoryContextAlloc(cxt, sizeof(EventTriggerQueryState)); - state->cxt = cxt; - slist_init(&(state->SQLDropList)); - state->in_sql_drop = false; - state->table_rewrite_oid = InvalidOid; - - state->commandCollectionInhibited = currentEventTriggerState ? - currentEventTriggerState->commandCollectionInhibited : false; - state->currentCommand = NULL; - state->commandList = NIL; - state->previous = currentEventTriggerState; - currentEventTriggerState = state; - - return true; -} - -/* - * Query completed (or errored out) -- clean up local state, return to previous - * one. - * - * Note: it's an error to call this routine if EventTriggerBeginCompleteQuery - * returned false previously. - * - * Note: this might be called in the PG_CATCH block of a failing transaction, - * so be wary of running anything unnecessary. (In particular, it's probably - * unwise to try to allocate memory.) - */ -void -EventTriggerEndCompleteQuery(void) -{ - EventTriggerQueryState *prevstate; - - prevstate = currentEventTriggerState->previous; - - /* this avoids the need for retail pfree of SQLDropList items: */ - MemoryContextDelete(currentEventTriggerState->cxt); - - currentEventTriggerState = prevstate; -} - -/* - * Do we need to keep close track of objects being dropped? - * - * This is useful because there is a cost to running with them enabled. 
- */ -bool -trackDroppedObjectsNeeded(void) -{ - /* - * true if any sql_drop, table_rewrite, ddl_command_end event trigger - * exists - */ - return list_length(EventCacheLookup(EVT_SQLDrop)) > 0 || - list_length(EventCacheLookup(EVT_TableRewrite)) > 0 || - list_length(EventCacheLookup(EVT_DDLCommandEnd)) > 0; -} - -/* - * Support for dropped objects information on event trigger functions. - * - * We keep the list of objects dropped by the current command in current - * state's SQLDropList (comprising SQLDropObject items). Each time a new - * command is to start, a clean EventTriggerQueryState is created; commands - * that drop objects do the dependency.c dance to drop objects, which - * populates the current state's SQLDropList; when the event triggers are - * invoked they can consume the list via pg_event_trigger_dropped_objects(). - * When the command finishes, the EventTriggerQueryState is cleared, and - * the one from the previous command is restored (when no command is in - * execution, the current state is NULL). - * - * All this lets us support the case that an event trigger function drops - * objects "reentrantly". - */ - -/* - * Register one object as being dropped by the current command. - */ -void -EventTriggerSQLDropAddObject(const ObjectAddress *object, bool original, bool normal) -{ - SQLDropObject *obj; - MemoryContext oldcxt; - - if (!currentEventTriggerState) - return; - - Assert(EventTriggerSupportsObjectClass(getObjectClass(object))); - - /* don't report temp schemas except my own */ - if (object->classId == NamespaceRelationId && - (isAnyTempNamespace(object->objectId) && - !isTempNamespace(object->objectId))) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - obj =(SQLDropObject*) palloc0(sizeof(SQLDropObject)); - obj->address = *object; - obj->original = original; - obj->normal = normal; - - /* - * Obtain schema names from the object's catalog tuple, if one exists; - * this lets us skip objects in temp schemas. 
We trust that - * ObjectProperty contains all object classes that can be - * schema-qualified. - */ - if (is_objectclass_supported(object->classId)) - { - Relation catalog; - HeapTuple tuple; - - catalog = heap_open(obj->address.classId, AccessShareLock); - tuple = get_catalog_object_by_oid(catalog, obj->address.objectId); - - if (tuple) - { - AttrNumber attnum; - Datum datum; - bool isnull; - - attnum = get_object_attnum_namespace(obj->address.classId); - if (attnum != InvalidAttrNumber) - { - datum = heap_getattr(tuple, attnum, - RelationGetDescr(catalog), &isnull); - if (!isnull) - { - Oid namespaceId; - - namespaceId = DatumGetObjectId(datum); - /* temp objects are only reported if they are my own */ - if (isTempNamespace(namespaceId)) - { - obj->schemaname = "pg_temp"; - obj->istemp = true; - } - else if (isAnyTempNamespace(namespaceId)) - { - pfree(obj); - heap_close(catalog, AccessShareLock); - MemoryContextSwitchTo(oldcxt); - return; - } - else - { - obj->schemaname = get_namespace_name(namespaceId); - obj->istemp = false; - } - } - } - - if (get_object_namensp_unique(obj->address.classId) && - obj->address.objectSubId == 0) - { - attnum = get_object_attnum_name(obj->address.classId); - if (attnum != InvalidAttrNumber) - { - datum = heap_getattr(tuple, attnum, - RelationGetDescr(catalog), &isnull); - if (!isnull) - obj->objname = pstrdup(NameStr(*DatumGetName(datum))); - } - } - } - - heap_close(catalog, AccessShareLock); - } - else - { - if (object->classId == NamespaceRelationId && - isTempNamespace(object->objectId)) - obj->istemp = true; - } - - /* object identity, objname and objargs */ - obj->objidentity = - getObjectIdentityParts(&obj->address, &obj->addrnames, &obj->addrargs); - - /* object type */ - obj->objecttype = getObjectTypeDescription(&obj->address); - - slist_push_head(&(currentEventTriggerState->SQLDropList), &obj->next); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * pg_event_trigger_dropped_objects - * - * Make the list of dropped 
objects available to the user function run by the - * Event Trigger. - */ -Datum -pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS) -{ - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - slist_iter iter; - - /* - * Protect this function from being called out of context - */ - if (!currentEventTriggerState || - !currentEventTriggerState->in_sql_drop) - ereport(ERROR, - (errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - errmsg("%s can only be called in a sql_drop event trigger function", - "pg_event_trigger_dropped_objects()"))); - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - - slist_foreach(iter, &(currentEventTriggerState->SQLDropList)) - { - SQLDropObject *obj; - int i = 0; - Datum values[12]; - bool nulls[12]; - - obj = slist_container(SQLDropObject, next, iter.cur); - - MemSet(values, 0, sizeof(values)); - MemSet(nulls, 0, sizeof(nulls)); - - /* classid */ - values[i++] = 
ObjectIdGetDatum(obj->address.classId); - - /* objid */ - values[i++] = ObjectIdGetDatum(obj->address.objectId); - - /* objsubid */ - values[i++] = Int32GetDatum(obj->address.objectSubId); - - /* original */ - values[i++] = BoolGetDatum(obj->original); - - /* normal */ - values[i++] = BoolGetDatum(obj->normal); - - /* is_temporary */ - values[i++] = BoolGetDatum(obj->istemp); - - /* object_type */ - values[i++] = CStringGetTextDatum(obj->objecttype); - - /* schema_name */ - if (obj->schemaname) - values[i++] = CStringGetTextDatum(obj->schemaname); - else - nulls[i++] = true; - - /* object_name */ - if (obj->objname) - values[i++] = CStringGetTextDatum(obj->objname); - else - nulls[i++] = true; - - /* object_identity */ - if (obj->objidentity) - values[i++] = CStringGetTextDatum(obj->objidentity); - else - nulls[i++] = true; - - /* address_names and address_args */ - if (obj->addrnames) - { - values[i++] = PointerGetDatum(strlist_to_textarray(obj->addrnames)); - - if (obj->addrargs) - values[i++] = PointerGetDatum(strlist_to_textarray(obj->addrargs)); - else - values[i++] = PointerGetDatum(construct_empty_array(TEXTOID)); - } - else - { - nulls[i++] = true; - nulls[i++] = true; - } - - tuplestore_putvalues(tupstore, tupdesc, values, nulls); - } - - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - - return (Datum) 0; -} - -/* - * pg_event_trigger_table_rewrite_oid - * - * Make the Oid of the table going to be rewritten available to the user - * function run by the Event Trigger. 
- */ -Datum -pg_event_trigger_table_rewrite_oid(PG_FUNCTION_ARGS) -{ - /* - * Protect this function from being called out of context - */ - if (!currentEventTriggerState || - currentEventTriggerState->table_rewrite_oid == InvalidOid) - ereport(ERROR, - (errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - errmsg("%s can only be called in a table_rewrite event trigger function", - "pg_event_trigger_table_rewrite_oid()"))); - - PG_RETURN_OID(currentEventTriggerState->table_rewrite_oid); -} - -/* - * pg_event_trigger_table_rewrite_reason - * - * Make the rewrite reason available to the user. - */ -Datum -pg_event_trigger_table_rewrite_reason(PG_FUNCTION_ARGS) -{ - /* - * Protect this function from being called out of context - */ - if (!currentEventTriggerState || - currentEventTriggerState->table_rewrite_reason == 0) - ereport(ERROR, - (errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - errmsg("%s can only be called in a table_rewrite event trigger function", - "pg_event_trigger_table_rewrite_reason()"))); - - PG_RETURN_INT32(currentEventTriggerState->table_rewrite_reason); -} - -/*------------------------------------------------------------------------- - * Support for DDL command deparsing - * - * The routines below enable an event trigger function to obtain a list of - * DDL commands as they are executed. There are three main pieces to this - * feature: - * - * 1) Within ProcessUtilitySlow, or some sub-routine thereof, each DDL command - * adds a struct CollectedCommand representation of itself to the command list, - * using the routines below. - * - * 2) Some time after that, ddl_command_end fires and the command list is made - * available to the event trigger function via pg_event_trigger_ddl_commands(); - * the complete command details are exposed as a column of type pg_ddl_command. 
- * - * 3) An extension can install a function capable of taking a value of type - * pg_ddl_command and transform it into some external, user-visible and/or - * -modifiable representation. - *------------------------------------------------------------------------- - */ - -/* - * Inhibit DDL command collection. - */ -void -EventTriggerInhibitCommandCollection(void) -{ - if (!currentEventTriggerState) - return; - - currentEventTriggerState->commandCollectionInhibited = true; -} - -/* - * Re-establish DDL command collection. - */ -void -EventTriggerUndoInhibitCommandCollection(void) -{ - if (!currentEventTriggerState) - return; - - currentEventTriggerState->commandCollectionInhibited = false; -} - -/* - * EventTriggerCollectSimpleCommand - * Save data about a simple DDL command that was just executed - * - * address identifies the object being operated on. secondaryObject is an - * object address that was related in some way to the executed command; its - * meaning is command-specific. - * - * For instance, for an ALTER obj SET SCHEMA command, objtype is the type of - * object being moved, objectId is its OID, and secondaryOid is the OID of the - * old schema. (The destination schema OID can be obtained by catalog lookup - * of the object.) 
- */ -void -EventTriggerCollectSimpleCommand(ObjectAddress address, - ObjectAddress secondaryObject, - Node *parsetree) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc(sizeof(CollectedCommand)); - - command->type = SCT_Simple; - command->in_extension = creating_extension; - - command->d.simple.address = address; - command->d.simple.secondaryObject = secondaryObject; - command->parsetree =(Node*) copyObject(parsetree); - - currentEventTriggerState->commandList = lappend(currentEventTriggerState->commandList, - command); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerAlterTableStart - * Prepare to receive data on an ALTER TABLE command about to be executed - * - * Note we don't collect the command immediately; instead we keep it in - * currentCommand, and only when we're done processing the subcommands we will - * add it to the command list. - * - * XXX -- this API isn't considering the possibility of an ALTER TABLE command - * being called reentrantly by an event trigger function. Do we need stackable - * commands at this level? Perhaps at least we should detect the condition and - * raise an error. 
- */ -void -EventTriggerAlterTableStart(Node *parsetree) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc(sizeof(CollectedCommand)); - - command->type = SCT_AlterTable; - command->in_extension = creating_extension; - - command->d.alterTable.classId = RelationRelationId; - command->d.alterTable.objectId = InvalidOid; - command->d.alterTable.subcmds = NIL; - command->parsetree =(Node*) copyObject(parsetree); - - currentEventTriggerState->currentCommand = command; - - MemoryContextSwitchTo(oldcxt); -} - -/* - * Remember the OID of the object being affected by an ALTER TABLE. - * - * This is needed because in some cases we don't know the OID until later. - */ -void -EventTriggerAlterTableRelid(Oid objectId) -{ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - currentEventTriggerState->currentCommand->d.alterTable.objectId = objectId; -} - -/* - * EventTriggerCollectAlterTableSubcmd - * Save data about a single part of an ALTER TABLE. - * - * Several different commands go through this path, but apart from ALTER TABLE - * itself, they are all concerned with AlterTableCmd nodes that are generated - * internally, so that's all that this code needs to handle at the moment. 
- */ -void -EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) -{ - MemoryContext oldcxt; - CollectedATSubcmd *newsub; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - Assert(IsA(subcmd, AlterTableCmd)); - Assert(OidIsValid(currentEventTriggerState->currentCommand->d.alterTable.objectId)); - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - newsub =(CollectedATSubcmd*) palloc(sizeof(CollectedATSubcmd)); - newsub->address = address; - newsub->parsetree =(Node*) copyObject(subcmd); - - currentEventTriggerState->currentCommand->d.alterTable.subcmds = - lappend(currentEventTriggerState->currentCommand->d.alterTable.subcmds, newsub); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerAlterTableEnd - * Finish up saving an ALTER TABLE command, and add it to command list. - * - * FIXME this API isn't considering the possibility that an xact/subxact is - * aborted partway through. Probably it's best to add an - * AtEOSubXact_EventTriggers() to fix this. - */ -void -EventTriggerAlterTableEnd(void) -{ - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - /* If no subcommands, don't collect */ - if (list_length(currentEventTriggerState->currentCommand->d.alterTable.subcmds) != 0) - { - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, - currentEventTriggerState->currentCommand); - } - else - pfree(currentEventTriggerState->currentCommand); - - currentEventTriggerState->currentCommand = NULL; -} - -/* - * EventTriggerCollectGrant - * Save data about a GRANT/REVOKE command being executed - * - * This function creates a copy of the InternalGrant, as the original might - * not have the right lifetime. 
- */ -void -EventTriggerCollectGrant(InternalGrant *istmt) -{ - MemoryContext oldcxt; - CollectedCommand *command; - InternalGrant *icopy; - ListCell *cell; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - /* - * This is tedious, but necessary. - */ - icopy =(InternalGrant*) palloc(sizeof(InternalGrant)); - memcpy(icopy, istmt, sizeof(InternalGrant)); - icopy->objects = list_copy(istmt->objects); - icopy->grantees = list_copy(istmt->grantees); - icopy->col_privs = NIL; - foreach(cell, istmt->col_privs) - icopy->col_privs = lappend(icopy->col_privs, copyObject(lfirst(cell))); - - /* Now collect it, using the copied InternalGrant */ - command =(CollectedCommand*) palloc(sizeof(CollectedCommand)); - command->type = SCT_Grant; - command->in_extension = creating_extension; - command->d.grant.istmt = icopy; - command->parsetree = NULL; - - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, command); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerCollectAlterOpFam - * Save data about an ALTER OPERATOR FAMILY ADD/DROP command being - * executed - */ -void -EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid, - List *operators, List *procedures) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc(sizeof(CollectedCommand)); - command->type = SCT_AlterOpFamily; - command->in_extension = creating_extension; - ObjectAddressSet(command->d.opfam.address, - OperatorFamilyRelationId, opfamoid); - command->d.opfam.operators = operators; - 
command->d.opfam.procedures = procedures; - command->parsetree =(Node*) copyObject(stmt); - - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, command); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerCollectCreateOpClass - * Save data about a CREATE OPERATOR CLASS command being executed - */ -void -EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, Oid opcoid, - List *operators, List *procedures) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc0(sizeof(CollectedCommand)); - command->type = SCT_CreateOpClass; - command->in_extension = creating_extension; - ObjectAddressSet(command->d.createopc.address, - OperatorClassRelationId, opcoid); - command->d.createopc.operators = operators; - command->d.createopc.procedures = procedures; - command->parsetree =(Node*) copyObject(stmt); - - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, command); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerCollectAlterTSConfig - * Save data about an ALTER TEXT SEARCH CONFIGURATION command being - * executed - */ -void -EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId, - Oid *dictIds, int ndicts) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc0(sizeof(CollectedCommand)); - command->type = SCT_AlterTSConfig; - command->in_extension = creating_extension; - 
ObjectAddressSet(command->d.atscfg.address, - TSConfigRelationId, cfgId); - command->d.atscfg.dictIds =(Oid*) palloc(sizeof(Oid) * ndicts); - memcpy(command->d.atscfg.dictIds, dictIds, sizeof(Oid) * ndicts); - command->d.atscfg.ndicts = ndicts; - command->parsetree =(Node*) copyObject(stmt); - - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, command); - - MemoryContextSwitchTo(oldcxt); -} - -/* - * EventTriggerCollectAlterDefPrivs - * Save data about an ALTER DEFAULT PRIVILEGES command being - * executed - */ -void -EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt) -{ - MemoryContext oldcxt; - CollectedCommand *command; - - /* ignore if event trigger context not set, or collection disabled */ - if (!currentEventTriggerState || - currentEventTriggerState->commandCollectionInhibited) - return; - - oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); - - command =(CollectedCommand*) palloc0(sizeof(CollectedCommand)); - command->type = SCT_AlterDefaultPrivileges; - command->d.defprivs.objtype = stmt->action->objtype; - command->in_extension = creating_extension; - command->parsetree =(Node*) copyObject(stmt); - - currentEventTriggerState->commandList = - lappend(currentEventTriggerState->commandList, command); - MemoryContextSwitchTo(oldcxt); -} - -/* - * In a ddl_command_end event trigger, this function reports the DDL commands - * being run. 
- */ -Datum -pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) -{ - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; - Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - ListCell *lc; - - /* - * Protect this function from being called out of context - */ - if (!currentEventTriggerState) - ereport(ERROR, - (errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - errmsg("%s can only be called in an event trigger function", - "pg_event_trigger_ddl_commands()"))); - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - - /* Build tuplestore to hold the result rows */ - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupdesc; - - MemoryContextSwitchTo(oldcontext); - - foreach(lc, currentEventTriggerState->commandList) - { - CollectedCommand *cmd =(CollectedCommand*) lfirst(lc); - Datum values[9]; - bool nulls[9]; - ObjectAddress addr; - int i = 0; - - /* - * For IF NOT EXISTS commands that attempt to create an existing - * object, the returned OID is Invalid. Don't return anything. 
- * - * One might think that a viable alternative would be to look up the - * Oid of the existing object and run the deparse with that. But - * since the parse tree might be different from the one that created - * the object in the first place, we might not end up in a consistent - * state anyway. - */ - if (cmd->type == SCT_Simple && - !OidIsValid(cmd->d.simple.address.objectId)) - continue; - - MemSet(nulls, 0, sizeof(nulls)); - - switch (cmd->type) - { - case SCT_Simple: - case SCT_AlterTable: - case SCT_AlterOpFamily: - case SCT_CreateOpClass: - case SCT_AlterTSConfig: - { - char *identity; - char *type; - char *schema = NULL; - - if (cmd->type == SCT_Simple) - addr = cmd->d.simple.address; - else if (cmd->type == SCT_AlterTable) - ObjectAddressSet(addr, - cmd->d.alterTable.classId, - cmd->d.alterTable.objectId); - else if (cmd->type == SCT_AlterOpFamily) - addr = cmd->d.opfam.address; - else if (cmd->type == SCT_CreateOpClass) - addr = cmd->d.createopc.address; - else if (cmd->type == SCT_AlterTSConfig) - addr = cmd->d.atscfg.address; - - type = getObjectTypeDescription(&addr); - identity = getObjectIdentity(&addr); - - /* - * Obtain schema name, if any ("pg_temp" if a temp - * object). If the object class is not in the supported - * list here, we assume it's a schema-less object type, - * and thus "schema" remains set to NULL. 
- */ - if (is_objectclass_supported(addr.classId)) - { - AttrNumber nspAttnum; - - nspAttnum = get_object_attnum_namespace(addr.classId); - if (nspAttnum != InvalidAttrNumber) - { - Relation catalog; - HeapTuple objtup; - Oid schema_oid; - bool isnull; - - catalog = heap_open(addr.classId, AccessShareLock); - objtup = get_catalog_object_by_oid(catalog, - addr.objectId); - if (!HeapTupleIsValid(objtup)) - elog(ERROR, "cache lookup failed for object %u/%u", - addr.classId, addr.objectId); - schema_oid = - heap_getattr(objtup, nspAttnum, - RelationGetDescr(catalog), &isnull); - if (isnull) - elog(ERROR, - "invalid null namespace in object %u/%u/%d", - addr.classId, addr.objectId, addr.objectSubId); - /* XXX not quite get_namespace_name_or_temp */ - if (isAnyTempNamespace(schema_oid)) - schema = pstrdup("pg_temp"); - else - schema = get_namespace_name(schema_oid); - - heap_close(catalog, AccessShareLock); - } - } - - /* classid */ - values[i++] = ObjectIdGetDatum(addr.classId); - /* objid */ - values[i++] = ObjectIdGetDatum(addr.objectId); - /* objsubid */ - values[i++] = Int32GetDatum(addr.objectSubId); - /* command tag */ - values[i++] = CStringGetTextDatum(CreateCommandTag(cmd->parsetree)); - /* object_type */ - values[i++] = CStringGetTextDatum(type); - /* schema */ - if (schema == NULL) - nulls[i++] = true; - else - values[i++] = CStringGetTextDatum(schema); - /* identity */ - values[i++] = CStringGetTextDatum(identity); - /* in_extension */ - values[i++] = BoolGetDatum(cmd->in_extension); - /* command */ - values[i++] = PointerGetDatum(cmd); - } - break; - - case SCT_AlterDefaultPrivileges: - /* classid */ - nulls[i++] = true; - /* objid */ - nulls[i++] = true; - /* objsubid */ - nulls[i++] = true; - /* command tag */ - values[i++] = CStringGetTextDatum(CreateCommandTag(cmd->parsetree)); - /* object_type */ - values[i++] = CStringGetTextDatum(stringify_adefprivs_objtype( - cmd->d.defprivs.objtype)); - /* schema */ - nulls[i++] = true; - /* identity */ - 
nulls[i++] = true; - /* in_extension */ - values[i++] = BoolGetDatum(cmd->in_extension); - /* command */ - values[i++] = PointerGetDatum(cmd); - break; - - case SCT_Grant: - /* classid */ - nulls[i++] = true; - /* objid */ - nulls[i++] = true; - /* objsubid */ - nulls[i++] = true; - /* command tag */ - values[i++] = CStringGetTextDatum(cmd->d.grant.istmt->is_grant ? - "GRANT" : "REVOKE"); - /* object_type */ - values[i++] = CStringGetTextDatum(stringify_grantobjtype( - cmd->d.grant.istmt->objtype)); - /* schema */ - nulls[i++] = true; - /* identity */ - nulls[i++] = true; - /* in_extension */ - values[i++] = BoolGetDatum(cmd->in_extension); - /* command */ - values[i++] = PointerGetDatum(cmd); - break; - } - - tuplestore_putvalues(tupstore, tupdesc, values, nulls); - } - - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - - PG_RETURN_VOID(); -} - -/* - * Return the GrantObjectType as a string, as it would appear in GRANT and - * REVOKE commands. - */ -static const char * -stringify_grantobjtype(GrantObjectType objtype) -{ - switch (objtype) - { - case ACL_OBJECT_COLUMN: - return "COLUMN"; - case ACL_OBJECT_RELATION: - return "TABLE"; - case ACL_OBJECT_SEQUENCE: - return "SEQUENCE"; - case ACL_OBJECT_DATABASE: - return "DATABASE"; - case ACL_OBJECT_DOMAIN: - return "DOMAIN"; - case ACL_OBJECT_FDW: - return "FOREIGN DATA WRAPPER"; - case ACL_OBJECT_FOREIGN_SERVER: - return "FOREIGN SERVER"; - case ACL_OBJECT_FUNCTION: - return "FUNCTION"; - case ACL_OBJECT_LANGUAGE: - return "LANGUAGE"; - case ACL_OBJECT_LARGEOBJECT: - return "LARGE OBJECT"; - case ACL_OBJECT_NAMESPACE: - return "SCHEMA"; - case ACL_OBJECT_TABLESPACE: - return "TABLESPACE"; - case ACL_OBJECT_TYPE: - return "TYPE"; - default: - elog(ERROR, "unrecognized type %d", objtype); - return "???"; /* keep compiler quiet */ - } -} - -/* - * Return the GrantObjectType as a string; as above, but use the spelling - * in ALTER DEFAULT PRIVILEGES commands instead. 
- */ -static const char * -stringify_adefprivs_objtype(GrantObjectType objtype) -{ - switch (objtype) - { - case ACL_OBJECT_RELATION: - return "TABLES"; - break; - case ACL_OBJECT_FUNCTION: - return "FUNCTIONS"; - break; - case ACL_OBJECT_SEQUENCE: - return "SEQUENCES"; - break; - case ACL_OBJECT_TYPE: - return "TYPES"; - break; - default: - elog(ERROR, "unrecognized type %d", objtype); - return "???"; /* keep compiler quiet */ - } -} diff --git a/contrib/timescaledb/src/tsdb_event_trigger.h b/contrib/timescaledb/src/tsdb_event_trigger.h deleted file mode 100644 index 0ef8be331..000000000 --- a/contrib/timescaledb/src/tsdb_event_trigger.h +++ /dev/null @@ -1,192 +0,0 @@ -/*------------------------------------------------------------------------- - * - * event_trigger.h - * Declarations for command trigger handling. - * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/commands/event_trigger.h - * - *------------------------------------------------------------------------- - */ -#ifndef EVENT_TRIGGER_H -#define EVENT_TRIGGER_H - -#include "catalog/dependency.h" -#include "catalog/objectaddress.h" -#include "nodes/parsenodes.h" -#include "tsdb.h" - - -typedef struct EventTriggerData -{ - NodeTag type; - const char *event; /* event name */ - Node *parsetree; /* parse tree */ - const char *tag; /* command tag */ -} EventTriggerData; - -typedef struct AlterEventTrigStmt -{ - NodeTag type; - char *trigname; /* TRIGGER's name */ - char tgenabled; /* trigger's firing configuration WRT - * session_replication_role */ -} AlterEventTrigStmt; - - -typedef struct CreateEventTrigStmt -{ - NodeTag type; - char *trigname; /* TRIGGER's name */ - char *eventname; /* event's identifier */ - List *whenclause; /* list of DefElems indicating filtering */ - List *funcname; /* qual. 
name of function to call */ -} CreateEventTrigStmt; - -typedef struct InternalGrant -{ - bool is_grant; - GrantObjectType objtype; - List *objects; - bool all_privs; - AclMode privileges; - List *col_privs; - List *grantees; - bool grant_option; - DropBehavior behavior; -} InternalGrant; - -typedef enum CollectedCommandType -{ - SCT_Simple, - SCT_AlterTable, - SCT_Grant, - SCT_AlterOpFamily, - SCT_AlterDefaultPrivileges, - SCT_CreateOpClass, - SCT_AlterTSConfig -} CollectedCommandType; - -typedef struct CollectedCommand -{ - CollectedCommandType type; - bool in_extension; - Node *parsetree; - - CollectedCommand * parent; - union - { - /* most commands */ - struct - { - ObjectAddress address; - ObjectAddress secondaryObject; - } simple; - - /* ALTER TABLE, and internal uses thereof */ - struct - { - Oid objectId; - Oid classId; - List *subcmds; - } alterTable; - - /* GRANT / REVOKE */ - struct - { - InternalGrant *istmt; - } grant; - - /* ALTER OPERATOR FAMILY */ - struct - { - ObjectAddress address; - List *operators; - List *procedures; - } opfam; - - /* CREATE OPERATOR CLASS */ - struct - { - ObjectAddress address; - List *operators; - List *procedures; - } createopc; - - /* ALTER TEXT SEARCH CONFIGURATION ADD/ALTER/DROP MAPPING */ - struct - { - ObjectAddress address; - Oid *dictIds; - int ndicts; - } atscfg; - - /* ALTER DEFAULT PRIVILEGES */ - struct - { - GrantObjectType objtype; - } defprivs; - } d; -} CollectedCommand; - -#define AT_REWRITE_ALTER_PERSISTENCE 0x01 -#define AT_REWRITE_DEFAULT_VAL 0x02 -#define AT_REWRITE_COLUMN_REWRITE 0x04 -#define AT_REWRITE_ALTER_OID 0x08 - -/* - * EventTriggerData is the node type that is passed as fmgr "context" info - * when a function is called by the event trigger manager. 
- */ -#define CALLED_AS_EVENT_TRIGGER(fcinfo) \ - ((fcinfo)->context != NULL && IsA((fcinfo)->context, EventTriggerData)) - -extern Oid CreateEventTrigger(CreateEventTrigStmt *stmt); -extern void RemoveEventTriggerById(Oid ctrigOid); -extern Oid get_event_trigger_oid(const char *trigname, bool missing_ok); - -extern Oid AlterEventTrigger(AlterEventTrigStmt *stmt); -extern ObjectAddress AlterEventTriggerOwner(const char *name, Oid newOwnerId); -extern void AlterEventTriggerOwner_oid(Oid, Oid newOwnerId); - -extern bool EventTriggerSupportsObjectType(ObjectType obtype); -extern bool EventTriggerSupportsObjectClass(ObjectClass objclass); -extern bool EventTriggerSupportsGrantObjectType(GrantObjectType objtype); -extern void EventTriggerDDLCommandStart(Node *parsetree); -extern void EventTriggerDDLCommandEnd(Node *parsetree); -extern void EventTriggerSQLDrop(Node *parsetree); -extern void EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason); - -extern bool EventTriggerBeginCompleteQuery(void); -extern void EventTriggerEndCompleteQuery(void); -extern bool trackDroppedObjectsNeeded(void); -extern void EventTriggerSQLDropAddObject(const ObjectAddress *object, - bool original, bool normal); - -extern void EventTriggerInhibitCommandCollection(void); -extern void EventTriggerUndoInhibitCommandCollection(void); - -extern void EventTriggerCollectSimpleCommand(ObjectAddress address, - ObjectAddress secondaryObject, - Node *parsetree); - -extern void EventTriggerAlterTableStart(Node *parsetree); -extern void EventTriggerAlterTableRelid(Oid objectId); -extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd, - ObjectAddress address); -extern void EventTriggerAlterTableEnd(void); - -extern void EventTriggerCollectGrant(InternalGrant *istmt); -extern void EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, - Oid opfamoid, List *operators, - List *procedures); -extern void EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, - Oid opcoid, List *operators, 
- List *procedures); -extern void EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, - Oid cfgId, Oid *dictIds, int ndicts); -extern void EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt); - -#endif /* EVENT_TRIGGER_H */ diff --git a/contrib/timescaledb/src/tsdb_extension.cpp b/contrib/timescaledb/src/tsdb_extension.cpp index 1ba4f6aad..c4ba1ac98 100644 --- a/contrib/timescaledb/src/tsdb_extension.cpp +++ b/contrib/timescaledb/src/tsdb_extension.cpp @@ -1,3 +1,4 @@ +#include "postgres.h" #include "compat.h" #include "commands/cluster.h" @@ -239,16 +240,7 @@ pg_event_trigger_ownercheck(Oid et_oid, Oid roleid) } -List * -EventCacheLookup(EventTriggerEvent event) -{ - EventTriggerCacheEntry *entry; - if (EventTriggerCacheState != ETCS_VALID) - BuildEventTriggerCache(); - entry =(EventTriggerCacheEntry *) hash_search(EventTriggerCache, &event, HASH_FIND, NULL); - return entry != NULL ? entry->triggerlist : NULL; -} bool has_bypassrls_privilege(Oid roleid) diff --git a/contrib/timescaledb/src/tsdb_get_obj.cpp b/contrib/timescaledb/src/tsdb_get_obj.cpp deleted file mode 100644 index 117f4632e..000000000 --- a/contrib/timescaledb/src/tsdb_get_obj.cpp +++ /dev/null @@ -1,2553 +0,0 @@ -/*------------------------------------------------------------------------- - * - * objectaddress.c - * functions for working with ObjectAddresses - * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/catalog/objectaddress.c - * - *------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "access/htup.h" -#include "access/sysattr.h" -#include "catalog/catalog.h" -#include "catalog/indexing.h" -#include "catalog/objectaddress.h" -#include "catalog/pg_am.h" -#include "catalog/pg_amop.h" -#include "catalog/pg_amproc.h" -#include "catalog/pg_attrdef.h" -#include 
"catalog/pg_authid.h" -#include "catalog/pg_cast.h" -#include "catalog/pg_default_acl.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_conversion.h" -#include "catalog/pg_database.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_foreign_server.h" -#include "catalog/pg_language.h" -#include "catalog/pg_largeobject.h" -#include "catalog/pg_largeobject_metadata.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_opfamily.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_rewrite.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_ts_config.h" -#include "catalog/pg_ts_dict.h" -#include "catalog/pg_ts_parser.h" -#include "catalog/pg_ts_template.h" -#include "catalog/pg_type.h" -#include "catalog/pg_user_mapping.h" -#include "commands/dbcommands.h" -#include "commands/defrem.h" -#include "commands/extension.h" -#include "commands/proclang.h" -#include "commands/tablespace.h" -#include "commands/trigger.h" -#include "foreign/foreign.h" -#include "funcapi.h" -#include "libpq/be-fsstubs.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "parser/parse_func.h" -#include "parser/parse_oper.h" -#include "parser/parse_type.h" -#include "rewrite/rewriteSupport.h" -#include "storage/lmgr.h" -#include "storage/sinval.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/syscache.h" - - -#include "compat.h" -#include "tsdb_get_obj.h" - - -#define TransformOidIndexId 3574 -#define PolicyOidIndexId 3257 -#define EventTriggerOidIndexId 3468 - -#define Anum_pg_policy_polname 1 -/* - * ObjectProperty - * - * This array provides a common part of system object structure; to help - * consolidate routines to handle various kind of object classes. 
- */ -typedef struct -{ - Oid class_oid; /* oid of catalog */ - Oid oid_index_oid; /* oid of index on system oid column */ - int oid_catcache_id; /* id of catcache on system oid column */ - int name_catcache_id; /* id of catcache on (name,namespace), - * or (name) if the object does not - * live in a namespace */ - AttrNumber attnum_name; /* attnum of name field */ - AttrNumber attnum_namespace; /* attnum of namespace field */ - AttrNumber attnum_owner; /* attnum of owner field */ - AttrNumber attnum_acl; /* attnum of acl field */ - AclObjectKind acl_kind; /* ACL_KIND_* of this object type */ - bool is_nsp_name_unique; /* can the nsp/name combination (or - * name alone, if there's no - * namespace) be considered a unique - * identifier for an object of this - * class? */ -} ObjectPropertyType; - -static const ObjectPropertyType ObjectProperty[] = -{ - { - AccessMethodRelationId, - AmOidIndexId, - AMOID, - AMNAME, - Anum_pg_am_amname, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - true - }, - { - CastRelationId, - CastOidIndexId, - -1, - -1, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - false - }, - { - CollationRelationId, - CollationOidIndexId, - COLLOID, - -1, /* COLLNAMEENCNSP also takes encoding */ - Anum_pg_collation_collname, - Anum_pg_collation_collnamespace, - Anum_pg_collation_collowner, - InvalidAttrNumber, - ACL_KIND_COLLATION, - true - }, - { - ConstraintRelationId, - ConstraintOidIndexId, - CONSTROID, - -1, - Anum_pg_constraint_conname, - Anum_pg_constraint_connamespace, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - false - }, - { - ConversionRelationId, - ConversionOidIndexId, - CONVOID, - CONNAMENSP, - Anum_pg_conversion_conname, - Anum_pg_conversion_connamespace, - Anum_pg_conversion_conowner, - InvalidAttrNumber, - ACL_KIND_CONVERSION, - true - }, - { - DatabaseRelationId, - DatabaseOidIndexId, - DATABASEOID, - -1, - 
Anum_pg_database_datname, - InvalidAttrNumber, - Anum_pg_database_datdba, - Anum_pg_database_datacl, - ACL_KIND_DATABASE, - true - }, - { - ExtensionRelationId, - ExtensionOidIndexId, - -1, - -1, - Anum_pg_extension_extname, - InvalidAttrNumber, /* extension doesn't belong to extnamespace */ - Anum_pg_extension_extowner, - InvalidAttrNumber, - ACL_KIND_EXTENSION, - true - }, - { - ForeignDataWrapperRelationId, - ForeignDataWrapperOidIndexId, - FOREIGNDATAWRAPPEROID, - FOREIGNDATAWRAPPERNAME, - Anum_pg_foreign_data_wrapper_fdwname, - InvalidAttrNumber, - Anum_pg_foreign_data_wrapper_fdwowner, - Anum_pg_foreign_data_wrapper_fdwacl, - ACL_KIND_FDW, - true - }, - { - ForeignServerRelationId, - ForeignServerOidIndexId, - FOREIGNSERVEROID, - FOREIGNSERVERNAME, - Anum_pg_foreign_server_srvname, - InvalidAttrNumber, - Anum_pg_foreign_server_srvowner, - Anum_pg_foreign_server_srvacl, - ACL_KIND_FOREIGN_SERVER, - true - }, - { - ProcedureRelationId, - ProcedureOidIndexId, - PROCOID, - -1, /* PROCNAMEARGSNSP also takes argument types */ - Anum_pg_proc_proname, - Anum_pg_proc_pronamespace, - Anum_pg_proc_proowner, - Anum_pg_proc_proacl, - ACL_KIND_PROC, - false - }, - { - LanguageRelationId, - LanguageOidIndexId, - LANGOID, - LANGNAME, - Anum_pg_language_lanname, - InvalidAttrNumber, - Anum_pg_language_lanowner, - Anum_pg_language_lanacl, - ACL_KIND_LANGUAGE, - true - }, - { - LargeObjectMetadataRelationId, - LargeObjectMetadataOidIndexId, - -1, - -1, - InvalidAttrNumber, - InvalidAttrNumber, - Anum_pg_largeobject_metadata_lomowner, - Anum_pg_largeobject_metadata_lomacl, - ACL_KIND_LARGEOBJECT, - false - }, - { - OperatorClassRelationId, - OpclassOidIndexId, - CLAOID, - -1, /* CLAAMNAMENSP also takes opcmethod */ - Anum_pg_opclass_opcname, - Anum_pg_opclass_opcnamespace, - Anum_pg_opclass_opcowner, - InvalidAttrNumber, - ACL_KIND_OPCLASS, - true - }, - { - OperatorRelationId, - OperatorOidIndexId, - OPEROID, - -1, /* OPERNAMENSP also takes left and right type */ - 
Anum_pg_operator_oprname, - Anum_pg_operator_oprnamespace, - Anum_pg_operator_oprowner, - InvalidAttrNumber, - ACL_KIND_OPER, - false - }, - { - OperatorFamilyRelationId, - OpfamilyOidIndexId, - OPFAMILYOID, - -1, /* OPFAMILYAMNAMENSP also takes opfmethod */ - Anum_pg_opfamily_opfname, - Anum_pg_opfamily_opfnamespace, - Anum_pg_opfamily_opfowner, - InvalidAttrNumber, - ACL_KIND_OPFAMILY, - true - }, - { - AuthIdRelationId, - AuthIdOidIndexId, - AUTHOID, - AUTHNAME, - Anum_pg_authid_rolname, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - true - }, - { - RewriteRelationId, - RewriteOidIndexId, - -1, - -1, - Anum_pg_rewrite_rulename, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - false - }, - { - NamespaceRelationId, - NamespaceOidIndexId, - NAMESPACEOID, - NAMESPACENAME, - Anum_pg_namespace_nspname, - InvalidAttrNumber, - Anum_pg_namespace_nspowner, - Anum_pg_namespace_nspacl, - ACL_KIND_NAMESPACE, - true - }, - { - RelationRelationId, - ClassOidIndexId, - RELOID, - RELNAMENSP, - Anum_pg_class_relname, - Anum_pg_class_relnamespace, - Anum_pg_class_relowner, - Anum_pg_class_relacl, - ACL_KIND_CLASS, - true - }, - { - TableSpaceRelationId, - TablespaceOidIndexId, - TABLESPACEOID, - -1, - Anum_pg_tablespace_spcname, - InvalidAttrNumber, - Anum_pg_tablespace_spcowner, - Anum_pg_tablespace_spcacl, - ACL_KIND_TABLESPACE, - true - }, - { - TransformRelationId, - TransformOidIndexId, - 51, - InvalidAttrNumber - }, - { - TriggerRelationId, - TriggerOidIndexId, - -1, - -1, - Anum_pg_trigger_tgname, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - false - }, - { - PolicyRelationId, - PolicyOidIndexId, - -1, - -1, - Anum_pg_policy_polname, - InvalidAttrNumber, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - false - }, - { - EventTriggerRelationId, - EventTriggerOidIndexId, - EVENTTRIGGEROID, - EVENTTRIGGERNAME, - Anum_pg_event_trigger_evtname, - 
InvalidAttrNumber, - Anum_pg_event_trigger_evtowner, - InvalidAttrNumber, - (AclObjectKind)19, - true - }, - { - TSConfigRelationId, - TSConfigOidIndexId, - TSCONFIGOID, - TSCONFIGNAMENSP, - Anum_pg_ts_config_cfgname, - Anum_pg_ts_config_cfgnamespace, - Anum_pg_ts_config_cfgowner, - InvalidAttrNumber, - ACL_KIND_TSCONFIGURATION, - true - }, - { - TSDictionaryRelationId, - TSDictionaryOidIndexId, - TSDICTOID, - TSDICTNAMENSP, - Anum_pg_ts_dict_dictname, - Anum_pg_ts_dict_dictnamespace, - Anum_pg_ts_dict_dictowner, - InvalidAttrNumber, - ACL_KIND_TSDICTIONARY, - true - }, - { - TSParserRelationId, - TSParserOidIndexId, - TSPARSEROID, - TSPARSERNAMENSP, - Anum_pg_ts_parser_prsname, - Anum_pg_ts_parser_prsnamespace, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - true - }, - { - TSTemplateRelationId, - TSTemplateOidIndexId, - TSTEMPLATEOID, - TSTEMPLATENAMENSP, - Anum_pg_ts_template_tmplname, - Anum_pg_ts_template_tmplnamespace, - InvalidAttrNumber, - InvalidAttrNumber, - (AclObjectKind)-1, - true, - }, - { - TypeRelationId, - TypeOidIndexId, - TYPEOID, - TYPENAMENSP, - Anum_pg_type_typname, - Anum_pg_type_typnamespace, - Anum_pg_type_typowner, - Anum_pg_type_typacl, - ACL_KIND_TYPE, - true - } -}; - -/* - * This struct maps the string object types as returned by - * getObjectTypeDescription into ObjType enum values. Note that some enum - * values can be obtained by different names, and that some string object types - * do not have corresponding values in the output enum. The user of this map - * must be careful to test for invalid values being returned. - * - * To ease maintenance, this follows the order of getObjectTypeDescription. 
- */ -static const struct object_type_map -{ - const char *tm_name; - ObjectType tm_type; -} - - ObjectTypeMap[] = -{ - /* OCLASS_CLASS, all kinds of relations */ - { - "table", OBJECT_TABLE - }, - { - "index", OBJECT_INDEX - }, - { - "sequence", OBJECT_SEQUENCE - }, - { - "toast table", (ObjectType)-1 - }, /* unmapped */ - { - "view", OBJECT_VIEW - }, - { - "materialized view", OBJECT_MATVIEW - }, - { - "composite type",(ObjectType) -1 - }, /* unmapped */ - { - "foreign table", OBJECT_FOREIGN_TABLE - }, - { - "table column", OBJECT_COLUMN - }, - { - "index column", (ObjectType)-1 - }, /* unmapped */ - { - "sequence column",(ObjectType) -1 - }, /* unmapped */ - { - "toast table column",(ObjectType) -1 - }, /* unmapped */ - { - "view column", (ObjectType)-1 - }, /* unmapped */ - { - "materialized view column", (ObjectType)-1 - }, /* unmapped */ - { - "composite type column",(ObjectType) -1 - }, /* unmapped */ - { - "foreign table column", OBJECT_COLUMN - }, - /* OCLASS_PROC */ - { - "aggregate", OBJECT_AGGREGATE - }, - { - "function", OBJECT_FUNCTION - }, - /* OCLASS_TYPE */ - { - "type", OBJECT_TYPE - }, - /* OCLASS_CAST */ - { - "cast", OBJECT_CAST - }, - /* OCLASS_COLLATION */ - { - "collation", OBJECT_COLLATION - }, - /* OCLASS_CONSTRAINT */ - { - "table constraint", (ObjectType)12 - }, - { - "domain constraint",(ObjectType) 13 - }, - /* OCLASS_CONVERSION */ - { - "conversion", OBJECT_CONVERSION - }, - /* OCLASS_DEFAULT */ - { - "default value", (ObjectType)10 - }, - /* OCLASS_LANGUAGE */ - { - "language", OBJECT_LANGUAGE - }, - /* OCLASS_LARGEOBJECT */ - { - "large object", OBJECT_LARGEOBJECT - }, - /* OCLASS_OPERATOR */ - { - "operator", OBJECT_OPERATOR - }, - /* OCLASS_OPCLASS */ - { - "operator class", OBJECT_OPCLASS - }, - /* OCLASS_OPFAMILY */ - { - "operator family", OBJECT_OPFAMILY - }, - /* OCLASS_AM */ - { - "access method",(ObjectType) 0 - }, - /* OCLASS_AMOP */ - { - "operator of access method",(ObjectType) 2 - }, - /* OCLASS_AMPROC */ - { - 
"function of access method",(ObjectType) 3 - }, - /* OCLASS_REWRITE */ - { - "rule", OBJECT_RULE - }, - /* OCLASS_TRIGGER */ - { - "trigger", OBJECT_TRIGGER - }, - /* OCLASS_SCHEMA */ - { - "schema", OBJECT_SCHEMA - }, - /* OCLASS_TSPARSER */ - { - "text search parser", OBJECT_TSPARSER - }, - /* OCLASS_TSDICT */ - { - "text search dictionary", OBJECT_TSDICTIONARY - }, - /* OCLASS_TSTEMPLATE */ - { - "text search template", OBJECT_TSTEMPLATE - }, - /* OCLASS_TSCONFIG */ - { - "text search configuration", OBJECT_TSCONFIGURATION - }, - /* OCLASS_ROLE */ - { - "role", OBJECT_ROLE - }, - /* OCLASS_DATABASE */ - { - "database", OBJECT_DATABASE - }, - /* OCLASS_TBLSPACE */ - { - "tablespace", OBJECT_TABLESPACE - }, - /* OCLASS_FDW */ - { - "foreign-data wrapper", OBJECT_FDW - }, - /* OCLASS_FOREIGN_SERVER */ - { - "server", OBJECT_FOREIGN_SERVER - }, - /* OCLASS_USER_MAPPING */ - { - "user mapping",(ObjectType) 42 - }, - /* OCLASS_DEFACL */ - { - "default acl", (ObjectType) 11 - }, - /* OCLASS_EXTENSION */ - { - "extension", OBJECT_EXTENSION - }, - /* OCLASS_EVENT_TRIGGER */ - { - "event trigger",(ObjectType)31 - }, - /* OCLASS_POLICY */ - { - "policy",(ObjectType)32 - }, - /* OCLASS_TRANSFORM */ - { - "transform",(ObjectType) 33 - } -}; - -const ObjectAddress InvalidObjectAddress = -{ - InvalidOid, - InvalidOid, - 0 -}; - -static char* format_type_internal( - Oid type_oid, int32 typemod, bool typemod_given, bool allow_invalid, bool include_nspname = false); -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout); -static char* psnprintf(size_t len, const char* fmt, ...) - /* This lets gcc check the format string for consistency. 
*/ - __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); - -static char *format_procedure_internal(Oid procedure_oid, bool force_qualify); -static char *format_operator_internal(Oid operator_oid, bool force_qualify); - -static const ObjectPropertyType *get_object_property_data(Oid class_id); - -static void getRelationTypeDescription(StringInfo buffer, Oid relid, - int32 objectSubId); -static void getProcedureTypeDescription(StringInfo buffer, Oid procid); -static void getConstraintTypeDescription(StringInfo buffer, Oid constroid); -static void getOpFamilyIdentity(StringInfo buffer, Oid opfid, List **objname); -static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname); - -Oid -get_object_oid_index(Oid class_id) -{ - const ObjectPropertyType *prop = get_object_property_data(class_id); - - return prop->oid_index_oid; -} - -int -get_object_catcache_oid(Oid class_id) -{ - const ObjectPropertyType *prop = get_object_property_data(class_id); - - return prop->oid_catcache_id; -} - -AttrNumber -get_object_attnum_name(Oid class_id) -{ - const ObjectPropertyType *prop = get_object_property_data(class_id); - - return prop->attnum_name; -} - -AttrNumber -get_object_attnum_namespace(Oid class_id) -{ - const ObjectPropertyType *prop = get_object_property_data(class_id); - - return prop->attnum_namespace; -} - -bool -get_object_namensp_unique(Oid class_id) -{ - const ObjectPropertyType *prop = get_object_property_data(class_id); - - return prop->is_nsp_name_unique; -} - -static const ObjectPropertyType * -get_object_property_data(Oid class_id) -{ - static const ObjectPropertyType *prop_last = NULL; - int index; - - /* - * A shortcut to speed up multiple consecutive lookups of a particular - * object class. 
- */ - if (prop_last && prop_last->class_oid == class_id) - return prop_last; - - for (index = 0; index < lengthof(ObjectProperty); index++) - { - if (ObjectProperty[index].class_oid == class_id) - { - prop_last = &ObjectProperty[index]; - return &ObjectProperty[index]; - } - } - - ereport(ERROR, - (errmsg_internal("unrecognized class ID: %u", class_id))); - - return NULL; /* keep MSC compiler happy */ -} - -HeapTuple -get_catalog_object_by_oid(Relation catalog, Oid objectId) -{ - HeapTuple tuple; - Oid classId = RelationGetRelid(catalog); - int oidCacheId = get_object_catcache_oid(classId); - - if (oidCacheId > 0) - { - tuple = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objectId)); - if (!HeapTupleIsValid(tuple)) /* should not happen */ - return NULL; - } - else - { - Oid oidIndexId = get_object_oid_index(classId); - SysScanDesc scan; - ScanKeyData skey; - - Assert(OidIsValid(oidIndexId)); - - ScanKeyInit(&skey, - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(objectId)); - - scan = systable_beginscan(catalog, oidIndexId, true, - NULL, 1, &skey); - tuple = systable_getnext(scan); - if (!HeapTupleIsValid(tuple)) - { - systable_endscan(scan); - return NULL; - } - tuple = heap_copytuple(tuple); - - systable_endscan(scan); - } - - return tuple; -} - -/* - * Return a palloc'ed string that describes the type of object that the - * passed address is for. - * - * Keep ObjectTypeMap in sync with this. 
- */ -char * -getObjectTypeDescription(const ObjectAddress *object) -{ - StringInfoData buffer; - - initStringInfo(&buffer); - - switch (getObjectClass(object)) - { - case OCLASS_CLASS: - getRelationTypeDescription(&buffer, object->objectId, - object->objectSubId); - break; - - case OCLASS_PROC: - getProcedureTypeDescription(&buffer, object->objectId); - break; - - case OCLASS_TYPE: - appendStringInfoString(&buffer, "type"); - break; - - case OCLASS_CAST: - appendStringInfoString(&buffer, "cast"); - break; - - case OCLASS_COLLATION: - appendStringInfoString(&buffer, "collation"); - break; - - case OCLASS_CONSTRAINT: - getConstraintTypeDescription(&buffer, object->objectId); - break; - - case OCLASS_CONVERSION: - appendStringInfoString(&buffer, "conversion"); - break; - - case OCLASS_DEFAULT: - appendStringInfoString(&buffer, "default value"); - break; - - case OCLASS_LANGUAGE: - appendStringInfoString(&buffer, "language"); - break; - - case OCLASS_LARGEOBJECT: - appendStringInfoString(&buffer, "large object"); - break; - - case OCLASS_OPERATOR: - appendStringInfoString(&buffer, "operator"); - break; - - case OCLASS_OPCLASS: - appendStringInfoString(&buffer, "operator class"); - break; - - case OCLASS_OPFAMILY: - appendStringInfoString(&buffer, "operator family"); - break; - - case OCLASS_AMOP: - appendStringInfoString(&buffer, "operator of access method"); - break; - - case OCLASS_AMPROC: - appendStringInfoString(&buffer, "function of access method"); - break; - - case OCLASS_REWRITE: - appendStringInfoString(&buffer, "rule"); - break; - - case OCLASS_TRIGGER: - appendStringInfoString(&buffer, "trigger"); - break; - - case OCLASS_SCHEMA: - appendStringInfoString(&buffer, "schema"); - break; - - case OCLASS_TSPARSER: - appendStringInfoString(&buffer, "text search parser"); - break; - - case OCLASS_TSDICT: - appendStringInfoString(&buffer, "text search dictionary"); - break; - - case OCLASS_TSTEMPLATE: - appendStringInfoString(&buffer, "text search template"); - 
break; - - case OCLASS_TSCONFIG: - appendStringInfoString(&buffer, "text search configuration"); - break; - - case OCLASS_ROLE: - appendStringInfoString(&buffer, "role"); - break; - - case OCLASS_DATABASE: - appendStringInfoString(&buffer, "database"); - break; - - case OCLASS_TBLSPACE: - appendStringInfoString(&buffer, "tablespace"); - break; - - case OCLASS_FDW: - appendStringInfoString(&buffer, "foreign-data wrapper"); - break; - - case OCLASS_FOREIGN_SERVER: - appendStringInfoString(&buffer, "server"); - break; - - case OCLASS_USER_MAPPING: - appendStringInfoString(&buffer, "user mapping"); - break; - - case OCLASS_DEFACL: - appendStringInfoString(&buffer, "default acl"); - break; - - case OCLASS_EXTENSION: - appendStringInfoString(&buffer, "extension"); - break; - - case 31: - appendStringInfoString(&buffer, "event trigger"); - break; - - case 32: - appendStringInfoString(&buffer, "policy"); - break; - - case 33: - appendStringInfoString(&buffer, "transform"); - break; - - case 29: - appendStringInfoString(&buffer, "access method"); - break; - - default: - appendStringInfo(&buffer, "unrecognized %u", object->classId); - break; - } - - return buffer.data; -} - -static void -getRelationTypeDescription(StringInfo buffer, Oid relid, int32 objectSubId) -{ - HeapTuple relTup; - Form_pg_class relForm; - - relTup = SearchSysCache1(RELOID, - ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(relTup)) - elog(ERROR, "cache lookup failed for relation %u", relid); - relForm = (Form_pg_class) GETSTRUCT(relTup); - - switch (relForm->relkind) - { - case RELKIND_RELATION: - appendStringInfoString(buffer, "table"); - break; - case RELKIND_INDEX: - appendStringInfoString(buffer, "index"); - break; - case RELKIND_SEQUENCE: - appendStringInfoString(buffer, "sequence"); - break; - case RELKIND_TOASTVALUE: - appendStringInfoString(buffer, "toast table"); - break; - case RELKIND_VIEW: - appendStringInfoString(buffer, "view"); - break; - case RELKIND_MATVIEW: - 
appendStringInfoString(buffer, "materialized view"); - break; - case RELKIND_COMPOSITE_TYPE: - appendStringInfoString(buffer, "composite type"); - break; - case RELKIND_FOREIGN_TABLE: - appendStringInfoString(buffer, "foreign table"); - break; - default: - /* shouldn't get here */ - appendStringInfoString(buffer, "relation"); - break; - } - - if (objectSubId != 0) - appendStringInfoString(buffer, " column"); - - ReleaseSysCache(relTup); -} - -/* - * subroutine for getObjectTypeDescription: describe a constraint type - */ -static void -getConstraintTypeDescription(StringInfo buffer, Oid constroid) -{ - Relation constrRel; - HeapTuple constrTup; - Form_pg_constraint constrForm; - - constrRel = heap_open(ConstraintRelationId, AccessShareLock); - constrTup = get_catalog_object_by_oid(constrRel, constroid); - if (!HeapTupleIsValid(constrTup)) - elog(ERROR, "cache lookup failed for constraint %u", constroid); - - constrForm = (Form_pg_constraint) GETSTRUCT(constrTup); - - if (OidIsValid(constrForm->conrelid)) - appendStringInfoString(buffer, "table constraint"); - else if (OidIsValid(constrForm->contypid)) - appendStringInfoString(buffer, "domain constraint"); - else - elog(ERROR, "invalid constraint %u", HeapTupleGetOid(constrTup)); - - heap_close(constrRel, AccessShareLock); -} - -/* - * subroutine for getObjectTypeDescription: describe a procedure type - */ -static void -getProcedureTypeDescription(StringInfo buffer, Oid procid) -{ - HeapTuple procTup; - Form_pg_proc procForm; - - procTup = SearchSysCache1(PROCOID, - ObjectIdGetDatum(procid)); - if (!HeapTupleIsValid(procTup)) - elog(ERROR, "cache lookup failed for procedure %u", procid); - procForm = (Form_pg_proc) GETSTRUCT(procTup); - - if (procForm->proisagg) - appendStringInfoString(buffer, "aggregate"); - else - appendStringInfoString(buffer, "function"); - - ReleaseSysCache(procTup); -} - -/* - * Obtain a given object's identity, as a palloc'ed string. 
- * - * This is for machine consumption, so it's not translated. All elements are - * schema-qualified when appropriate. - */ -char * -getObjectIdentity(const ObjectAddress *object) -{ - return getObjectIdentityParts(object, NULL, NULL); -} - -/* - * As above, but more detailed. - * - * There are two sets of return values: the identity itself as a palloc'd - * string is returned. objname and objargs, if not NULL, are output parameters - * that receive lists of C-strings that are useful to give back to - * get_object_address() to reconstruct the ObjectAddress. - */ -char * -getObjectIdentityParts(const ObjectAddress *object, - List **objname, List **objargs) -{ - StringInfoData buffer; - - initStringInfo(&buffer); - - /* - * Make sure that both objname and objargs were passed, or none was; and - * initialize them to empty lists. For objname this is useless because it - * will be initialized in all cases inside the switch; but we do it anyway - * so that we can test below that no branch leaves it unset. 
- */ - Assert(PointerIsValid(objname) == PointerIsValid(objargs)); - if (objname) - { - *objname = NIL; - *objargs = NIL; - } - - switch (getObjectClass(object)) - { - case OCLASS_CLASS: - getRelationIdentity(&buffer, object->objectId, objname); - if (object->objectSubId != 0) - { - char *attr; - - attr = get_relid_attribute_name(object->objectId, - object->objectSubId); - appendStringInfo(&buffer, ".%s", quote_identifier(attr)); - if (objname) - *objname = lappend(*objname, attr); - } - break; - - case OCLASS_PROC: - appendStringInfoString(&buffer, - format_procedure_qualified(object->objectId)); - if (objname) - format_procedure_parts(object->objectId, objname, objargs); - break; - - case OCLASS_TYPE: - { - char *typeout; - - typeout = format_type_be_qualified(object->objectId); - appendStringInfoString(&buffer, typeout); - if (objname) - *objname = list_make1(typeout); - } - break; - - case OCLASS_CAST: - { - Relation castRel; - HeapTuple tup; - Form_pg_cast castForm; - - castRel = heap_open(CastRelationId, AccessShareLock); - - tup = get_catalog_object_by_oid(castRel, object->objectId); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for cast %u", - object->objectId); - - castForm = (Form_pg_cast) GETSTRUCT(tup); - - appendStringInfo(&buffer, "(%s AS %s)", - format_type_be_qualified(castForm->castsource), - format_type_be_qualified(castForm->casttarget)); - - if (objname) - { - *objname = list_make1(format_type_be_qualified(castForm->castsource)); - *objargs = list_make1(format_type_be_qualified(castForm->casttarget)); - } - - heap_close(castRel, AccessShareLock); - break; - } - - case OCLASS_COLLATION: - { - HeapTuple collTup; - Form_pg_collation coll; - char *schema; - - collTup = SearchSysCache1(COLLOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(collTup)) - elog(ERROR, "cache lookup failed for collation %u", - object->objectId); - coll = (Form_pg_collation) GETSTRUCT(collTup); - schema = 
get_namespace_name_or_temp(coll->collnamespace); - appendStringInfoString(&buffer, - quote_qualified_identifier(schema, - NameStr(coll->collname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(coll->collname))); - ReleaseSysCache(collTup); - break; - } - - case OCLASS_CONSTRAINT: - { - HeapTuple conTup; - Form_pg_constraint con; - - conTup = SearchSysCache1(CONSTROID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(conTup)) - elog(ERROR, "cache lookup failed for constraint %u", - object->objectId); - con = (Form_pg_constraint) GETSTRUCT(conTup); - - if (OidIsValid(con->conrelid)) - { - appendStringInfo(&buffer, "%s on ", - quote_identifier(NameStr(con->conname))); - getRelationIdentity(&buffer, con->conrelid, objname); - if (objname) - *objname = lappend(*objname, pstrdup(NameStr(con->conname))); - } - else - { - ObjectAddress domain; - - Assert(OidIsValid(con->contypid)); - domain.classId = TypeRelationId; - domain.objectId = con->contypid; - domain.objectSubId = 0; - - appendStringInfo(&buffer, "%s on %s", - quote_identifier(NameStr(con->conname)), - getObjectIdentityParts(&domain, objname, objargs)); - - if (objname) - *objargs = lappend(*objargs, pstrdup(NameStr(con->conname))); - } - - ReleaseSysCache(conTup); - break; - } - - case OCLASS_CONVERSION: - { - HeapTuple conTup; - Form_pg_conversion conForm; - char *schema; - - conTup = SearchSysCache1(CONVOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(conTup)) - elog(ERROR, "cache lookup failed for conversion %u", - object->objectId); - conForm = (Form_pg_conversion) GETSTRUCT(conTup); - schema = get_namespace_name_or_temp(conForm->connamespace); - appendStringInfoString(&buffer, - quote_qualified_identifier(schema, - NameStr(conForm->conname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(conForm->conname))); - ReleaseSysCache(conTup); - break; - } - - case OCLASS_DEFAULT: - { - Relation attrdefDesc; - ScanKeyData skey[1]; - SysScanDesc 
adscan; - - HeapTuple tup; - Form_pg_attrdef attrdef; - ObjectAddress colobject; - - attrdefDesc = heap_open(AttrDefaultRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(object->objectId)); - - adscan = systable_beginscan(attrdefDesc, AttrDefaultOidIndexId, - true, NULL, 1, skey); - - tup = systable_getnext(adscan); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for attrdef %u", - object->objectId); - - attrdef = (Form_pg_attrdef) GETSTRUCT(tup); - - colobject.classId = RelationRelationId; - colobject.objectId = attrdef->adrelid; - colobject.objectSubId = attrdef->adnum; - - appendStringInfo(&buffer, "for %s", - getObjectIdentityParts(&colobject, - objname, objargs)); - - systable_endscan(adscan); - heap_close(attrdefDesc, AccessShareLock); - break; - } - - case OCLASS_LANGUAGE: - { - HeapTuple langTup; - Form_pg_language langForm; - - langTup = SearchSysCache1(LANGOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(langTup)) - elog(ERROR, "cache lookup failed for language %u", - object->objectId); - langForm = (Form_pg_language) GETSTRUCT(langTup); - appendStringInfoString(&buffer, - quote_identifier(NameStr(langForm->lanname))); - if (objname) - *objname = list_make1(pstrdup(NameStr(langForm->lanname))); - ReleaseSysCache(langTup); - break; - } - case OCLASS_LARGEOBJECT: - appendStringInfo(&buffer, "%u", - object->objectId); - if (objname) - *objname = list_make1(psprintf("%u", object->objectId)); - break; - - case OCLASS_OPERATOR: - appendStringInfoString(&buffer, - format_operator_qualified(object->objectId)); - if (objname) - format_operator_parts(object->objectId, objname, objargs); - break; - - case OCLASS_OPCLASS: - { - HeapTuple opcTup; - Form_pg_opclass opcForm; - HeapTuple amTup; - Form_pg_am amForm; - char *schema; - - opcTup = SearchSysCache1(CLAOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(opcTup)) - 
elog(ERROR, "cache lookup failed for opclass %u", - object->objectId); - opcForm = (Form_pg_opclass) GETSTRUCT(opcTup); - schema = get_namespace_name_or_temp(opcForm->opcnamespace); - - amTup = SearchSysCache1(AMOID, - ObjectIdGetDatum(opcForm->opcmethod)); - if (!HeapTupleIsValid(amTup)) - elog(ERROR, "cache lookup failed for access method %u", - opcForm->opcmethod); - amForm = (Form_pg_am) GETSTRUCT(amTup); - - appendStringInfo(&buffer, "%s USING %s", - quote_qualified_identifier(schema, - NameStr(opcForm->opcname)), - quote_identifier(NameStr(amForm->amname))); - if (objname) - *objname = list_make3(pstrdup(NameStr(amForm->amname)), - schema, - pstrdup(NameStr(opcForm->opcname))); - - ReleaseSysCache(amTup); - ReleaseSysCache(opcTup); - break; - } - - case OCLASS_OPFAMILY: - getOpFamilyIdentity(&buffer, object->objectId, objname); - break; - - case OCLASS_AMOP: - { - Relation amopDesc; - HeapTuple tup; - ScanKeyData skey[1]; - SysScanDesc amscan; - Form_pg_amop amopForm; - StringInfoData opfam; - char *ltype; - char *rtype; - - amopDesc = heap_open(AccessMethodOperatorRelationId, - AccessShareLock); - - ScanKeyInit(&skey[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(object->objectId)); - - amscan = systable_beginscan(amopDesc, AccessMethodOperatorOidIndexId, true, - NULL, 1, skey); - - tup = systable_getnext(amscan); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for amop entry %u", - object->objectId); - - amopForm = (Form_pg_amop) GETSTRUCT(tup); - - initStringInfo(&opfam); - getOpFamilyIdentity(&opfam, amopForm->amopfamily, objname); - - ltype = format_type_be_qualified(amopForm->amoplefttype); - rtype = format_type_be_qualified(amopForm->amoprighttype); - - if (objname) - { - *objname = lappend(*objname, - psprintf("%d", amopForm->amopstrategy)); - *objargs = list_make2(ltype, rtype); - } - - appendStringInfo(&buffer, "operator %d (%s, %s) of %s", - amopForm->amopstrategy, - ltype, rtype, 
opfam.data); - - pfree(opfam.data); - - systable_endscan(amscan); - heap_close(amopDesc, AccessShareLock); - break; - } - - case OCLASS_AMPROC: - { - Relation amprocDesc; - ScanKeyData skey[1]; - SysScanDesc amscan; - HeapTuple tup; - Form_pg_amproc amprocForm; - StringInfoData opfam; - char *ltype; - char *rtype; - - amprocDesc = heap_open(AccessMethodProcedureRelationId, - AccessShareLock); - - ScanKeyInit(&skey[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(object->objectId)); - - amscan = systable_beginscan(amprocDesc, AccessMethodProcedureOidIndexId, true, - NULL, 1, skey); - - tup = systable_getnext(amscan); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for amproc entry %u", - object->objectId); - - amprocForm = (Form_pg_amproc) GETSTRUCT(tup); - - initStringInfo(&opfam); - getOpFamilyIdentity(&opfam, amprocForm->amprocfamily, objname); - - ltype = format_type_be_qualified(amprocForm->amproclefttype); - rtype = format_type_be_qualified(amprocForm->amprocrighttype); - - if (objname) - { - *objname = lappend(*objname, - psprintf("%d", amprocForm->amprocnum)); - *objargs = list_make2(ltype, rtype); - } - - appendStringInfo(&buffer, "function %d (%s, %s) of %s", - amprocForm->amprocnum, - ltype, rtype, opfam.data); - - pfree(opfam.data); - - systable_endscan(amscan); - heap_close(amprocDesc, AccessShareLock); - break; - } - - case OCLASS_REWRITE: - { - Relation ruleDesc; - HeapTuple tup; - Form_pg_rewrite rule; - - ruleDesc = heap_open(RewriteRelationId, AccessShareLock); - - tup = get_catalog_object_by_oid(ruleDesc, object->objectId); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for rule %u", - object->objectId); - - rule = (Form_pg_rewrite) GETSTRUCT(tup); - - appendStringInfo(&buffer, "%s on ", - quote_identifier(NameStr(rule->rulename))); - getRelationIdentity(&buffer, rule->ev_class, objname); - if (objname) - *objname = lappend(*objname, pstrdup(NameStr(rule->rulename))); - - 
heap_close(ruleDesc, AccessShareLock); - break; - } - - case OCLASS_TRIGGER: - { - Relation trigDesc; - HeapTuple tup; - Form_pg_trigger trig; - - trigDesc = heap_open(TriggerRelationId, AccessShareLock); - - tup = get_catalog_object_by_oid(trigDesc, object->objectId); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for trigger %u", - object->objectId); - - trig = (Form_pg_trigger) GETSTRUCT(tup); - - appendStringInfo(&buffer, "%s on ", - quote_identifier(NameStr(trig->tgname))); - getRelationIdentity(&buffer, trig->tgrelid, objname); - if (objname) - *objname = lappend(*objname, pstrdup(NameStr(trig->tgname))); - - heap_close(trigDesc, AccessShareLock); - break; - } - - case 32: - { - Relation polDesc; - HeapTuple tup; - Form_pg_policy policy; - - polDesc = heap_open(PolicyRelationId, AccessShareLock); - - tup = get_catalog_object_by_oid(polDesc, object->objectId); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for policy %u", - object->objectId); - - policy = (Form_pg_policy) GETSTRUCT(tup); - - appendStringInfo(&buffer, "%s on ", - quote_identifier(NameStr(policy->polname))); - getRelationIdentity(&buffer, policy->polrelid, objname); - if (objname) - *objname = lappend(*objname, pstrdup(NameStr(policy->polname))); - - heap_close(polDesc, AccessShareLock); - break; - } - - case OCLASS_SCHEMA: - { - char *nspname; - - nspname = get_namespace_name_or_temp(object->objectId); - if (!nspname) - elog(ERROR, "cache lookup failed for namespace %u", - object->objectId); - appendStringInfoString(&buffer, - quote_identifier(nspname)); - if (objname) - *objname = list_make1(nspname); - break; - } - - case OCLASS_TSPARSER: - { - HeapTuple tup; - Form_pg_ts_parser formParser; - char *schema; - - tup = SearchSysCache1(TSPARSEROID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for text search parser %u", - object->objectId); - formParser = (Form_pg_ts_parser) GETSTRUCT(tup); - 
schema = get_namespace_name_or_temp(formParser->prsnamespace); - appendStringInfoString(&buffer, - quote_qualified_identifier(schema, - NameStr(formParser->prsname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(formParser->prsname))); - ReleaseSysCache(tup); - break; - } - - case OCLASS_TSDICT: - { - HeapTuple tup; - Form_pg_ts_dict formDict; - char *schema; - - tup = SearchSysCache1(TSDICTOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for text search dictionary %u", - object->objectId); - formDict = (Form_pg_ts_dict) GETSTRUCT(tup); - schema = get_namespace_name_or_temp(formDict->dictnamespace); - appendStringInfoString(&buffer, - quote_qualified_identifier(schema, - NameStr(formDict->dictname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(formDict->dictname))); - ReleaseSysCache(tup); - break; - } - - case OCLASS_TSTEMPLATE: - { - HeapTuple tup; - Form_pg_ts_template formTmpl; - char *schema; - - tup = SearchSysCache1(TSTEMPLATEOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for text search template %u", - object->objectId); - formTmpl = (Form_pg_ts_template) GETSTRUCT(tup); - schema = get_namespace_name_or_temp(formTmpl->tmplnamespace); - appendStringInfoString(&buffer, - quote_qualified_identifier(schema, - NameStr(formTmpl->tmplname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(formTmpl->tmplname))); - ReleaseSysCache(tup); - break; - } - - case OCLASS_TSCONFIG: - { - HeapTuple tup; - Form_pg_ts_config formCfg; - char *schema; - - tup = SearchSysCache1(TSCONFIGOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for text search configuration %u", - object->objectId); - formCfg = (Form_pg_ts_config) GETSTRUCT(tup); - schema = get_namespace_name_or_temp(formCfg->cfgnamespace); - appendStringInfoString(&buffer, - 
quote_qualified_identifier(schema, - NameStr(formCfg->cfgname))); - if (objname) - *objname = list_make2(schema, - pstrdup(NameStr(formCfg->cfgname))); - ReleaseSysCache(tup); - break; - } - - case OCLASS_ROLE: - { - char *username; - - username = GetUserNameFromId(object->objectId, false); - if (objname) - *objname = list_make1(username); - appendStringInfoString(&buffer, - quote_identifier(username)); - break; - } - - case OCLASS_DATABASE: - { - char *datname; - - datname = get_database_name(object->objectId); - if (!datname) - elog(ERROR, "cache lookup failed for database %u", - object->objectId); - if (objname) - *objname = list_make1(datname); - appendStringInfoString(&buffer, - quote_identifier(datname)); - break; - } - - case OCLASS_TBLSPACE: - { - char *tblspace; - - tblspace = get_tablespace_name(object->objectId); - if (!tblspace) - elog(ERROR, "cache lookup failed for tablespace %u", - object->objectId); - if (objname) - *objname = list_make1(tblspace); - appendStringInfoString(&buffer, - quote_identifier(tblspace)); - break; - } - - case OCLASS_FDW: - { - ForeignDataWrapper *fdw; - - fdw = GetForeignDataWrapper(object->objectId); - appendStringInfoString(&buffer, quote_identifier(fdw->fdwname)); - if (objname) - *objname = list_make1(pstrdup(fdw->fdwname)); - break; - } - - case OCLASS_FOREIGN_SERVER: - { - ForeignServer *srv; - - srv = GetForeignServer(object->objectId); - appendStringInfoString(&buffer, - quote_identifier(srv->servername)); - if (objname) - *objname = list_make1(pstrdup(srv->servername)); - break; - } - - case OCLASS_USER_MAPPING: - { - HeapTuple tup; - Oid useid; - Form_pg_user_mapping umform; - ForeignServer *srv; - const char *usename; - - tup = SearchSysCache1(USERMAPPINGOID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for user mapping %u", - object->objectId); - umform = (Form_pg_user_mapping) GETSTRUCT(tup); - useid = umform->umuser; - srv = 
GetForeignServer(umform->umserver); - - ReleaseSysCache(tup); - - if (OidIsValid(useid)) - usename = GetUserNameFromId(useid, false); - else - usename = "public"; - - if (objname) - { - *objname = list_make1(pstrdup(usename)); - *objargs = list_make1(pstrdup(srv->servername)); - } - - appendStringInfo(&buffer, "%s on server %s", - quote_identifier(usename), - srv->servername); - break; - } - - case OCLASS_DEFACL: - { - Relation defaclrel; - ScanKeyData skey[1]; - SysScanDesc rcscan; - HeapTuple tup; - Form_pg_default_acl defacl; - char *schema; - char *username; - - defaclrel = heap_open(DefaultAclRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(object->objectId)); - - rcscan = systable_beginscan(defaclrel, DefaultAclOidIndexId, - true, NULL, 1, skey); - - tup = systable_getnext(rcscan); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for default ACL %u", - object->objectId); - - defacl = (Form_pg_default_acl) GETSTRUCT(tup); - - username = GetUserNameFromId(defacl->defaclrole, false); - appendStringInfo(&buffer, - "for role %s", - quote_identifier(username)); - - if (OidIsValid(defacl->defaclnamespace)) - { - schema = get_namespace_name_or_temp(defacl->defaclnamespace); - appendStringInfo(&buffer, - " in schema %s", - quote_identifier(schema)); - } - else - schema = NULL; - - switch (defacl->defaclobjtype) - { - case DEFACLOBJ_RELATION: - appendStringInfoString(&buffer, - " on tables"); - break; - case DEFACLOBJ_SEQUENCE: - appendStringInfoString(&buffer, - " on sequences"); - break; - case DEFACLOBJ_FUNCTION: - appendStringInfoString(&buffer, - " on functions"); - break; - case DEFACLOBJ_TYPE: - appendStringInfoString(&buffer, - " on types"); - break; - } - - if (objname) - { - *objname = list_make1(username); - if (schema) - *objname = lappend(*objname, schema); - *objargs = list_make1(psprintf("%c", defacl->defaclobjtype)); - } - - 
systable_endscan(rcscan); - heap_close(defaclrel, AccessShareLock); - break; - } - - case OCLASS_EXTENSION: - { - char *extname; - - extname = get_extension_name(object->objectId); - if (!extname) - elog(ERROR, "cache lookup failed for extension %u", - object->objectId); - appendStringInfoString(&buffer, quote_identifier(extname)); - if (objname) - *objname = list_make1(extname); - break; - } - - case 31: - { - HeapTuple tup; - Form_pg_event_trigger trigForm; - - /* no objname support here */ - if (objname) - *objname = NIL; - - tup = SearchSysCache1(EVENTTRIGGEROID, - ObjectIdGetDatum(object->objectId)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for event trigger %u", - object->objectId); - trigForm = (Form_pg_event_trigger) GETSTRUCT(tup); - appendStringInfoString(&buffer, - quote_identifier(NameStr(trigForm->evtname))); - ReleaseSysCache(tup); - break; - } - - case 33: - { - Relation transformDesc; - HeapTuple tup; - Form_pg_transform transform; - char *transformLang; - char *transformType; - - transformDesc = heap_open(TransformRelationId, AccessShareLock); - - tup = get_catalog_object_by_oid(transformDesc, object->objectId); - - if (!HeapTupleIsValid(tup)) - elog(ERROR, "could not find tuple for transform %u", - object->objectId); - - transform = (Form_pg_transform) GETSTRUCT(tup); - - transformType = format_type_be_qualified(transform->trftype); - transformLang = get_language_name(transform->trflang); - - appendStringInfo(&buffer, "for %s on language %s", - transformType, - transformLang); - if (objname) - { - *objname = list_make1(transformType); - *objargs = list_make1(pstrdup(transformLang)); - } - - heap_close(transformDesc, AccessShareLock); - } - break; - - case 29: - { - char *amname; - - amname = get_am_name(object->objectId); - if (!amname) - elog(ERROR, "cache lookup failed for access method %u", - object->objectId); - appendStringInfoString(&buffer, quote_identifier(amname)); - if (objname) - *objname = list_make1(amname); - 
} - break; - - default: - appendStringInfo(&buffer, "unrecognized object %u %u %d", - object->classId, - object->objectId, - object->objectSubId); - break; - } - - /* - * If a get_object_address representation was requested, make sure we are - * providing one. We don't check objargs, because many of the cases above - * leave it as NIL. - */ - if (objname && *objname == NIL) - elog(ERROR, "requested object address for unsupported object class %d: text result \"%s\"", - (int) getObjectClass(object), buffer.data); - - return buffer.data; -} - -static void -getOpFamilyIdentity(StringInfo buffer, Oid opfid, List **objname) -{ - HeapTuple opfTup; - Form_pg_opfamily opfForm; - HeapTuple amTup; - Form_pg_am amForm; - char *schema; - - opfTup = SearchSysCache1(OPFAMILYOID, ObjectIdGetDatum(opfid)); - if (!HeapTupleIsValid(opfTup)) - elog(ERROR, "cache lookup failed for opfamily %u", opfid); - opfForm = (Form_pg_opfamily) GETSTRUCT(opfTup); - - amTup = SearchSysCache1(AMOID, ObjectIdGetDatum(opfForm->opfmethod)); - if (!HeapTupleIsValid(amTup)) - elog(ERROR, "cache lookup failed for access method %u", - opfForm->opfmethod); - amForm = (Form_pg_am) GETSTRUCT(amTup); - - schema = get_namespace_name_or_temp(opfForm->opfnamespace); - appendStringInfo(buffer, "%s USING %s", - quote_qualified_identifier(schema, - NameStr(opfForm->opfname)), - NameStr(amForm->amname)); - - if (objname) - *objname = list_make3(pstrdup(NameStr(amForm->amname)), - pstrdup(schema), - pstrdup(NameStr(opfForm->opfname))); - - ReleaseSysCache(amTup); - ReleaseSysCache(opfTup); -} - -/* - * Append the relation identity (quoted qualified name) to the given - * StringInfo. 
- */ -static void -getRelationIdentity(StringInfo buffer, Oid relid, List **objname) -{ - HeapTuple relTup; - Form_pg_class relForm; - char *schema; - - relTup = SearchSysCache1(RELOID, - ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(relTup)) - elog(ERROR, "cache lookup failed for relation %u", relid); - relForm = (Form_pg_class) GETSTRUCT(relTup); - - schema = get_namespace_name_or_temp(relForm->relnamespace); - appendStringInfoString(buffer, - quote_qualified_identifier(schema, - NameStr(relForm->relname))); - if (objname) - *objname = list_make2(schema, pstrdup(NameStr(relForm->relname))); - - ReleaseSysCache(relTup); -} - -/* - * Auxiliary function to return a TEXT array out of a list of C-strings. - */ -ArrayType * -strlist_to_textarray(List *list) -{ - ArrayType *arr; - Datum *datums; - int j = 0; - ListCell *cell; - MemoryContext memcxt; - MemoryContext oldcxt; - - memcxt = AllocSetContextCreate(CurrentMemoryContext, - "strlist to array", - ALLOCSET_DEFAULT_SIZES); - oldcxt = MemoryContextSwitchTo(memcxt); - - datums =(Datum *) palloc(sizeof(text *) * list_length(list)); - foreach(cell, list) - { - char *name =(char *) lfirst(cell); - - datums[j++] = CStringGetTextDatum(name); - } - - MemoryContextSwitchTo(oldcxt); - - arr = construct_array(datums, list_length(list), - TEXTOID, -1, false, 'i'); - MemoryContextDelete(memcxt); - - return arr; -} - - -#define MAX_INT32_LEN 11 - - - -char * -format_procedure_qualified(Oid procedure_oid) -{ - return format_procedure_internal(procedure_oid, true); -} - -void -format_procedure_parts(Oid procedure_oid, List **objnames, List **objargs) -{ - HeapTuple proctup; - Form_pg_proc procform; - int nargs; - int i; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(procedure_oid)); - - if (!HeapTupleIsValid(proctup)) - elog(ERROR, "cache lookup failed for procedure with OID %u", procedure_oid); - - procform = (Form_pg_proc) GETSTRUCT(proctup); - nargs = procform->pronargs; - - *objnames = 
list_make2(get_namespace_name_or_temp(procform->pronamespace), - pstrdup(NameStr(procform->proname))); - *objargs = NIL; - for (i = 0; i < nargs; i++) - { - Oid thisargtype = procform->proargtypes.values[i]; - - *objargs = lappend(*objargs, format_type_be_qualified(thisargtype)); - } - - ReleaseSysCache(proctup); -} - -char * -get_namespace_name_or_temp(Oid nspid) -{ - if (isTempNamespace(nspid)) - return "pg_temp"; - else - return get_namespace_name(nspid); -} - -char * -get_am_name(Oid amOid) -{ - HeapTuple tup; - char *result = NULL; - - tup = SearchSysCache1(AMOID, ObjectIdGetDatum(amOid)); - if (HeapTupleIsValid(tup)) - { - Form_pg_am amform = (Form_pg_am) GETSTRUCT(tup); - - result = pstrdup(NameStr(amform->amname)); - ReleaseSysCache(tup); - } - return result; -} - - -char * -format_type_be_qualified(Oid type_oid) -{ - return format_type_internal(type_oid, -1, false, false, true); -} - -void -format_operator_parts(Oid operator_oid, List **objnames, List **objargs) -{ - HeapTuple opertup; - Form_pg_operator oprForm; - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operator_oid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator with OID %u", - operator_oid); - - oprForm = (Form_pg_operator) GETSTRUCT(opertup); - *objnames = list_make2(get_namespace_name_or_temp(oprForm->oprnamespace), - pstrdup(NameStr(oprForm->oprname))); - *objargs = NIL; - if (oprForm->oprleft) - *objargs = lappend(*objargs, - format_type_be_qualified(oprForm->oprleft)); - if (oprForm->oprright) - *objargs = lappend(*objargs, - format_type_be_qualified(oprForm->oprright)); - - ReleaseSysCache(opertup); -} - -char * -format_operator_qualified(Oid operator_oid) -{ - return format_operator_internal(operator_oid, true); -} - -bool -is_objectclass_supported(Oid class_id) -{ - int index; - - for (index = 0; index < lengthof(ObjectProperty); index++) - { - if (ObjectProperty[index].class_oid == class_id) - return true; - } - - return false; -} - - -static 
char* format_type_internal( - Oid type_oid, int32 typemod, bool typemod_given, bool allow_invalid, bool include_nspname) -{ - bool with_typemod = typemod_given && (typemod >= 0); - HeapTuple tuple; - Form_pg_type typeform; - Oid array_base_type; - bool is_array = false; - char* buf = NULL; - - if (type_oid == InvalidOid && allow_invalid) - return pstrdup("-"); - - tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_oid)); - if (!HeapTupleIsValid(tuple)) { - if (allow_invalid) - return pstrdup("???"); - else - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", type_oid))); - } - typeform = (Form_pg_type)GETSTRUCT(tuple); - - /* - * Check if it's a regular (variable length) array type. Fixed-length - * array types such as "name" shouldn't get deconstructed. As of openGauss - * 8.1, rather than checking typlen we check the toast property, and don't - * deconstruct "plain storage" array types --- this is because we don't - * want to show oidvector as oid[]. - */ - array_base_type = typeform->typelem; - - if (array_base_type != InvalidOid && typeform->typstorage != 'p' && type_oid != OIDVECTOREXTENDOID && - type_oid != INT2VECTOREXTENDOID) { - /* Switch our attention to the array element type */ - ReleaseSysCache(tuple); - tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(array_base_type)); - if (!HeapTupleIsValid(tuple)) { - if (allow_invalid) - return pstrdup("???[]"); - else - ereport( - ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", type_oid))); - } - typeform = (Form_pg_type)GETSTRUCT(tuple); - type_oid = array_base_type; - is_array = true; - } else - is_array = false; - - /* - * See if we want to special-case the output for certain built-in types. - * Note that these special cases should all correspond to special - * productions in gram.y, to ensure that the type name will be taken as a - * system type, not a user type of the same name. 
- * - * If we do not provide a special-case output here, the type name will be - * handled the same way as a user type name --- in particular, it will be - * double-quoted if it matches any lexer keyword. This behavior is - * essential for some cases, such as types "bit" and "char". - */ - buf = NULL; /* flag for no special case */ - if ((type_oid == BYTEAWITHOUTORDERWITHEQUALCOLOID || type_oid == BYTEAWITHOUTORDERCOLOID) && typemod > 0) { - type_oid = typemod; - with_typemod = false; - } - - switch (type_oid) { - case BITOID: - if (with_typemod) - buf = printTypmod("bit", typemod, typeform->typmodout); - else if (typemod_given) { - /* - * bit with typmod -1 is not the same as BIT, which means - * BIT(1) per SQL spec. Report it as the quoted typename so - * that parser will not assign a bogus typmod. - */ - } else - buf = pstrdup("bit"); - break; - - case BOOLOID: - buf = pstrdup("boolean"); - break; - - case BPCHAROID: - if (with_typemod) - buf = printTypmod("character", typemod, typeform->typmodout); - else if (typemod_given) { - /* - * bpchar with typmod -1 is not the same as CHARACTER, which - * means CHARACTER(1) per SQL spec. Report it as bpchar so - * that parser will not assign a bogus typmod. 
- */ - } else - buf = pstrdup("character"); - break; - - case DATEOID: - buf = pstrdup("date"); - break; - - case FLOAT4OID: - buf = pstrdup("real"); - break; - - case FLOAT8OID: - buf = pstrdup("double precision"); - break; - case INT1OID: - buf = pstrdup("tinyint"); - break; - case INT2OID: - buf = pstrdup("smallint"); - break; - - case INT4OID: - buf = pstrdup("integer"); - break; - - case INT8OID: - buf = pstrdup("bigint"); - break; - - case NUMERICOID: - if (with_typemod) - buf = printTypmod("numeric", typemod, typeform->typmodout); - else - buf = pstrdup("numeric"); - break; - - case INTERVALOID: - if (with_typemod) - buf = printTypmod("interval", typemod, typeform->typmodout); - else - buf = pstrdup("interval"); - break; - - case TIMEOID: - if (with_typemod) - buf = printTypmod("time", typemod, typeform->typmodout); - else - buf = pstrdup("time without time zone"); - break; - - case TIMETZOID: - if (with_typemod) - buf = printTypmod("time", typemod, typeform->typmodout); - else - buf = pstrdup("time with time zone"); - break; - - case TIMESTAMPOID: - if (with_typemod) - buf = printTypmod("timestamp", typemod, typeform->typmodout); - else - buf = pstrdup("timestamp without time zone"); - break; - - case TIMESTAMPTZOID: - if (with_typemod) - buf = printTypmod("timestamp", typemod, typeform->typmodout); - else - buf = pstrdup("timestamp with time zone"); - break; - case SMALLDATETIMEOID: - buf = pstrdup("smalldatetime"); - break; - case VARBITOID: - if (with_typemod) - buf = printTypmod("bit varying", typemod, typeform->typmodout); - else - buf = pstrdup("bit varying"); - break; - - case VARCHAROID: - if (with_typemod) - buf = printTypmod("character varying", typemod, typeform->typmodout); - else - buf = pstrdup("character varying"); - break; - - case NVARCHAR2OID: - if (with_typemod) - buf = printTypmod("nvarchar2", typemod, typeform->typmodout); - else - buf = pstrdup("nvarchar2"); - break; - case TEXTOID: - buf = pstrdup("text"); - break; - case BYTEAOID: - 
buf = pstrdup("bytea"); - break; - case BYTEAWITHOUTORDERWITHEQUALCOLOID: - buf = pstrdup("byteawithoutorderwithequalcol"); - break; - default: - break; - } - - if (buf == NULL) { - /* - * Default handling: report the name as it appears in the catalog. - * Here, we must qualify the name if it is not visible in the search - * path, and we must double-quote it if it's not a standard identifier - * or if it matches any keyword. - */ - char* nspname = NULL; - char* typname = NULL; - - if (TypeIsVisible(type_oid)) { - nspname = NULL; - - /* get namespace string if we foce to deparse namespace name */ - if (include_nspname && PG_CATALOG_NAMESPACE != typeform->typnamespace) - nspname = get_namespace_name(typeform->typnamespace); - } else { - nspname = get_namespace_name(typeform->typnamespace); - } - - typname = NameStr(typeform->typname); - - buf = quote_qualified_identifier(nspname, typname); - - if (with_typemod && !(type_oid == BYTEAWITHOUTORDERWITHEQUALCOLOID || type_oid == BYTEAWITHOUTORDERCOLOID)) - buf = printTypmod(buf, typemod, typeform->typmodout); - } - - if (is_array) - buf = psnprintf(strlen(buf) + 3, "%s[]", buf); - - ReleaseSysCache(tuple); - - return buf; -} - - - -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout) -{ - char* res = NULL; - - /* Shouldn't be called if typmod is -1 */ - Assert(typmod >= 0); - - if (typmodout == InvalidOid) { - /* Default behavior: just print the integer typmod with parens */ - res = psnprintf(strlen(typname) + MAX_INT32_LEN + 3, "%s(%d)", typname, (int)typmod); - } else { - /* Use the type-specific typmodout procedure */ - char* tmstr = NULL; - - tmstr = DatumGetCString(OidFunctionCall1(typmodout, Int32GetDatum(typmod))); - res = psnprintf(strlen(typname) + strlen(tmstr) + 1, "%s%s", typname, tmstr); - } - - return res; -} - - -static char* psnprintf(size_t len, const char* fmt, ...) 
-{ - va_list ap; - char* buf = NULL; - - buf = (char*)palloc0(len); - - va_start(ap, fmt); - errno_t rc = vsnprintf_s(buf, len, len - 1, fmt, ap); - securec_check_ss(rc, "\0", "\0"); - va_end(ap); - - return buf; -} - -static char * -format_procedure_internal(Oid procedure_oid, bool force_qualify) -{ - char *result; - HeapTuple proctup; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(procedure_oid)); - - if (HeapTupleIsValid(proctup)) - { - Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); - char *proname = NameStr(procform->proname); - int nargs = procform->pronargs; - int i; - char *nspname; - StringInfoData buf; - - /* XXX no support here for bootstrap mode */ - - initStringInfo(&buf); - - /* - * Would this proc be found (given the right args) by regprocedurein? - * If not, or if caller requests it, we need to qualify it. - */ - if (!force_qualify && FunctionIsVisible(procedure_oid)) - nspname = NULL; - else - nspname = get_namespace_name(procform->pronamespace); - - appendStringInfo(&buf, "%s(", - quote_qualified_identifier(nspname, proname)); - for (i = 0; i < nargs; i++) - { - Oid thisargtype = procform->proargtypes.values[i]; - - if (i > 0) - appendStringInfoChar(&buf, ','); - appendStringInfoString(&buf, - force_qualify ? 
- format_type_be_qualified(thisargtype) : - format_type_be(thisargtype)); - } - appendStringInfoChar(&buf, ')'); - - result = buf.data; - - ReleaseSysCache(proctup); - } - else - { - /* If OID doesn't match any pg_proc entry, return it numerically */ - result = (char *) palloc(NAMEDATALEN); - snprintf(result, NAMEDATALEN, "%u", procedure_oid); - } - - return result; -} - - -static char * -format_operator_internal(Oid operator_oid, bool force_qualify) -{ - char *result; - HeapTuple opertup; - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operator_oid)); - - if (HeapTupleIsValid(opertup)) - { - Form_pg_operator operform = (Form_pg_operator) GETSTRUCT(opertup); - char *oprname = NameStr(operform->oprname); - char *nspname; - StringInfoData buf; - - /* XXX no support here for bootstrap mode */ - - initStringInfo(&buf); - - /* - * Would this oper be found (given the right args) by regoperatorin? - * If not, or if caller explicitly requests it, we need to qualify it. - */ - if (force_qualify || !OperatorIsVisible(operator_oid)) - { - nspname = get_namespace_name(operform->oprnamespace); - appendStringInfo(&buf, "%s.", - quote_identifier(nspname)); - } - - appendStringInfo(&buf, "%s(", oprname); - - if (operform->oprleft) - appendStringInfo(&buf, "%s,", - force_qualify ? - format_type_be_qualified(operform->oprleft) : - format_type_be(operform->oprleft)); - else - appendStringInfoString(&buf, "NONE,"); - - if (operform->oprright) - appendStringInfo(&buf, "%s)", - force_qualify ? 
- format_type_be_qualified(operform->oprright) : - format_type_be(operform->oprright)); - else - appendStringInfoString(&buf, "NONE)"); - - result = buf.data; - - ReleaseSysCache(opertup); - } - else - { - /* - * If OID doesn't match any pg_operator entry, return it numerically - */ - result = (char *) palloc(NAMEDATALEN); - snprintf(result, NAMEDATALEN, "%u", operator_oid); - } - - return result; -} \ No newline at end of file diff --git a/contrib/timescaledb/src/tsdb_get_obj.h b/contrib/timescaledb/src/tsdb_get_obj.h deleted file mode 100644 index a4f3bd6e6..000000000 --- a/contrib/timescaledb/src/tsdb_get_obj.h +++ /dev/null @@ -1,66 +0,0 @@ -/*------------------------------------------------------------------------- - * - * objectaddress.h - * functions for working with object addresses - * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/objectaddress.h - * - *------------------------------------------------------------------------- - */ -#ifndef tsdb_OBJECTADDRESS_H -#define tsdb_OBJECTADDRESS_H - -#include "nodes/pg_list.h" - -#include "utils/acl.h" -#include "utils/relcache.h" - - - -extern const ObjectAddress InvalidObjectAddress; - -#define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ - do { \ - (addr).classId = (class_id); \ - (addr).objectId = (object_id); \ - (addr).objectSubId = (object_sub_id); \ - } while (0) - -#define ObjectAddressSet(addr, class_id, object_id) \ - ObjectAddressSubSet(addr, class_id, object_id, 0) - -extern Oid get_object_oid_index(Oid class_id); -extern int get_object_catcache_oid(Oid class_id); -extern AttrNumber get_object_attnum_name(Oid class_id); -extern AttrNumber get_object_attnum_namespace(Oid class_id); -extern bool get_object_namensp_unique(Oid class_id); - -extern HeapTuple get_catalog_object_by_oid(Relation catalog, - Oid objectId); - -extern char 
*getObjectTypeDescription(const ObjectAddress *object); -extern char *getObjectIdentity(const ObjectAddress *address); -extern char *getObjectIdentityParts(const ObjectAddress *address, - List **objname, List **objargs); -extern ArrayType *strlist_to_textarray(List *list); - - -extern char *format_procedure_qualified(Oid procedure_oid); - -extern void format_procedure_parts(Oid operator_oid, List **objnames, - List **objargs); -extern char *get_namespace_name_or_temp(Oid nspid); - -extern char *get_am_name(Oid amOid); - -extern char *format_type_be_qualified(Oid type_oid); - -extern void format_operator_parts(Oid operator_oid, List **objnames, - List **objargs); - -extern char *format_operator_qualified(Oid operator_oid); - -#endif /* tsdb_OBJECTADDRESS_H */ diff --git a/contrib/timescaledb/src/tsdb_head.h b/contrib/timescaledb/src/tsdb_head.h index 80b80c164..24990a01a 100644 --- a/contrib/timescaledb/src/tsdb_head.h +++ b/contrib/timescaledb/src/tsdb_head.h @@ -24,7 +24,7 @@ #include "parser/parse_coerce.h" #include "access/reloptions.h" #include "tsdb_shm.h" -#include "tsdb_event_trigger.h" +#include "event_trigger.h" static const struct { @@ -64,6 +64,31 @@ static const struct #define REINDEX_REL_FORCE_INDEXES_UNLOGGED 0x08 #define REINDEX_REL_FORCE_INDEXES_PERMANENT 0x10 +#if defined _WIN32 || defined __CYGWIN__ + #ifdef BUILDING_DLL + #ifdef __GNUC__ + #define DLL_PUBLIC __attribute__ ((dllexport)) + #else + #define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also supports this syntax. + #endif + #else + #ifdef __GNUC__ + #define DLL_PUBLIC __attribute__ ((dllimport)) + #else + #define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also supports this syntax. 
+ #endif + #endif + #define DLL_LOCAL +#else + #if __GNUC__ >= 4 + #define DLL_PUBLIC __attribute__ ((visibility ("default"))) + #define DLL_LOCAL __attribute__ ((visibility ("hidden"))) + #else + #define DLL_PUBLIC + #define DLL_LOCAL + #endif +#endif + extern bool row_security; @@ -2383,4 +2408,6 @@ typedef struct BackgroundWorker_TS { #define BackgroundWorker BackgroundWorker_TS extern PGDLLIMPORT BackgroundWorker *MyBgworkerEntry; + + #endif \ No newline at end of file diff --git a/contrib/timescaledb/src/tsdb_static.cpp b/contrib/timescaledb/src/tsdb_static.cpp index fc2a2692e..0a70090f4 100644 --- a/contrib/timescaledb/src/tsdb_static.cpp +++ b/contrib/timescaledb/src/tsdb_static.cpp @@ -52,7 +52,7 @@ #include "storage/procarray.h" -#include "tsdb_event_trigger.h" +#include "event_trigger.h" #include "tsdb_static2.cpp" @@ -554,185 +554,9 @@ ResourceArrayEnlarge(ResourceArray *resarr) Assert(resarr->nitems < resarr->maxitems); } -static int -DecodeTextArrayToCString(Datum array, char ***cstringp) -{ - ArrayType *arr = DatumGetArrayTypeP(array); - Datum *elems; - char **cstring; - int i; - int nelems; - - if (ARR_NDIM(arr) != 1 || ARR_HASNULL(arr) || ARR_ELEMTYPE(arr) != TEXTOID) - elog(ERROR, "expected 1-D text array"); - deconstruct_array(arr, TEXTOID, -1, false, 'i', &elems, NULL, &nelems); - - cstring =(char **) palloc(nelems * sizeof(char *)); - for (i = 0; i < nelems; ++i) - cstring[i] = TextDatumGetCString(elems[i]); - - pfree(elems); - *cstringp = cstring; - return nelems; -} - -static void -InvalidateEventCacheCallback(Datum arg, int cacheid, uint32 hashvalue) -{ - /* - * If the cache isn't valid, then there might be a rebuild in progress, so - * we can't immediately blow it away. But it's advantageous to do this - * when possible, so as to immediately free memory. - */ - if (EventTriggerCacheState == ETCS_VALID) - { - MemoryContextResetAndDeleteChildren(EventTriggerCacheContext); - EventTriggerCache = NULL; - } - - /* Mark cache for rebuild. 
*/ - EventTriggerCacheState = ETCS_NEEDS_REBUILD; -} - -static void -BuildEventTriggerCache(void) -{ - HASHCTL ctl; - HTAB *cache; - MemoryContext oldcontext; - Relation rel; - Relation irel; - SysScanDesc scan; - - if (EventTriggerCacheContext != NULL) - { - /* - * Free up any memory already allocated in EventTriggerCacheContext. - * This can happen either because a previous rebuild failed, or - * because an invalidation happened before the rebuild was complete. - */ - MemoryContextResetAndDeleteChildren(EventTriggerCacheContext); - } - else - { - /* - * This is our first time attempting to build the cache, so we need to - * set up the memory context and register a syscache callback to - * capture future invalidation events. - */ - if (u_sess->cache_mem_cxt == NULL) - CreateCacheMemoryContext(); - EventTriggerCacheContext = - AllocSetContextCreate(u_sess->cache_mem_cxt, - "EventTriggerCache", - ALLOCSET_DEFAULT_SIZES); - CacheRegisterThreadSyscacheCallback(EVENTTRIGGEROID, - InvalidateEventCacheCallback, - (Datum) 0); - } - /* Switch to correct memory context. */ - oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext); - /* Prevent the memory context from being nuked while we're rebuilding. */ - EventTriggerCacheState = ETCS_REBUILD_STARTED; - /* Create new hash table. */ - MemSet(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(EventTriggerEvent); - ctl.entrysize = sizeof(EventTriggerCacheEntry); - ctl.hcxt = EventTriggerCacheContext; - cache = hash_create("Event Trigger Cache", 32, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - - /* - * Prepare to scan pg_event_trigger in name order. - */ - rel = relation_open(EventTriggerRelationId, AccessShareLock); - irel = index_open(EventTriggerNameIndexId, AccessShareLock); - scan = systable_beginscan_ordered(rel, irel, NULL, 0, NULL); - - /* - * Build a cache item for each pg_event_trigger tuple, and append each one - * to the appropriate cache entry. 
- */ - for (;;) - { - HeapTuple tup; - Form_pg_event_trigger form; - char *evtevent; - EventTriggerEvent event; - EventTriggerCacheItem *item; - Datum evttags; - bool evttags_isnull; - EventTriggerCacheEntry *entry; - bool found; - - /* Get next tuple. */ - tup = systable_getnext_ordered(scan, ForwardScanDirection); - if (!HeapTupleIsValid(tup)) - break; - - /* Skip trigger if disabled. */ - form = (Form_pg_event_trigger) GETSTRUCT(tup); - if (form->evtenabled == TRIGGER_DISABLED) - continue; - - /* Decode event name. */ - evtevent = NameStr(form->evtevent); - if (strcmp(evtevent, "ddl_command_start") == 0) - event = EVT_DDLCommandStart; - else if (strcmp(evtevent, "ddl_command_end") == 0) - event = EVT_DDLCommandEnd; - else if (strcmp(evtevent, "sql_drop") == 0) - event = EVT_SQLDrop; - else if (strcmp(evtevent, "table_rewrite") == 0) - event = EVT_TableRewrite; - else - continue; - - /* Allocate new cache item. */ - item =(EventTriggerCacheItem*) palloc0(sizeof(EventTriggerCacheItem)); - item->fnoid = form->evtfoid; - item->enabled = form->evtenabled; - - /* Decode and sort tags array. */ - evttags = heap_getattr_tsdb(tup, Anum_pg_event_trigger_evttags, - RelationGetDescr(rel), &evttags_isnull); - if (!evttags_isnull) - { - item->ntags = DecodeTextArrayToCString(evttags, &item->tag); - qsort(item->tag, item->ntags, sizeof(char *), pg_qsort_strcmp); - } - - /* Add to cache entry. */ - entry =(EventTriggerCacheEntry *) hash_search(cache, &event, HASH_ENTER, &found); - if (found) - entry->triggerlist = lappend(entry->triggerlist, item); - else - entry->triggerlist = list_make1(item); - } - - /* Done with pg_event_trigger scan. */ - systable_endscan_ordered(scan); - index_close(irel, AccessShareLock); - relation_close(rel, AccessShareLock); - - /* Restore previous memory context. */ - MemoryContextSwitchTo(oldcontext); - - /* Install new cache. 
*/ - EventTriggerCache = cache; - - /* - * If the cache has been invalidated since we entered this routine, we - * still use and return the cache we just finished constructing, to avoid - * infinite loops, but we leave the cache marked stale so that we'll - * rebuild it again on next access. Otherwise, we mark the cache valid. - */ - if (EventTriggerCacheState == ETCS_REBUILD_STARTED) - EventTriggerCacheState = ETCS_VALID; -} static bool index_recheck_constraint( Relation index, Oid* constr_procs, Datum* existing_values, const bool* existing_isnull, Datum* new_values) diff --git a/contrib/timescaledb/src/utils.cpp b/contrib/timescaledb/src/utils.cpp index b612695a6..ec4578e4e 100644 --- a/contrib/timescaledb/src/utils.cpp +++ b/contrib/timescaledb/src/utils.cpp @@ -517,11 +517,19 @@ ts_lookup_proc_filtered(const char *schema, const char *funcname, Oid *rettype, * that would not allow us to check for functions that take either * ANYELEMENTOID or a dimension-specific in the same search. */ - catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); + #ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum < 92470) { + catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); + } else { + catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(funcname)); + } + #else + catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); + #endif for (i = 0; i < catlist->n_members; i++) { - HeapTuple proctup = &catlist->systups[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); if (procform->pronamespace == namespace_oid && diff --git a/contrib/timescaledb/test/src/CMakeLists.txt b/contrib/timescaledb/test/src/CMakeLists.txt index a4937d266..4fdab18a1 100644 --- a/contrib/timescaledb/test/src/CMakeLists.txt +++ b/contrib/timescaledb/test/src/CMakeLists.txt @@ -27,3 +27,10 @@ add_subdirectory(net) add_subdirectory(telemetry) 
add_subdirectory(loader) + +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) +set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/bgw/CMakeLists.txt b/contrib/timescaledb/test/src/bgw/CMakeLists.txt index e82c3bd99..82d0c743d 100644 --- a/contrib/timescaledb/test/src/bgw/CMakeLists.txt +++ b/contrib/timescaledb/test/src/bgw/CMakeLists.txt @@ -7,9 +7,11 @@ set(SOURCES target_sources(${TESTS_LIB_NAME} PRIVATE ${SOURCES}) + + set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") -include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR}) -#include_directories(${KERBEROS_INCLUDE_PATH} ${LIBOBS_INCLUDE_PATH} ${LIBCURL_INCLUDE_PATH} ${LIBOPENSSL_INCLUDE_PATH}) -#INCLUDE_DIRECTORIES(${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH}) \ No newline at end of file +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/bgw/log.cpp b/contrib/timescaledb/test/src/bgw/log.cpp index c07225b0e..e7306477e 100644 --- a/contrib/timescaledb/test/src/bgw/log.cpp +++ b/contrib/timescaledb/test/src/bgw/log.cpp @@ -19,7 +19,7 @@ #include "compat.h" static char *bgw_application_name = "unset"; - +emit_log_hook_type emit_log_hook = NULL; void ts_bgw_log_set_application_name(char *name) { diff --git a/contrib/timescaledb/test/src/bgw/params.cpp b/contrib/timescaledb/test/src/bgw/params.cpp index 
6d25595e0..d3d0f8e95 100644 --- a/contrib/timescaledb/test/src/bgw/params.cpp +++ b/contrib/timescaledb/test/src/bgw/params.cpp @@ -50,7 +50,7 @@ params_register_dsm_handle(dsm_handle handle) rel = table_open(get_dsm_handle_table_oid(), RowExclusiveLock); scan = table_beginscan(rel, SnapshotSelf, 0, NULL); - tuple = heap_copytuple(heap_getnext(scan, ForwardScanDirection)); + tuple = heap_copytuple(heap_getnext((TableScanDescData *)scan, ForwardScanDirection)); fd = (FormData_bgw_dsm_handle *) GETSTRUCT(tuple); fd->handle = handle; ts_catalog_update(rel, tuple); @@ -70,7 +70,7 @@ params_load_dsm_handle() rel = table_open(get_dsm_handle_table_oid(), RowExclusiveLock); scan = table_beginscan(rel, SnapshotSelf, 0, NULL); - tuple = heap_getnext(scan, ForwardScanDirection); + tuple = heap_getnext((TableScanDescData *)scan, ForwardScanDirection); Assert(tuple != NULL); tuple = heap_copytuple(tuple); fd = (FormData_bgw_dsm_handle *) GETSTRUCT(tuple); @@ -121,7 +121,7 @@ params_open_wrapper() Assert(seg != NULL); - wrapper = dsm_segment_address(seg); + wrapper =(TestParamsWrapper *) dsm_segment_address(seg); Assert(wrapper != NULL); @@ -235,7 +235,7 @@ TS_FUNCTION_INFO_V1(ts_bgw_params_mock_wait_returns_immediately); Datum ts_bgw_params_mock_wait_returns_immediately(PG_FUNCTION_ARGS) { - params_set_mock_wait_type(PG_GETARG_INT32(0)); + params_set_mock_wait_type((MockWaitType)PG_GETARG_INT32(0)); PG_RETURN_VOID(); } @@ -249,11 +249,12 @@ ts_bgw_params_create(PG_FUNCTION_ARGS) Assert(seg != NULL); - params = dsm_segment_address(seg); + params = (TestParamsWrapper *)dsm_segment_address(seg); *params = (TestParamsWrapper) { .params = { + .timer_latch = {}, .current_time = 0, }, }; diff --git a/contrib/timescaledb/test/src/bgw/test_job_refresh.cpp b/contrib/timescaledb/test/src/bgw/test_job_refresh.cpp index 1e4ce39cc..257d5137d 100644 --- a/contrib/timescaledb/test/src/bgw/test_job_refresh.cpp +++ b/contrib/timescaledb/test/src/bgw/test_job_refresh.cpp @@ -7,6 +7,7 @@ #include 
#include #include +#include #include #include #include @@ -66,7 +67,7 @@ ts_test_job_refresh(PG_FUNCTION_ARGS) Datum *values =(Datum *) palloc(sizeof(*values) * funcctx->tuple_desc->natts); bool *nulls =(bool *) palloc(sizeof(*nulls) * funcctx->tuple_desc->natts); - ts_populate_scheduled_job_tuple(lfirst(lc), values); + ts_populate_scheduled_job_tuple((ScheduledBgwJob*)lfirst(lc), values); memset(nulls, 0, sizeof(*nulls) * funcctx->tuple_desc->natts); tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); diff --git a/contrib/timescaledb/test/src/loader/CMakeLists.txt b/contrib/timescaledb/test/src/loader/CMakeLists.txt index 06fd7d50f..66cf71745 100644 --- a/contrib/timescaledb/test/src/loader/CMakeLists.txt +++ b/contrib/timescaledb/test/src/loader/CMakeLists.txt @@ -34,3 +34,9 @@ foreach(MOCK_VERSION mock-1 mock-2 mock-3 mock-4 mock-broken mock-5 mock-6) TARGETS ${PROJECT_NAME}-${MOCK_VERSION} DESTINATION ${PG_PKGLIBDIR} OPTIONAL) endforeach(MOCK_VERSION) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) 
+set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/loader/init.cpp b/contrib/timescaledb/test/src/loader/init.cpp index 7bf178857..087e6929a 100644 --- a/contrib/timescaledb/test/src/loader/init.cpp +++ b/contrib/timescaledb/test/src/loader/init.cpp @@ -7,9 +7,7 @@ #include #include #include -#ifndef WIN32 -#include "parallel.h" -#endif + #include #include #include @@ -54,7 +52,7 @@ post_analyze_hook(ParseState *pstate, Query *query) * not perform this check */ #ifndef WIN32 - if (prev_post_parse_analyze_hook != NULL && !IsParallelWorker()) + if (prev_post_parse_analyze_hook != NULL) elog(ERROR, "the extension called with a loader should always have a NULL prev hook"); #endif if (BROKEN && !creating_extension) @@ -77,7 +75,7 @@ _PG_init(void) * not perform this check */ #ifndef WIN32 - if (prev_post_parse_analyze_hook != NULL && !IsParallelWorker()) + if (prev_post_parse_analyze_hook != NULL) elog(ERROR, "the extension called with a loader should always have a NULL prev hook"); #endif post_parse_analyze_hook = post_analyze_hook; diff --git a/contrib/timescaledb/test/src/net/CMakeLists.txt b/contrib/timescaledb/test/src/net/CMakeLists.txt index 9d3fe37e5..d36b4cd45 100644 --- a/contrib/timescaledb/test/src/net/CMakeLists.txt +++ b/contrib/timescaledb/test/src/net/CMakeLists.txt @@ -6,3 +6,9 @@ set(SOURCES target_sources(${TESTS_LIB_NAME} PRIVATE ${SOURCES}) target_include_directories(${TESTS_LIB_NAME} PRIVATE ${PROJECT_SOURCE_DIR}/src/net) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) 
+set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/telemetry/CMakeLists.txt b/contrib/timescaledb/test/src/telemetry/CMakeLists.txt index e6e76286f..596f9de70 100644 --- a/contrib/timescaledb/test/src/telemetry/CMakeLists.txt +++ b/contrib/timescaledb/test/src/telemetry/CMakeLists.txt @@ -5,3 +5,9 @@ set(SOURCES ) target_sources(${TESTS_LIB_NAME} PRIVATE ${SOURCES}) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) +set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/CMakeLists.txt b/contrib/timescaledb/tsl/CMakeLists.txt index 2022cefc7..c413f45fd 100644 --- a/contrib/timescaledb/tsl/CMakeLists.txt +++ b/contrib/timescaledb/tsl/CMakeLists.txt @@ -1,2 +1,8 @@ add_subdirectory(test) add_subdirectory(src) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) 
+set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/CMakeLists.txt b/contrib/timescaledb/tsl/src/CMakeLists.txt index 1bcfd84e5..d55a2e842 100644 --- a/contrib/timescaledb/tsl/src/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/CMakeLists.txt @@ -66,9 +66,7 @@ ${PROJECT_SOURCE_DIR}/src/time_bucket.cpp ${PROJECT_SOURCE_DIR}/src/tsdb.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_dsm.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_event_trigger.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_extension.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_get_obj.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_head.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_shm.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_static.cpp diff --git a/contrib/timescaledb/tsl/src/compression/create.cpp b/contrib/timescaledb/tsl/src/compression/create.cpp index 260642bc0..7b118700d 100644 --- a/contrib/timescaledb/tsl/src/compression/create.cpp +++ b/contrib/timescaledb/tsl/src/compression/create.cpp @@ -396,7 +396,7 @@ create_compressed_table_indexes(Oid compresstable_relid, CompressColInfo *compre IndexStmt stmt = { .type = T_IndexStmt, .missing_ok = false, - .schemaname = {}, + .schemaname = "public", .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, diff --git a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp index 16fb838bc..a0206bb46 100644 --- a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp +++ b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp @@ -395,7 +395,7 @@ mattablecolumninfo_add_mattable_index(MatTableColumnInfo *matcolinfo, Hypertable IndexStmt stmt = { 
.type = T_IndexStmt, .missing_ok = false, - .schemaname = NULL, + .schemaname = "public", .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, diff --git a/contrib/timescaledb/tsl/src/tsdb_tsl.cpp b/contrib/timescaledb/tsl/src/tsdb_tsl.cpp index b8b11043b..36194ba86 100644 --- a/contrib/timescaledb/tsl/src/tsdb_tsl.cpp +++ b/contrib/timescaledb/tsl/src/tsdb_tsl.cpp @@ -108,8 +108,8 @@ static void inline do_load() */ PG_TRY(); { - PGFunction ts_post_load_init = - load_external_function(soname, POST_LOAD_INIT_FN, false, NULL); + CFunInfo temp_for_tsdb = load_external_function(soname, POST_LOAD_INIT_FN, false, NULL); + PGFunction ts_post_load_init = temp_for_tsdb.user_fn; if (ts_post_load_init != NULL) DirectFunctionCall1(ts_post_load_init, CharGetDatum(0)); diff --git a/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt b/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt index a5e1be39b..052585ccb 100644 --- a/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt +++ b/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt @@ -1,3 +1,9 @@ #dummy file to ensure isolation output is created correctly add_subdirectory(specs) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) 
+set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/test/src/CMakeLists.txt b/contrib/timescaledb/tsl/test/src/CMakeLists.txt index 846e20a10..e221d0adf 100644 --- a/contrib/timescaledb/tsl/test/src/CMakeLists.txt +++ b/contrib/timescaledb/tsl/test/src/CMakeLists.txt @@ -15,3 +15,9 @@ add_library(${TSL_TESTS_LIB_NAME} OBJECT ${SOURCES}) set_target_properties(${TSL_TESTS_LIB_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_compile_definitions(${TSL_TESTS_LIB_NAME} PUBLIC TS_SUBMODULE) +set(PROJECT_TRUNK_DIR ${CMAKE_SOURCE_DIR}/../..) +set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") +set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") +set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) +add_definitions(-DPGXC) +include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/test/src/test_auto_policy.cpp b/contrib/timescaledb/tsl/test/src/test_auto_policy.cpp index fe5e88bf1..e253bd39d 100644 --- a/contrib/timescaledb/tsl/test/src/test_auto_policy.cpp +++ b/contrib/timescaledb/tsl/test/src/test_auto_policy.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "bgw_policy/job.h" #include "bgw/job_stat.h" diff --git a/contrib/timescaledb/tsl/test/src/test_chunk_stats.cpp b/contrib/timescaledb/tsl/test/src/test_chunk_stats.cpp index 01161bcfd..5007597a5 100644 --- a/contrib/timescaledb/tsl/test/src/test_chunk_stats.cpp +++ b/contrib/timescaledb/tsl/test/src/test_chunk_stats.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include 
"bgw/job.h" #include "export.h" diff --git a/contrib/timescaledb/tsl/test/src/test_compression.cpp b/contrib/timescaledb/tsl/test/src/test_compression.cpp index 9754b46cb..a15685689 100644 --- a/contrib/timescaledb/tsl/test/src/test_compression.cpp +++ b/contrib/timescaledb/tsl/test/src/test_compression.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include @@ -93,7 +93,7 @@ test_int_array() for (i = 0; i < 1015; i++) array_compressor_append(compressor, Int32GetDatum(i)); - compressed = array_compressor_finish(compressor); + compressed =(ArrayCompressed *) array_compressor_finish(compressor); Assert(compressed != NULL); i = 0; @@ -135,7 +135,7 @@ test_string_array() for (i = 0; i < 1015; i++) array_compressor_append(compressor, PointerGetDatum(texts[i % 5])); - compressed = array_compressor_finish(compressor); + compressed = (ArrayCompressed *) array_compressor_finish(compressor); Assert(compressed != NULL); i = 0; @@ -184,7 +184,7 @@ test_int_dictionary() for (i = 0; i < 1015; i++) dictionary_compressor_append(compressor, Int32GetDatum(i % 15)); - compressed = dictionary_compressor_finish(compressor); + compressed =(DictionaryCompressed* ) dictionary_compressor_finish(compressor); Assert(compressed != NULL); i = 0; @@ -215,7 +215,7 @@ test_string_dictionary() for (i = 0; i < 1014; i++) dictionary_compressor_append(compressor, PointerGetDatum(texts[i % 5])); - compressed = dictionary_compressor_finish(compressor); + compressed = (DictionaryCompressed *) dictionary_compressor_finish(compressor); Assert(compressed != NULL); i = 0; @@ -266,7 +266,7 @@ test_gorilla_int() for (i = 0; i < 1015; i++) gorilla_compressor_append_value(compressor, i); - compressed = gorilla_compressor_finish(compressor); + compressed =(GorillaCompressed*) gorilla_compressor_finish(compressor); Assert(compressed != NULL); AssertInt64Eq(VARSIZE(compressed), 1344); @@ -332,7 +332,7 @@ test_gorilla_float() for (i = 0.0; i < 1015.0; i++) 
gorilla_compressor_append_value(compressor, float_get_bits(i)); - compressed = gorilla_compressor_finish(compressor); + compressed = (GorillaCompressed*)gorilla_compressor_finish(compressor); Assert(compressed != NULL); AssertInt64Eq(VARSIZE(compressed), 1200); @@ -370,7 +370,7 @@ test_gorilla_double() for (i = 0.0; i < 1015.0; i++) gorilla_compressor_append_value(compressor, double_get_bits(i)); - compressed = gorilla_compressor_finish(compressor); + compressed = (GorillaCompressed*)gorilla_compressor_finish(compressor); Assert(compressed != NULL); AssertInt64Eq(VARSIZE(compressed), 1200); @@ -510,7 +510,7 @@ compression_info_from_array(ArrayType *compression_info_arr, Oid form_oid) tmptup.t_len = HeapTupleHeaderGetDatumLength(form); tmptup.t_data = form; - compression_info_vec_append(compression_info, (void *) GETSTRUCT(&tmptup)); + compression_info_vec_append(compression_info, (Form_hypertable_compression) GETSTRUCT(&tmptup)); } if (form_desc != NULL) ReleaseTupleDesc(form_desc); @@ -612,7 +612,7 @@ Datum ts_compression_custom_type_in(PG_FUNCTION_ARGS) { char *num = PG_GETARG_CSTRING(0); - int16 *val = palloc(sizeof(*val)); + int16 *val =(int16 *) palloc(sizeof(*val)); *val = pg_atoi(num, sizeof(int16), '\0'); PG_RETURN_POINTER(val); diff --git a/contrib/timescaledb/tsl/test/src/test_ddl_hook.cpp b/contrib/timescaledb/tsl/test/src/test_ddl_hook.cpp index c5eb32455..d61c5fe7a 100644 --- a/contrib/timescaledb/tsl/test/src/test_ddl_hook.cpp +++ b/contrib/timescaledb/tsl/test/src/test_ddl_hook.cpp @@ -121,7 +121,7 @@ test_sql_drop(List *dropped_objects) { ListCell *lc; int num_objects = list_length(dropped_objects); - EventTriggerDropObject **objects = palloc(num_objects * sizeof(EventTriggerDropObject *)); + EventTriggerDropObject **objects = (EventTriggerDropObject **)palloc(num_objects * sizeof(EventTriggerDropObject *)); int i = 0; /* Sort the list of dropped objects for predictible order in tests across @@ -129,7 +129,7 @@ test_sql_drop(List *dropped_objects) * 
but it is not available in earlier PostgreSQL versions so we're doing * our own sorting. */ foreach (lc, dropped_objects) - objects[i++] = lfirst(lc); + objects[i++] = (EventTriggerDropObject *)lfirst(lc); qsort(objects, num_objects, sizeof(EventTriggerDropObject *), event_trigger_event_cmp); -- Gitee From c8108eccc2032a5d332d539d985d2853db86bf00 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 26 Oct 2023 17:30:15 +0800 Subject: [PATCH 032/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91fix?= =?UTF-8?q?=20up=20issue=20I7A4TE:=20https://gitee.com/opengauss/Plugin/is?= =?UTF-8?q?sues/I7A4TE=3Ffrom=3Dproject-issue=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E5=BC=82=E5=B8=B8=E7=9A=84=E5=A4=84=E7=90=86?= =?UTF-8?q?=E5=92=8Cmysql=E4=B8=80=E8=87=B4=20=E3=80=90=E6=A0=B9=E5=9B=A0?= =?UTF-8?q?=E5=88=86=E6=9E=90=E3=80=91:=20=E5=AE=9E=E7=8E=B0=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E5=BC=82=E5=B8=B8=E7=9A=84=E5=A4=84=E7=90=86=E5=92=8C?= =?UTF-8?q?mysql=E4=B8=80=E8=87=B4=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E6=96=B9=E6=A1=88=E3=80=91:=20insert=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=EF=BC=9A=E7=94=A80=E6=88=96=E8=80=85=E6=8A=9B=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=EF=BC=8Cselect=E5=BC=82=E5=B8=B8=EF=BC=9A=E6=98=BE?= =?UTF-8?q?=E7=A4=BANULL=E6=88=96=E8=80=85=E6=B3=A1=E5=BC=82=E5=B8=B8=20?= =?UTF-8?q?=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:=20I7A4TE=20=E3=80=90=E5=BC=80=E5=8F=91=E8=87=AA?= =?UTF-8?q?=E9=AA=8C=E6=8A=A5=E5=91=8A=E3=80=91:=20=E8=AF=B7=E9=99=84?= =?UTF-8?q?=E4=B8=8A=E8=87=AA=E9=AA=8C=E7=BB=93=E6=9E=9C(=E5=86=85?= =?UTF-8?q?=E5=AE=B9=E6=88=96=E8=80=85=E6=88=AA=E5=9B=BE)=20=E6=98=AF?= =?UTF-8?q?=E5=90=A6=E5=8F=AF=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck=E6=B5=8B?= =?UTF-8?q?=E8=AF=95=E7=94=A8=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C?= =?UTF-8?q?=E8=AF=B7=E8=A1=A5=E5=85=85fastcheck=E7=94=A8=E4=BE=8B=20->=20?= 
=?UTF-8?q?=E6=98=AF=20=E6=98=AF=E5=90=A6=E6=B6=89=E5=8F=8A=E8=B5=84?= =?UTF-8?q?=E6=96=99=E4=BF=AE=E6=94=B9=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C?= =?UTF-8?q?=E5=9C=A8docs=E4=BB=93=E5=BA=93=E8=A1=A5=E5=85=85=E8=B5=84?= =?UTF-8?q?=E6=96=99=20=20=20=20->=20=E6=97=A0=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E8=80=83=E8=99=91=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(?= =?UTF-8?q?=E7=B3=BB=E7=BB=9F=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5?= =?UTF-8?q?=E5=BF=97=E6=8C=81=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=E6=89=A7=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC?= =?UTF-8?q?=E5=BC=8F)=20=20->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF?= =?UTF-8?q?=E5=90=A6=E8=80=83=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9?= =?UTF-8?q?=E7=AD=89=E6=89=A9=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20?= =?UTF-8?q?=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=BC=82=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6?= =?UTF-8?q?=E5=8F=91=E5=9C=BA=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC?= =?UTF-8?q?=E5=AE=B9/=E6=80=A7=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20?= =?UTF-8?q?=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9?= =?UTF-8?q?=E5=85=B6=E4=BB=96=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1?= =?UTF-8?q?=E5=93=8D=20=20=20->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90?= =?UTF-8?q?=E5=85=B6=E4=BB=96=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 34 +- .../b_compatibility_time_funcs.out | 4 - .../b_compatibility_time_funcs3.out | 409 +++++++++++++++++- .../include/plugin_commands/mysqlmode.h | 5 + contrib/dolphin/include/plugin_utils/date.h | 23 + .../dolphin/include/plugin_utils/datetime.h | 2 + contrib/dolphin/plugin_parser/gram.y | 15 +- .../dolphin/plugin_parser/parse_coerce.cpp | 19 +- .../dolphin/plugin_parser/parse_target.cpp | 7 + contrib/dolphin/plugin_utils/adt/date.cpp | 168 ++++++- 
contrib/dolphin/plugin_utils/adt/datetime.cpp | 25 ++ .../rollback_script/dolphin--3.0--2.0.sql | 5 + .../b_compatibility_time_funcs3.sql | 96 ++++ .../upgrade_script/dolphin--2.0--3.0.sql | 12 + 14 files changed, 758 insertions(+), 66 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out b/contrib/dolphin/expected/b_compatibility_time_type.out index 428cc624e..797c8b676 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -362,38 +362,18 @@ SELECT time'12:12:12.123456'; SELECT time'34 22:59:59.999999'; ERROR: Incorrect time value -LINE 1: SELECT time'34 22:59:59.999999'; - ^ CONTEXT: referenced column: time SELECT time'12:60:12.123456'; -WARNING: date/time field value out of range: "12:60:12.123456" -LINE 1: SELECT time'12:60:12.123456'; - ^ +ERROR: date/time field value out of range: "12:60:12.123456" CONTEXT: referenced column: time - time ------------------ - 13:00:12.123456 -(1 row) - SELECT time'12:12:60.123456'; -WARNING: date/time field value out of range: "12:12:60.123456" -LINE 1: SELECT time'12:12:60.123456'; - ^ +ERROR: date/time field value out of range: "12:12:60.123456" CONTEXT: referenced column: time - time ------------------ - 12:13:00.123456 -(1 row) - SELECT time'34 23:00:00'; ERROR: Incorrect time value -LINE 1: SELECT time'34 23:00:00'; - ^ CONTEXT: referenced column: time SELECT time'-34 23:00:00'; ERROR: Incorrect time value -LINE 1: SELECT time'-34 23:00:00'; - ^ CONTEXT: referenced column: time SELECT time'34 22:59:59'; time @@ -409,23 +389,15 @@ SELECT time'-34 22:59:59'; SELECT time'34 22:59:59.999999'; ERROR: Incorrect time value -LINE 1: SELECT time'34 22:59:59.999999'; - ^ CONTEXT: referenced column: time SELECT time'34 22:59:59.9999999999999999999999'; ERROR: Incorrect time value -LINE 1: SELECT time'34 22:59:59.9999999999999999999999'; - ^ CONTEXT: referenced column: time SELECT time'-34 22:59:59.9999999999999999999999'; ERROR: 
Incorrect time value -LINE 1: SELECT time'-34 22:59:59.9999999999999999999999'; - ^ CONTEXT: referenced column: time SELECT time'-34 22:59:59.9999999999999999999999'; ERROR: Incorrect time value -LINE 1: SELECT time'-34 22:59:59.9999999999999999999999'; - ^ CONTEXT: referenced column: time SELECT time'838:59:59'; time @@ -435,8 +407,6 @@ SELECT time'838:59:59'; SELECT time'839:00:00'; ERROR: Incorrect time value -LINE 1: SELECT time'839:00:00'; - ^ CONTEXT: referenced column: time SELECT time'59:59'; time diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index b5805e741..32546cee4 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -481,13 +481,9 @@ select * from func_test; SELECT SUBDATE(time'839:59:59', interval 2 hour); ERROR: Incorrect time value -LINE 1: SELECT SUBDATE(time'839:59:59', interval 2 hour); - ^ CONTEXT: referenced column: subdate SELECT SUBDATE(time'-838:59:59.9', interval 2 hour); ERROR: Incorrect time value -LINE 1: SELECT SUBDATE(time'-838:59:59.9', interval 2 hour); - ^ CONTEXT: referenced column: subdate SELECT SUBDATE('839:59:59', interval 2 hour); WARNING: date/time field value out of range: "839:59:59" diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index f090667bd..d02aee709 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -399,6 +399,413 @@ CONTEXT: referenced column: to_seconds (1 row) +-- è¿›ä½å’Œéžæ³•值 +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +create table test1(a time); +insert into test1 values('23:65:66'); +WARNING: date/time field value out of range: "23:65:66" +LINE 
1: insert into test1 values('23:65:66'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +---------- + 00:00:00 +(1 row) + +truncate test1; +insert into test1 values('23:65'); +WARNING: date/time field value out of range: "23:65" +LINE 1: insert into test1 values('23:65'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +---------- + 00:00:00 +(1 row) + +truncate test1; +insert into test1 values('23-65'); +WARNING: time zone displacement out of range: "23-65" +LINE 1: insert into test1 values('23-65'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +---------- + 00:00:23 +(1 row) + +truncate test1; +insert into test1 values('23:59:59'); +select * from test1; + a +---------- + 23:59:59 +(1 row) + +truncate test1; +insert into test1 values('23:59:59.8888'); +select * from test1; + a +---------- + 24:00:00 +(1 row) + +truncate test1; +insert into test1 values('23:65:66.8888'); +WARNING: date/time field value out of range: "23:65:66.8888" +LINE 1: insert into test1 values('23:65:66.8888'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +---------- + 00:00:00 +(1 row) + +truncate test1; +insert into test1 values('0:0:0'); +select * from test1; + a +---------- + 00:00:00 +(1 row) + +truncate test1; +insert into test1 values('-1:-:-1:1'); +WARNING: invalid input syntax for type time: "-1:-:-1:1" +LINE 1: insert into test1 values('-1:-:-1:1'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +----------- + -00:00:01 +(1 row) + +truncate test1; +insert into test1 values('23:55:56.1234'); +select * from test1; + a +---------- + 23:55:56 +(1 row) + +truncate test1; +select time'23:65:66'; +ERROR: date/time field value out of range: "23:65:66" +CONTEXT: referenced column: time +select time'23:65'; +ERROR: date/time field value out of range: "23:65" +CONTEXT: referenced column: time +select time'23-65'; +ERROR: time zone displacement out of range: "23-65" +CONTEXT: referenced column: time +select 
time'23:59:59.8888'; + time +--------------- + 23:59:59.8888 +(1 row) + +select time'23:65:66.8888'; +ERROR: date/time field value out of range: "23:65:66.8888" +CONTEXT: referenced column: time +select time'0:0:0'; + time +---------- + 00:00:00 +(1 row) + +select time'-1:-1:-1'; +ERROR: time zone displacement out of range: "-1:-1:-1" +CONTEXT: referenced column: time +select time'23:55:56.1234'; + time +--------------- + 23:55:56.1234 +(1 row) + +select cast('23:65:66' as time); +WARNING: date/time field value out of range: "23:65:66" +LINE 1: select cast('23:65:66' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:06:06 +(1 row) + +select cast('23:65'as time); +WARNING: date/time field value out of range: "23:65" +LINE 1: select cast('23:65'as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:05:00 +(1 row) + +select cast('23-65' as time); +WARNING: time zone displacement out of range: "23-65" +LINE 1: select cast('23-65' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 00:00:23 +(1 row) + +select cast('23:59:59.8888' as time); + time +---------- + 24:00:00 +(1 row) + +select cast('23:65:66.8888' as time); +WARNING: date/time field value out of range: "23:65:66.8888" +LINE 1: select cast('23:65:66.8888' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:06:07 +(1 row) + +select cast('0:0:0' as time); + time +---------- + 00:00:00 +(1 row) + +select cast('-1:-1:-1' as time); +WARNING: time zone displacement out of range: "-1:-1:-1" +LINE 1: select cast('-1:-1:-1' as time); + ^ +CONTEXT: referenced column: time + time +----------- + -00:00:01 +(1 row) + +select cast('23:55:56.1234' as time); + time +---------- + 23:55:56 +(1 row) + +select addtime('12aaa43', '12aa43'); +WARNING: Truncated incorrect time value: "12aaa43" +CONTEXT: referenced column: addtime +WARNING: Truncated incorrect time value: "12aa43" +CONTEXT: referenced column: addtime + addtime +---------- + 00:00:24 +(1 row) 
+ +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +insert into test1 values('23:65:66'); +ERROR: date/time field value out of range: "23:65:66" +LINE 1: insert into test1 values('23:65:66'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +--- +(0 rows) + +truncate test1; +insert into test1 values('23:65'); +ERROR: date/time field value out of range: "23:65" +LINE 1: insert into test1 values('23:65'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +--- +(0 rows) + +truncate test1; +insert into test1 values('23-65'); +ERROR: time zone displacement out of range: "23-65" +LINE 1: insert into test1 values('23-65'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +--- +(0 rows) + +truncate test1; +insert into test1 values('23:59:59'); +select * from test1; + a +---------- + 23:59:59 +(1 row) + +truncate test1; +insert into test1 values('23:59:59.8888'); +select * from test1; + a +---------- + 24:00:00 +(1 row) + +truncate test1; +insert into test1 values('23:65:66.8888'); +ERROR: date/time field value out of range: "23:65:66.8888" +LINE 1: insert into test1 values('23:65:66.8888'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +--- +(0 rows) + +truncate test1; +insert into test1 values('0:0:0'); +select * from test1; + a +---------- + 00:00:00 +(1 row) + +truncate test1; +insert into test1 values('-1:-:-1:1'); +ERROR: invalid input syntax for type time: "-1:-:-1:1" +LINE 1: insert into test1 values('-1:-:-1:1'); + ^ +CONTEXT: referenced column: a +select * from test1; + a +--- +(0 rows) + +truncate test1; +insert into test1 values('23:55:56.1234'); +select * from test1; + a +---------- + 23:55:56 +(1 row) + +truncate test1; +select time'23:65:66'; +ERROR: date/time field value out of range: "23:65:66" +CONTEXT: referenced column: time +select time'23:65'; +ERROR: date/time field value out of range: "23:65" +CONTEXT: referenced column: time +select time'23-65'; 
+ERROR: time zone displacement out of range: "23-65" +CONTEXT: referenced column: time +select time'23:59:59.8888'; + time +--------------- + 23:59:59.8888 +(1 row) + +select time'23:65:66.8888'; +ERROR: date/time field value out of range: "23:65:66.8888" +CONTEXT: referenced column: time +select time'0:0:0'; + time +---------- + 00:00:00 +(1 row) + +select time'-1:-1:-1'; +ERROR: time zone displacement out of range: "-1:-1:-1" +CONTEXT: referenced column: time +select time'23:55:56.1234'; + time +--------------- + 23:55:56.1234 +(1 row) + +select cast('23:65:66' as time); +WARNING: date/time field value out of range: "23:65:66" +LINE 1: select cast('23:65:66' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:06:06 +(1 row) + +select cast('23:65'as time); +WARNING: date/time field value out of range: "23:65" +LINE 1: select cast('23:65'as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:05:00 +(1 row) + +select cast('23-65' as time); +WARNING: time zone displacement out of range: "23-65" +LINE 1: select cast('23-65' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 00:00:23 +(1 row) + +select cast('23:59:59.8888' as time); + time +---------- + 24:00:00 +(1 row) + +select cast('23:65:66.8888' as time); +WARNING: date/time field value out of range: "23:65:66.8888" +LINE 1: select cast('23:65:66.8888' as time); + ^ +CONTEXT: referenced column: time + time +---------- + 24:06:07 +(1 row) + +select cast('0:0:0' as time); + time +---------- + 00:00:00 +(1 row) + +select cast('-1:-1:-1' as time); +WARNING: time zone displacement out of range: "-1:-1:-1" +LINE 1: select cast('-1:-1:-1' as time); + ^ +CONTEXT: referenced column: time + time +----------- + -00:00:01 +(1 row) + +select cast('23:55:56.1234' as time); + time +---------- + 23:55:56 +(1 row) + +select addtime('12aaa43', '12aa43'); +WARNING: Truncated incorrect time value: "12aaa43" +CONTEXT: referenced column: addtime +WARNING: Truncated incorrect time 
value: "12aa43" +CONTEXT: referenced column: addtime + addtime +---------- + 00:00:24 +(1 row) + drop schema b_time_funcs3 cascade; -NOTICE: drop cascades to table func_test3 +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table func_test3 +drop cascades to table test1 reset current_schema; diff --git a/contrib/dolphin/include/plugin_commands/mysqlmode.h b/contrib/dolphin/include/plugin_commands/mysqlmode.h index 22527cabd..259d6a932 100644 --- a/contrib/dolphin/include/plugin_commands/mysqlmode.h +++ b/contrib/dolphin/include/plugin_commands/mysqlmode.h @@ -22,6 +22,11 @@ #define OPT_SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO (1 << 9) #define OPT_SQL_MODE_MAX 10 #define SQL_MODE_STRICT() ((GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && !CMD_TAG_IS_SELECT()) +#define SQL_MODE_STRICT_ON_SELECT() ((GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && CMD_TAG_IS_SELECT()) +#define SQL_MODE_NOT_STRICT_ON_INSERT() \ + (!(GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && !CMD_TAG_IS_SELECT()) +#define SQL_MODE_NOT_STRICT_ON_SELECT() \ + (!(GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && CMD_TAG_IS_SELECT()) #define SQL_MODE_FULL_GROUP() (GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_FULL_GROUP) #define SQL_MODE_PIPES_AS_CONCAT() (GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_PIPES_AS_CONCAT) #define SQL_MODE_ANSI_QUOTES() (GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_ANSI_QUOTES) diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 330110f76..bbf37d393 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -42,6 +42,8 @@ #define NANO2MICRO_BASE 1000 #define HALF_NANO2MICRO_BASE 500 #define FRAC_PART_LEN_IN_NUMERICSEC 100000000 +#define TIME_WITH_FORMAT_ARGS_SIZE 4 +#define TIME_MS_TO_S_RADIX 1000 #ifdef DOLPHIN #define TWO_DIGITS_YEAR_DATE_ONE 691231 /* 2069-12-31 */ @@ -86,6 +88,7 @@ extern Datum 
date_internal(PG_FUNCTION_ARGS, bool is_date_sconst); extern "C" Datum time_float(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum date_enum(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum timestamp_enum(PG_FUNCTION_ARGS); +extern Datum textout (PG_FUNCTION_ARGS); typedef struct DateTimeFormat { @@ -94,6 +97,26 @@ typedef struct DateTimeFormat const char *datetime_format; const char *time_format; }DateTimeFormat; + +typedef enum +{ + TIME_CORRECT = 0, + TIME_IGNORED_INCORRECT, + TIME_INCORRECT +}TimeErrorType; + +typedef enum +{ + TIME_IN = 0, + TIME_CAST, + TIME_CAST_IMPLICIT, + TEXT_TIME_EXPLICIT +}TimeCastType; + + +extern TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int is_time_sconst, Datum* datum_internal); +char* parser_function_input(Datum txt, Oid oid); + #endif #endif /* DATE_H */ \ No newline at end of file diff --git a/contrib/dolphin/include/plugin_utils/datetime.h b/contrib/dolphin/include/plugin_utils/datetime.h index 009c81e6e..ca83944e8 100644 --- a/contrib/dolphin/include/plugin_utils/datetime.h +++ b/contrib/dolphin/include/plugin_utils/datetime.h @@ -128,6 +128,8 @@ extern void DateTimeParseErrorInternal(int dterr, const char* str, const char* d extern bool datetime_add_nanoseconds_with_round(pg_tm *tm, fsec_t &fsec, int nano); extern bool cstring_to_tm(const char *expr, pg_tm *tm, fsec_t &fsec, int* tzp = NULL, int* invalid_tz = NULL); +extern bool IsResetUnavailableDataTime(int dterr, bool is_support_reset_unavailable_datatime = false); + #define tmfsec2float(tm, fsec) ((tm)->tm_hour * 10000 + (tm)->tm_min * 100 + (tm)->tm_sec + (fsec) / 1000000.0) #define date2int(tm) ((tm)->tm_year * 10000 + (tm)->tm_mon * 100 + (tm)->tm_mday) diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index fbfb8d7ee..3c7683228 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -36982,9 +36982,18 @@ AexprConst_without_Sconst: Iconst } | TIME SCONST { - TypeName * tmp = 
SystemTypeName("time"); - tmp->location = @1; - $$ = makeStringConstCast($2, @2, tmp); + FuncCall *n = makeNode(FuncCall); + n->funcname = SystemFuncName("time_cast"); + n->colname = pstrdup("time"); + n->args = list_make2(makeStringConst($2, @2), makeBoolAConst(TRUE, -1)); + n->agg_order = NIL; + n->agg_star = FALSE; + n->agg_distinct = FALSE; + n->func_variadic = FALSE; + n->over = NULL; + n->location = @1; + n->call_func = false; + $$ = (Node *)n; } | TIME WITH_TIME ZONE SCONST { diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index 7db6d95d4..b4e729121 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -517,6 +517,7 @@ static bool hasTextCoercePath(Oid* srcoid, Oid destoid, CoercionContext ccontext { if (ccontext == COERCION_EXPLICIT && ((ENABLE_B_CMPT_MODE && destoid == INT8OID) || + (ENABLE_B_CMPT_MODE && destoid == TIMEOID) || destoid == get_typeoid(PG_CATALOG_NAMESPACE, "uint1") || destoid == get_typeoid(PG_CATALOG_NAMESPACE, "uint2") || destoid == get_typeoid(PG_CATALOG_NAMESPACE, "uint4") || @@ -3423,6 +3424,18 @@ bool IsEquivalentEnums(Oid enumOid1, Oid enumOid2) heap_close(enumRel, AccessShareLock); return isEquivalent; } + +void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId, Oid defaultFuncId, Oid* funcId) +{ + if (sourceTypeId == TEXTOID && targetTypeId == TIMEOID) { + *funcId = get_func_oid("text_time_explicit", PG_CATALOG_NAMESPACE, NULL); + } else if (ENABLE_B_CMPT_MODE && targetTypeId == INT8OID) { + *funcId = findSignedExplicitCastFunction(sourceTypeId, defaultFuncId); + } else { + *funcId = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, defaultFuncId); + } +} + #endif /* * find_coercion_pathway @@ -3529,11 +3542,7 @@ CoercionPathType find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, Coerc result = COERCION_PATH_FUNC; #ifdef DOLPHIN if (ccontext == COERCION_EXPLICIT) { - 
if (ENABLE_B_CMPT_MODE && targetTypeId == INT8OID) { - *funcid = findSignedExplicitCastFunction(sourceTypeId, castForm->castfunc); - } else { - *funcid = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, castForm->castfunc); - } + TryFindSpecifiedCastFunction(sourceTypeId, targetTypeId, castForm->castfunc, funcid); } else #endif { diff --git a/contrib/dolphin/plugin_parser/parse_target.cpp b/contrib/dolphin/plugin_parser/parse_target.cpp index fe2f6dcb0..4d097974b 100644 --- a/contrib/dolphin/plugin_parser/parse_target.cpp +++ b/contrib/dolphin/plugin_parser/parse_target.cpp @@ -1646,6 +1646,13 @@ static int FigureColnameInternal(Node* node, char** name) } else { *name = strVal(llast(((FuncCall*)node)->funcname)); } +#ifdef DOLPHIN + /* to make the last displayed column name as the type name instead of the function name. */ + if (strcmp(strVal(llast(((FuncCall*)node)->funcname)), "time_cast") == 0 || + strcmp(strVal(llast(((FuncCall*)node)->funcname)), "date_cast") == 0) { + return 1; + } +#endif return 2; case T_A_Expr: /* make nullif() act like a regular function */ diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index d6464a104..dd430c526 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -194,6 +194,16 @@ PG_FUNCTION_INFO_V1_PUBLIC(date_int); extern "C" DLL_PUBLIC Datum date_int(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(date_cast); extern "C" DLL_PUBLIC Datum date_cast(PG_FUNCTION_ARGS); + + +PG_FUNCTION_INFO_V1_PUBLIC(time_cast); +extern "C" DLL_PUBLIC Datum time_cast(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(time_cast_implicit); +extern "C" DLL_PUBLIC Datum time_cast_implicit(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(text_time_explicit); +extern "C" DLL_PUBLIC Datum text_time_explicit(PG_FUNCTION_ARGS); #endif /* common code for timetypmodin and timetztypmodin */ static int32 anytime_typmodin(bool istz, ArrayType* ta) @@ 
-1641,15 +1651,66 @@ Datum abstime_date(PG_FUNCTION_ARGS) Datum time_in(PG_FUNCTION_ARGS) { - char* str = PG_GETARG_CSTRING(0); +#ifdef DOLPHIN + Datum datum_internal; + char* input_str = PG_GETARG_CSTRING(0); + time_internal(fcinfo, input_str, TIME_IN, &datum_internal); + return datum_internal; +} + +/* + * time_cast_implicit, such as select time'23:65:66' + * + */ +Datum time_cast(PG_FUNCTION_ARGS) +{ + Datum datum_internal; + char* input_str = PG_GETARG_CSTRING(0); + time_internal(fcinfo, input_str, TIME_CAST, &datum_internal); + return datum_internal; +} + + +Datum time_cast_implicit(PG_FUNCTION_ARGS) +{ + char* input_str = DatumGetCString(textout(fcinfo)); + return DirectFunctionCall1(time_in, CStringGetDatum(input_str)); +} + + +char* parser_function_input(Datum txt, Oid oid) +{ + Oid typeOutput; + bool typIsVarlena; + getTypeOutputInfo(oid, &typeOutput, &typIsVarlena); + return DatumGetCString(OidOutputFunctionCall(typeOutput, txt)); +} + + +/* + * text_time_explicit, such as select cast('23:65:66' as time) + * + */ +Datum text_time_explicit(PG_FUNCTION_ARGS) +{ + Datum datum_internal; + char* input_str = parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]); + if (time_internal(fcinfo, input_str, TEXT_TIME_EXPLICIT, &datum_internal) == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; +} +TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, Datum* datum_internal) +{ #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif - int32 typmod = PG_GETARG_INT32(2); + int32 typmod = time_cast_type > 0 ? 
-1 : PG_GETARG_INT32(2); TimeADT result; fsec_t fsec; - struct pg_tm tt, *tm = &tt; + struct pg_tm tt; + struct pg_tm *tm = &tt; int tz; int nf; int dterr; @@ -1658,16 +1719,14 @@ Datum time_in(PG_FUNCTION_ARGS) int dtype; int ftype[MAXDATEFIELDS]; char* time_fmt = NULL; -#ifdef DOLPHIN int timeSign = 1; /* tt2 stores openGauss's parsing result while tt stores M*'s parsing result */ struct pg_tm tt2; bool null_func_result = false; -#endif /* * this case is used for time format is specified. */ - if (4 == PG_NARGS()) { + if (TIME_WITH_FORMAT_ARGS_SIZE == PG_NARGS() && time_cast_type == 0) { time_fmt = PG_GETARG_CSTRING(3); if (time_fmt == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("specified time format is null"))); @@ -1676,7 +1735,6 @@ Datum time_in(PG_FUNCTION_ARGS) /* the following logic shared from to_timestamp(). */ to_timestamp_from_format(tm, &fsec, str, (void*)time_fmt); } else { -#ifdef DOLPHIN int tm_type; bool warnings; errno_t rc = memset_s(tm, sizeof(struct pg_tm), 0, sizeof(struct pg_tm)); @@ -1690,7 +1748,8 @@ Datum time_in(PG_FUNCTION_ARGS) char *adjusted = adjust_b_format_time(str, &timeSign, &D, &hasD); /* check if empty */ if (strlen(adjusted) == 0) { - PG_RETURN_TIMEADT(0); + *datum_internal = TimeADTGetDatum(0); + return TIME_INCORRECT; } dterr = ParseDateTime(adjusted, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr == 0) { @@ -1702,30 +1761,96 @@ Datum time_in(PG_FUNCTION_ARGS) * otherwise we can return tt which stores M*'s parsing result. */ if (SQL_MODE_STRICT()) { - DateTimeParseError(dterr, str, "time", fcinfo->can_ignore); + DateTimeParseErrorWithFlag(dterr, str, "time", fcinfo->can_ignore, !fcinfo->can_ignore); /* * can_ignore == true means hint string "ignore_error" used. warning report instead of error. 
* then we will return 00:00:xx if the first 1 or 2 character is lower than 60, otherwise return 00:00:00 */ char* field_str = field[0]; if (field_str == NULL) { - PG_RETURN_TIMEADT(0); + *datum_internal = TimeADTGetDatum(0); + return TIME_INCORRECT; } if (*field_str == '+') { field_str++; } int trunc_val = getStartingDigits(field_str); if (trunc_val < 0 || trunc_val >= 60) { - PG_RETURN_TIMEADT(0); + *datum_internal = TimeADTGetDatum(0); + return TIME_INCORRECT; + } + *datum_internal = TimeADTGetDatum(trunc_val * TIME_MS_TO_S_RADIX * TIME_MS_TO_S_RADIX); + return TIME_INCORRECT; + } else if (SQL_MODE_NOT_STRICT_ON_INSERT()) { + /* for case insert unavailable data, need to set the unavailable data to 0 to compatible with M */ + DateTimeParseError(dterr, str, "time", true); + if (IsResetUnavailableDataTime(dterr, !SQL_MODE_STRICT() && !CMD_TAG_IS_SELECT())) { + *datum_internal = TimeADTGetDatum(0); + return TIME_INCORRECT; + } else { + tm = &tt; // switch to M*'s parsing result } - PG_RETURN_TIMEADT(trunc_val * 1000 * 1000); } else { - DateTimeParseError(dterr, str, "time", !SQL_MODE_STRICT()); - tm = &tt; // switch to M*'s parsing result + if (time_cast_type == TEXT_TIME_EXPLICIT) { + DateTimeParseError(dterr, str, "time", true); + tm = &tt; // switch to M*'s parsing result + if (dterr != DTERR_TZDISP_OVERFLOW) { + return TIME_INCORRECT; + } + } + if (time_cast_type == TIME_CAST) { + DateTimeParseErrorWithFlag(dterr, str, "time", fcinfo->can_ignore, !SQL_MODE_STRICT()); + tm = &tt; // switch to M*'s parsing result + } else { + DateTimeParseError(dterr, str, "time", !SQL_MODE_STRICT()); + tm = &tt; // switch to M*'s parsing result + } } } } + } + + /* + * the following logic is unified for time parsing. 
+ */ + tm2time(tm, fsec, &result); + AdjustTimeForTypmod(&result, typmod); + result *= timeSign; + *datum_internal = TimeADTGetDatum(result); + return TIME_CORRECT; + #else + + char* str = PG_GETARG_CSTRING(0); + +#ifdef NOT_USED + Oid typelem = PG_GETARG_OID(1); +#endif + int32 typmod = PG_GETARG_INT32(2); + TimeADT result; + fsec_t fsec; + struct pg_tm tt; + struct pg_tm *tm = &tt; + int tz; + int nf; + int dterr; + char workbuf[MAXDATELEN + 1]; + char* field[MAXDATEFIELDS] = {0}; + int dtype; + int ftype[MAXDATEFIELDS]; + char* time_fmt = NULL; + /* + * this case is used for time format is specified. + */ + if (TIME_WITH_FORMAT_ARGS_SIZE == PG_NARGS()) { + time_fmt = PG_GETARG_CSTRING(3); + if (time_fmt == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("specified time format is null"))); + } + + /* the following logic shared from to_timestamp(). */ + to_timestamp_from_format(tm, &fsec, str, (void*)time_fmt); + } else { /* * original pg time format parsing */ @@ -1746,9 +1871,8 @@ Datum time_in(PG_FUNCTION_ARGS) if (trunc_val < 0 || trunc_val >= 60) { PG_RETURN_TIMEADT(0); } - PG_RETURN_TIMEADT(trunc_val * 1000 * 1000); + PG_RETURN_TIMEADT(trunc_val * TIME_MS_TO_S_RADIX * TIME_MS_TO_S_RADIX); } -#endif } /* @@ -1756,11 +1880,11 @@ Datum time_in(PG_FUNCTION_ARGS) */ tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, typmod); -#ifdef DOLPHIN - result *= timeSign; -#endif PG_RETURN_TIMEADT(result); +#endif } + + #ifdef DOLPHIN int NumberTime(bool timeIn24, char *str, pg_tm *tm, fsec_t *fsec, int D, bool hasD) { @@ -5242,7 +5366,8 @@ Datum adddate_datetime_interval_t(PG_FUNCTION_ARGS) text* tmp = PG_GETARG_TEXT_PP(0); Interval span = *PG_GETARG_INTERVAL_P(1); char *expr; - struct pg_tm tt, *tm = &tt; + struct pg_tm tt; + struct pg_tm* tm = &tt; fsec_t fsec; int tm_type = DTK_NONE; @@ -5802,7 +5927,8 @@ Datum time_float(PG_FUNCTION_ARGS) Datum date_int(PG_FUNCTION_ARGS) { DateADT date = PG_GETARG_DATEADT(0); - struct pg_tm tt, *tm = 
&tt; + struct pg_tm tt; + struct pg_tm *tm = &tt; if (unlikely(date > 0 && (INT_MAX - date < POSTGRES_EPOCH_JDATE))) { ereport(ERROR, diff --git a/contrib/dolphin/plugin_utils/adt/datetime.cpp b/contrib/dolphin/plugin_utils/adt/datetime.cpp index 7fa2d303e..70c323beb 100644 --- a/contrib/dolphin/plugin_utils/adt/datetime.cpp +++ b/contrib/dolphin/plugin_utils/adt/datetime.cpp @@ -3587,6 +3587,31 @@ void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype } } +/* + * ignore the error on unstrict mode. + * + * return values: need to reset the time value or not + */ +bool IsResetUnavailableDataTime(int dterr, bool is_support_reset_unavailable_datatime) +{ + switch (dterr) { + case DTERR_FIELD_OVERFLOW: + return is_support_reset_unavailable_datatime; + case DTERR_MD_FIELD_OVERFLOW: + break; + case DTERR_INTERVAL_OVERFLOW: + break; + case DTERR_TZDISP_OVERFLOW: + break; + case DTERR_ZERO_DATE: + break; + case DTERR_BAD_FORMAT: + default: + break; + } + return false; +} + /* datebsearch() * Binary search -- from Knuth (6.2.1) Algorithm B. Special case like this * is WAY faster than the generic bsearch(). 
diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 208a06d11..2cb54c598 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -5,6 +5,11 @@ CREATE FUNCTION pg_catalog.dolphin_invoke() DROP FUNCTION IF EXISTS pg_catalog.date_cast(cstring, boolean); DROP FUNCTION IF EXISTS pg_catalog.timestamp_cast(cstring, oid, integer, boolean); +DROP FUNCTION IF EXISTS pg_catalog.time_cast(cstring, boolean); +DROP CAST IF EXISTS (TEXT AS time); +DROP FUNCTION IF EXISTS pg_catalog.time_cast_implicit(TEXT); +DROP FUNCTION IF EXISTS pg_catalog.text_time_explicit(TEXT); + DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index 44bf9beec..d2135e8c2 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -192,5 +192,101 @@ select * from func_test3; select to_seconds('-838:59:59'); select to_seconds(-8385959); +-- è¿›ä½å’Œéžæ³•值 +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +create table test1(a time); +insert into test1 values('23:65:66'); +select * from test1; +truncate test1; +insert into test1 values('23:65'); +select * from test1; +truncate test1; +insert into test1 values('23-65'); +select * from test1; +truncate test1; +insert into test1 values('23:59:59'); +select * from test1; +truncate test1; +insert into test1 values('23:59:59.8888'); +select * from test1; +truncate test1; +insert into test1 values('23:65:66.8888'); +select * from test1; +truncate test1; +insert into test1 values('0:0:0'); 
+select * from test1; +truncate test1; +insert into test1 values('-1:-:-1:1'); +select * from test1; +truncate test1; +insert into test1 values('23:55:56.1234'); +select * from test1; +truncate test1; + +select time'23:65:66'; +select time'23:65'; +select time'23-65'; +select time'23:59:59.8888'; +select time'23:65:66.8888'; +select time'0:0:0'; +select time'-1:-1:-1'; +select time'23:55:56.1234'; +select cast('23:65:66' as time); +select cast('23:65'as time); +select cast('23-65' as time); +select cast('23:59:59.8888' as time); +select cast('23:65:66.8888' as time); +select cast('0:0:0' as time); +select cast('-1:-1:-1' as time); +select cast('23:55:56.1234' as time); +select addtime('12aaa43', '12aa43'); + +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +insert into test1 values('23:65:66'); +select * from test1; +truncate test1; +insert into test1 values('23:65'); +select * from test1; +truncate test1; +insert into test1 values('23-65'); +select * from test1; +truncate test1; +insert into test1 values('23:59:59'); +select * from test1; +truncate test1; +insert into test1 values('23:59:59.8888'); +select * from test1; +truncate test1; +insert into test1 values('23:65:66.8888'); +select * from test1; +truncate test1; +insert into test1 values('0:0:0'); +select * from test1; +truncate test1; +insert into test1 values('-1:-:-1:1'); +select * from test1; +truncate test1; +insert into test1 values('23:55:56.1234'); +select * from test1; +truncate test1; + +select time'23:65:66'; +select time'23:65'; +select time'23-65'; +select time'23:59:59.8888'; +select time'23:65:66.8888'; +select time'0:0:0'; +select time'-1:-1:-1'; +select time'23:55:56.1234'; +select cast('23:65:66' as time); +select cast('23:65'as time); +select cast('23-65' as time); +select cast('23:59:59.8888' as time); +select cast('23:65:66.8888' as time); +select cast('0:0:0' as time); +select cast('-1:-1:-1' as time); +select cast('23:55:56.1234' 
as time); +select addtime('12aaa43', '12aa43'); + drop schema b_time_funcs3 cascade; reset current_schema; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 1e37f8966..a1ad59f6f 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -8,6 +8,18 @@ CREATE OR REPLACE FUNCTION pg_catalog.date_cast(cstring, boolean) RETURNS date L DROP FUNCTION IF EXISTS pg_catalog.timestamp_cast(cstring, oid, integer, boolean); CREATE OR REPLACE FUNCTION pg_catalog.timestamp_cast(cstring, oid, integer, boolean) RETURNS timestamp without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'timestamp_cast'; +DROP FUNCTION IF EXISTS pg_catalog.time_cast(cstring, boolean); +CREATE OR REPLACE FUNCTION pg_catalog.time_cast(cstring, boolean) RETURNS time without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'time_cast'; + +DROP CAST IF EXISTS (TEXT AS time); +DROP FUNCTION IF EXISTS pg_catalog.time_cast_implicit(TEXT); +CREATE OR REPLACE FUNCTION pg_catalog.time_cast_implicit(TEXT) RETURNS time without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'time_cast_implicit'; + +DROP FUNCTION IF EXISTS pg_catalog.text_time_explicit(TEXT); +CREATE OR REPLACE FUNCTION pg_catalog.text_time_explicit(TEXT) RETURNS time without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'text_time_explicit'; + +CREATE CAST(TEXT AS time) WITH FUNCTION time_cast_implicit(TEXT) AS ASSIGNMENT; + --CREATE TIME_TIMESTAMP'S COMPARATION FUNCTION DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamp (time, timestamp without time zone) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.time_eq_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_eq_timestamp'; -- Gitee From 80d251c9a2afdb7f4d5614fe640fb103df67e5dd Mon Sep 17 00:00:00 2001 From: lukeman Date: Wed, 25 Oct 2023 
16:03:08 +0800 Subject: [PATCH 033/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3period=5Fadd=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E5=90=8Cbetween=20and=E4=BD=BF=E7=94=A8=E8=BF=94?= =?UTF-8?q?=E5=9B=9E=E7=BB=93=E6=9E=9C=E6=9C=89=E8=AF=AF=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/between.out | 57 ++++++++++++++- .../dolphin/include/plugin_utils/timestamp.h | 2 + .../dolphin/plugin_utils/adt/timestamp.cpp | 21 +++++- contrib/dolphin/plugin_utils/adt/varlena.cpp | 73 ++++++++++++++++++- contrib/dolphin/sql/builtin_funcs/between.sql | 12 ++- 5 files changed, 156 insertions(+), 9 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/between.out b/contrib/dolphin/expected/builtin_funcs/between.out index 3466d5d76..b03b1569e 100644 --- a/contrib/dolphin/expected/builtin_funcs/between.out +++ b/contrib/dolphin/expected/builtin_funcs/between.out @@ -671,7 +671,7 @@ explain (costs off) select * from t_b1 where i between '1' and '2'; Index Cond: ((i >= '1'::bpchar) AND (i <= '2'::bpchar)) (3 rows) ---test time cmp of time and date +-- test the cmp of time and date select date'2018-12-31' between time'23:56:59' and timestamp'2018-12-31 23:56:59'; b_between_and --------------- @@ -720,6 +720,61 @@ select time'23:56:59' not between datetime'2018-12-31' and timestamptz'2018-12-3 t (1 row) +-- test the cmp of number and date/time/datetime +select period_add(202201,2) between '2022-01-01' and date'2022-03-31'; + b_between_and +--------------- + t +(1 row) + +select sec_to_time(4396) between sec_to_time(4395) and 4396; + b_between_and +--------------- + f +(1 row) + +select 10 between to_days('0000-01-01') and date'0000-01-04'; + b_between_and +--------------- + t +(1 row) + +select 10 between to_days('0000-01-01') and time'12:30:00'; + b_between_and +--------------- + t +(1 row) + +select 10 between to_days('0000-01-01') and datetime'0000-01-04 12:30:00'; + b_between_and 
+--------------- + t +(1 row) + +select 10 between to_days('0000-01-01') and timestamp'0000-01-04 12:30:00'; + b_between_and +--------------- + t +(1 row) + +select 102 between '0000-01-01' and date'0000-01-03'; + b_between_and +--------------- + t +(1 row) + +select 102 between time'00:01:01' and date'0000-01-03'; + b_between_and +--------------- + t +(1 row) + +select 102 between date'0000-01-01' and timestamp'0000-01-03 00:00:00'; + b_between_and +--------------- + t +(1 row) + drop schema db_between cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table t_between_and_0007 diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index f44ffad20..e8a77bcbe 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -136,6 +136,8 @@ extern bool datetime_in_range(Timestamp datetime); extern int128 timestamp_int128(Timestamp timestamp); extern int128 timestamptz_int128(TimestampTz timestampTz); extern Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst); +extern TimestampTz time2timestamptz(TimeADT timeVal); +extern TimestampTz timetz2timestamptz(TimeTzADT* timetzVal); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); #endif diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index cd2ed7bd8..c23652223 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -10591,8 +10591,8 @@ extern "C" DLL_PUBLIC Datum timestamptz_float8(PG_FUNCTION_ARGS); Datum timestamptz_float8(PG_FUNCTION_ARGS) { TimestampTz dt = PG_GETARG_TIMESTAMPTZ(0); - struct pg_tm tt; - struct pg_tm* tm = &tt; + pg_tm tt; + pg_tm* tm = &tt; fsec_t fsec; int tz; const char *tzn = NULL; @@ -11280,7 +11280,7 @@ extern "C" DLL_PUBLIC Datum timestamptz_gt_time(PG_FUNCTION_ARGS); 
PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_ge_time); extern "C" DLL_PUBLIC Datum timestamptz_ge_time(PG_FUNCTION_ARGS); -static Timestamp time2timestamptz(TimeADT timeVal) +TimestampTz time2timestamptz(TimeADT timeVal) { TimestampTz result; struct pg_tm tt; @@ -11293,6 +11293,21 @@ static Timestamp time2timestamptz(TimeADT timeVal) tm2timestamp(tm, fsec, &tz, &result); return result; } + +TimestampTz timetz2timestamptz(TimeTzADT* timetzVal) +{ + TimestampTz result; + struct pg_tm tt; + struct pg_tm* tm = &tt; + fsec_t fsec = 0; + int tz = 0; + GetCurrentDateTime(tm); + if (timetzVal) { + timetz2tm(timetzVal, tm, &fsec, &tz); + } + tm2timestamp(tm, fsec, &tz, &result); + return result; +} /* time_timestamp */ Datum time_eq_timestamptz(PG_FUNCTION_ARGS) { diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 5d5c5b1eb..34ff74477 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -55,6 +56,9 @@ #include "utils/varbit.h" #include "plugin_commands/mysqlmode.h" #include "plugin_utils/varbit.h" +#include "plugin_utils/timestamp.h" +#include "plugin_utils/date.h" +#include "libpq/libpq-int.h" #define BETWEEN_AND_ARGC 3 #define SUBSTR_WITH_LEN_OFFSET 2 @@ -79,8 +83,11 @@ #define CONV_MAX_CHAR_LEN 65 //max 64bit and 1 sign bit #define MYSQL_SUPPORT_MINUS_MAX_LENGTH 65 #define MAX_UINT32_STR "0xffffffff" +#define MAXBI64LEN 25 static long convert_bit_to_int (PG_FUNCTION_ARGS, int idx); +static TimestampTz temporal_to_timestamptz(Oid type, int index, PG_FUNCTION_ARGS); +static bool is_type_with_date(Oid type); #endif static int getResultPostionReverse(text* textStr, text* textStrToSearch, int32 beginIndex, int occurTimes); static int getResultPostion(text* textStr, text* textStrToSearch, int32 beginIndex, int occurTimes); @@ -7846,8 +7853,9 @@ static char* db_b_format_get_cstring(Datum param, Oid 
param_oid) ch_value = "0"; } } else { - if (param_oid == BOOLOID) { - ch_value = const_cast(param ? "1" : "0"); + ch_value = to_cstring_type(param, param_oid); + if (param_oid == BOOLOID && param) { + ch_value = "1"; } else { ch_value = "0"; } @@ -9236,6 +9244,29 @@ static char* numeric_to_cstring(Numeric n) return DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(n))); } +/** + Convert pg_tm value to cstring in YYYYMMDDHHmmSS/YYYYMMDD/HHmmSS format + @param tm The pg_tm value to convert. + @param type The oid of the type of timestamp/date/time + @return A cstring in format YYYYMMDDHHmmSS/YYYYMMDD/HHmmSS. +*/ +static char* tm_to_cstring(pg_tm* tm, Oid type) +{ + char str_val[MAXBI64LEN]; + uint64 val = 0; + if (type == TIMEOID || type == TIMETZOID) { + val = (tm->tm_hour * 10000UL + tm->tm_min * 100UL + tm->tm_sec); + } else if (type == DATEOID) { + val = (tm->tm_year * 10000UL + tm->tm_mon * 100UL + tm->tm_mday); + } else { + val = (tm->tm_year * 10000000000UL + tm->tm_mon * 100000000UL + + tm->tm_mday * 1000000UL + tm->tm_hour * 10000UL + + tm->tm_min * 100UL + tm->tm_sec); + } + check_sprintf_s(sprintf_s(str_val, MAXBI64LEN, "%" PRIu64, val)); + return pstrdup(str_val); +} + static char* cmp_get_string_type(PG_FUNCTION_ARGS, int argn) { char* res; @@ -9284,9 +9315,30 @@ static float8 cmp_get_real_type(PG_FUNCTION_ARGS, int argn) { float8 res; char* str_val = NULL; + struct pg_tm tt; + struct pg_tm* tm = &tt; + int tz = 0; + fsec_t fsec = 0; + bool flag = true; Oid typeoid = InvalidOid; typeoid = fcinfo->argTypes[argn]; - str_val = db_b_format_get_cstring(PG_GETARG_DATUM(argn), fcinfo->argTypes[argn]); + if (is_type_with_date(typeoid)) { + const char* tzn = NULL; + TimestampTz tsVal = temporal_to_timestamptz(fcinfo->argTypes[argn], argn, fcinfo); + timestamp2tm(tsVal, &tz, tm, &fsec, &tzn, NULL); + str_val = tm_to_cstring(tm, typeoid); + } else if (typeoid == TIMEOID) { + TimeADT timeVal = PG_GETARG_TIMEADT(argn); + time2tm(timeVal, tm, &fsec); + 
str_val = tm_to_cstring(tm, typeoid); + } else if (typeoid == TIMETZOID) { + TimeTzADT* timetzVal = PG_GETARG_TIMETZADT_P(argn); + timetz2tm(timetzVal, tm, &fsec, &tz); + str_val = tm_to_cstring(tm, typeoid); + } else { + flag = false; + str_val = db_b_format_get_cstring(PG_GETARG_DATUM(argn), fcinfo->argTypes[argn]); + } if (typeoid == FLOAT8OID) { res = PG_GETARG_FLOAT8(argn); } else if (typeoid == FLOAT4OID) { @@ -9296,6 +9348,9 @@ static float8 cmp_get_real_type(PG_FUNCTION_ARGS, int argn) } else { res = DatumGetFloat8(DirectFunctionCall1(float8in, CStringGetDatum(str_val))); } + if (flag) { + pfree_ext(str_val); + } return res; } @@ -9711,7 +9766,11 @@ static TimestampTz nontemporal_to_timestamptz(Oid type, Datum param) TimestampTz timestamptz; char* str_val = NULL; getTypeOutputInfo(type, &typeOutput, &typIsVarlena); - str_val = OidOutputFunctionCall(typeOutput, param); + if (typIsVarlena) { + str_val = DatumGetCString(DirectFunctionCall1(textout, param)); + } else { + str_val = OidOutputFunctionCall(typeOutput, param); + } //Convert string to the TIMESTAMPTZ representation. 
timestamptz = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in, CStringGetDatum(str_val), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); @@ -9733,6 +9792,12 @@ static TimestampTz temporal_to_timestamptz(Oid type, int index, PG_FUNCTION_ARGS case TIMESTAMPTZOID: timestamptz = PG_GETARG_TIMESTAMPTZ(index); break; + case TIMEOID: + timestamptz = time2timestamptz(PG_GETARG_TIMEADT(index)); + break; + case TIMETZOID: + timestamptz = timetz2timestamptz(PG_GETARG_TIMETZADT_P(index)); + break; default: timestamptz = nontemporal_to_timestamptz(fcinfo->argTypes[index], fcinfo->arg[index]); break; diff --git a/contrib/dolphin/sql/builtin_funcs/between.sql b/contrib/dolphin/sql/builtin_funcs/between.sql index 42769244b..611f732de 100644 --- a/contrib/dolphin/sql/builtin_funcs/between.sql +++ b/contrib/dolphin/sql/builtin_funcs/between.sql @@ -156,7 +156,7 @@ explain (costs off) select * from t_b1 where f between 1 and 2; explain (costs off) select * from t_b1 where g between '1' and '2'; explain (costs off) select * from t_b1 where h between '1' and '2'; explain (costs off) select * from t_b1 where i between '1' and '2'; ---test time cmp of time and date +-- test the cmp of time and date select date'2018-12-31' between time'23:56:59' and timestamp'2018-12-31 23:56:59'; select date'2018-12-31' not between time'23:56:59' and timestamp'2018-12-31 23:56:59'; select date'2018-12-31' between symmetric time'23:56:59' and timestamp'2018-12-31 23:56:59'; @@ -165,5 +165,15 @@ select time'23:56:59' between date'2018-12-31' and timestamp'2018-12-31 23:56:59 select time'23:56:59' not between date'2018-12-31' and timestamp'2018-12-31 23:56:59'; select time'23:56:59' between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; select time'23:56:59' not between datetime'2018-12-31' and timestamptz'2018-12-31 23:56:59'; +-- test the cmp of number and date/time/datetime +select period_add(202201,2) between '2022-01-01' and date'2022-03-31'; +select sec_to_time(4396) between 
sec_to_time(4395) and 4396; +select 10 between to_days('0000-01-01') and date'0000-01-04'; +select 10 between to_days('0000-01-01') and time'12:30:00'; +select 10 between to_days('0000-01-01') and datetime'0000-01-04 12:30:00'; +select 10 between to_days('0000-01-01') and timestamp'0000-01-04 12:30:00'; +select 102 between '0000-01-01' and date'0000-01-03'; +select 102 between time'00:01:01' and date'0000-01-03'; +select 102 between date'0000-01-01' and timestamp'0000-01-03 00:00:00'; drop schema db_between cascade; reset current_schema; \ No newline at end of file -- Gitee From bf28794fee38bb2ade0b379540b9874489119fe0 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 30 Oct 2023 15:52:54 +0800 Subject: [PATCH 034/434] Change int <-> bit cast from 'e' to 'a' --- contrib/dolphin/expected/implicit_cast.out | 18 ++++++++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 14 ++++++++++++++ contrib/dolphin/sql/implicit_cast.sql | 6 ++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 14 ++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/contrib/dolphin/expected/implicit_cast.out b/contrib/dolphin/expected/implicit_cast.out index f6a1cfad6..22930946d 100644 --- a/contrib/dolphin/expected/implicit_cast.out +++ b/contrib/dolphin/expected/implicit_cast.out @@ -288,5 +288,23 @@ select 1::int8 | 1::text; 1 (1 row) +create table t1(a bit default 0,b int default b'0', c bit default 0::int unsigned, d int unsigned default b'0'); +desc t1; + Field | Type | Null | Key | Default | Extra +-------+---------+------+-----+-------------+------- + a | bit(1) | YES | | 0 | + b | integer | YES | | B'0'::"bit" | + c | bit(1) | YES | | (0)::uint4 | + d | uint4 | YES | | B'0'::"bit" | +(4 rows) + +insert into t1 values (default,default,default,default); +select * from t1; + a | b | c | d +---+---+---+--- + 0 | 0 | 0 | 0 +(1 row) + +drop table t1; drop schema implicit_cast cascade; reset current_schema; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql 
b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 2cb54c598..c16d246db 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -59,6 +59,20 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamptz_le_time (timestamp with time zone DROP FUNCTION IF EXISTS pg_catalog.timestamptz_lt_time (timestamp with time zone, time); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_ge_time (timestamp with time zone, time); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_gt_time (timestamp with time zone, time); + +do $$ +begin + update pg_cast set castcontext='e', castowner=10 where castsource=1560 and casttarget=20 and castcontext='a'; + update pg_cast set castcontext='e', castowner=10 where castsource=1560 and casttarget=23 and castcontext='a'; + update pg_cast set castcontext='e', castowner=10 where castsource=20 and casttarget=1560 and castcontext='a'; + update pg_cast set castcontext='e', castowner=10 where castsource=23 and casttarget=1560 and castcontext='a'; +end +$$; +drop CAST IF EXISTS (uint4 AS bit); +drop CAST IF EXISTS (uint8 AS bit); +CREATE CAST (uint4 AS bit) WITH FUNCTION bitfromuint4(uint4, int4); +CREATE CAST (uint8 AS bit) WITH FUNCTION bitfromuint8(uint8, int4); + CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( tinyblob ) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; diff --git a/contrib/dolphin/sql/implicit_cast.sql b/contrib/dolphin/sql/implicit_cast.sql index b179db7ff..e60ac5205 100644 --- a/contrib/dolphin/sql/implicit_cast.sql +++ b/contrib/dolphin/sql/implicit_cast.sql @@ -61,5 +61,11 @@ select 1::int2 | 1::text; select 1::int4 | 1::text; select 1::int8 | 1::text; +create table t1(a bit default 0,b int default b'0', c bit default 0::int unsigned, d int unsigned default b'0'); +desc t1; +insert into t1 values (default,default,default,default); +select * from t1; +drop table t1; + drop schema implicit_cast cascade; reset current_schema; \ No newline at 
end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index a1ad59f6f..7827d9169 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -126,6 +126,20 @@ CREATE OPERATOR pg_catalog.>=(leftarg = timestamp with time zone, rightarg = tim CREATE OPERATOR pg_catalog.>(leftarg = timestamp with time zone, rightarg = time, procedure = timestamptz_gt_time, COMMUTATOR = <, NEGATOR = <=, restrict = scalarltsel, join = scalarltjoinsel); +--change bit -> int/bigint castcontext from 'e' to 'a' +do $$ +begin + update pg_cast set castcontext='a', castowner=10 where castsource=1560 and casttarget=20 and castcontext='e'; + update pg_cast set castcontext='a', castowner=10 where castsource=1560 and casttarget=23 and castcontext='e'; + update pg_cast set castcontext='a', castowner=10 where castsource=20 and casttarget=1560 and castcontext='e'; + update pg_cast set castcontext='a', castowner=10 where castsource=23 and casttarget=1560 and castcontext='e'; +end +$$; +drop CAST IF EXISTS (uint4 AS bit); +drop CAST IF EXISTS (uint8 AS bit); +CREATE CAST (uint4 AS bit) WITH FUNCTION bitfromuint4(uint4, int4) AS ASSIGNMENT; +CREATE CAST (uint8 AS bit) WITH FUNCTION bitfromuint8(uint8, int4) AS ASSIGNMENT; + -- The reason for using replace is because we don't want to change the OID CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( tinyblob -- Gitee From 528e2fa843fbc8e48de6420d157b9569dd4c3cbb Mon Sep 17 00:00:00 2001 From: Mijamind Date: Tue, 31 Oct 2023 20:16:32 +0800 Subject: [PATCH 035/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=201.#I8C2T4=20bugfix=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E8=AE=BE=E7=BD=AEclustermap=20=E6=9F=A5=E8=AF=A2=E8=A1=A8?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E5=87=BA=E9=94=99=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
--- contrib/spq_plugin/src/guc_spq.cpp | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 90cfd606c..450e7e4e0 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1499,7 +1499,7 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) { if (liststring == nullptr || liststring[0] == '\0') { GUC_check_errdetail("spq cluster map is null"); - return true; + return false; } char *rawname = nullptr; List *nodelist = nullptr; @@ -1508,17 +1508,19 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) NodeDefinition* node; int idx = 0; errno_t rc; - + MemoryContext oldContext = MemoryContextSwitchTo(u_sess->spq_cxt.spq_worker_context); rawname = pstrdup(liststring); if (rawname == nullptr) { - ereport(DEBUG1, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("spq cluster map pstrdup is null"))); - return true; + GUC_check_errdetail("spq cluster map pstrdup is null"); + MemoryContextSwitchTo(oldContext); + return false; } if (!SplitIdentifierString(rawname, ',', &nodelist)) { pfree(rawname); /* syntax error in name list */ - ereport(DEBUG1, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("spq cluster map is invalid, name|ip|port|oid|cport|sport,..."))); - return true; + GUC_check_errdetail("spq cluster map is invalid, name|ip|port|oid|cport|sport,..."); + MemoryContextSwitchTo(oldContext); + return false; } array_size = list_length(nodelist); // mem build in spq_cxt->spq_worker_context @@ -1530,13 +1532,14 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) char *nodestring = pstrdup((char *)lfirst(lnode)); (void)SplitIdentifierString(nodestring, '|', &itemlist); if (list_length(itemlist) != 6) { - ereport(DEBUG1, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("spq cluster map is invalid, name|ip|port|oid|cport|sport,..."))); + GUC_check_errdetail("spq cluster map is invalid, 
name|ip|port|oid|cport|sport,..."); pfree(rawname); pfree(nodestring); list_free(nodelist); list_free(itemlist); pfree(nodesDefinition); - return true; + MemoryContextSwitchTo(oldContext); + return false; } name = (char *)list_nth(itemlist, 0); ip = (char *)list_nth(itemlist, 1); @@ -1561,8 +1564,12 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) } pfree(rawname); list_free(nodelist); + if (t_thrd.spq_ctx.nodesDefinition != nullptr) { + pfree(t_thrd.spq_ctx.nodesDefinition); + } t_thrd.spq_ctx.num_nodes = array_size; t_thrd.spq_ctx.nodesDefinition = nodesDefinition; + MemoryContextSwitchTo(oldContext); return true; } @@ -1571,7 +1578,6 @@ static bool check_spq_cluster_map(char **newval, void **extra, GucSource source) if (source == PGC_S_DEFAULT) { return true; } - t_thrd.spq_ctx.num_nodes = 0; return spq_verify_gauss_cluster_map_syntax(*newval); } -- Gitee From 75ecfd6e92b040c35a6c3820e338908b5d020f92 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Wed, 1 Nov 2023 10:22:36 +0800 Subject: [PATCH 036/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dblob=E5=AF=BC?= =?UTF-8?q?=E5=85=A5=E5=AF=BC=E5=87=BA=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_cmp_operator_test.out | 24 ++-- .../dolphin/expected/test_charset_collate.out | 12 +- contrib/dolphin/expected/test_condition.out | 108 +++++++++--------- contrib/dolphin/output/b_proto_jdbc.source | 6 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 39 +++---- 5 files changed, 92 insertions(+), 97 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out index c1bf4a38a..e3289233e 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out @@ -589,9 +589,9 @@ 
select `tinyblob`, `json`, `json` < `tinyblob` as `json `tinyblob` as `json<>tinyblob`, `json` = `tinyblob` as `json=tinyblob`, `json` <=> `tinyblob` as `json<=>tinyblob` from test_json_table; - tinyblob | json | json>tinyblob | json>=tinyblob | jsontinyblob | json=tinyblob | json<=>tinyblob -----------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- - 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + tinyblob | json | json>tinyblob | json>=tinyblob | jsontinyblob | json=tinyblob | json<=>tinyblob +--------------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- + \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `blob`, `json`, @@ -609,9 +609,9 @@ select `blob`, `json`, `json` < `blob` as `json `blob` as `json<>blob`, `json` = `blob` as `json=blob`, `json` <=> `blob` as `json<=>blob` from test_json_table; - blob | json | json>blob | json>=blob | jsonblob | json=blob | json<=>blob --------+------------------+-----------+------------+-----------+------------+------------+------------+-----------+------------- - 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + blob | json | json>blob | json>=blob | jsonblob | json=blob | json<=>blob +--------------+------------------+-----------+------------+-----------+------------+------------+------------+-----------+------------- + \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `mediumblob`, `json`, @@ -629,9 +629,9 @@ select `mediumblob`, `json`, `json` < `mediumblob` as `json `mediumblob` as `json<>mediumblob`, `json` = `mediumblob` as `json=mediumblob`, `json` <=> `mediumblob` as `json<=>mediumblob` from test_json_table; - mediumblob | json | json>mediumblob | json>=mediumblob | jsonmediumblob | json=mediumblob | json<=>mediumblob 
-------------+------------------+-----------------+------------------+-----------------+------------------+------------------+------------------+-----------------+------------------- - 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + mediumblob | json | json>mediumblob | json>=mediumblob | jsonmediumblob | json=mediumblob | json<=>mediumblob +--------------+------------------+-----------------+------------------+-----------------+------------------+------------------+------------------+-----------------+------------------- + \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `longblob`, `json`, @@ -649,9 +649,9 @@ select `longblob`, `json`, `json` < `longblob` as `json `longblob` as `json<>longblob`, `json` = `longblob` as `json=longblob`, `json` <=> `longblob` as `json<=>longblob` from test_json_table; - longblob | json | json>longblob | json>=longblob | jsonlongblob | json=longblob | json<=>longblob -----------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- - 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + longblob | json | json>longblob | json>=longblob | jsonlongblob | json=longblob | json<=>longblob +--------------+------------------+---------------+----------------+---------------+----------------+----------------+----------------+---------------+----------------- + \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `text`, `json`, diff --git a/contrib/dolphin/expected/test_charset_collate.out b/contrib/dolphin/expected/test_charset_collate.out index d62e8770e..25da489a9 100644 --- a/contrib/dolphin/expected/test_charset_collate.out +++ b/contrib/dolphin/expected/test_charset_collate.out @@ -153,8 +153,8 @@ create table test_binary1( ); insert into test_binary1 values('ppp'),('PpP'),('PPP'),('AAA'),('Aaa'),('aaa'),('Å '),('S'); select distinct f1 from test_binary1 order by f1; - 
f1 ------ + f1 +---------- AAA Aaa PPP @@ -162,7 +162,7 @@ select distinct f1 from test_binary1 order by f1; S aaa ppp - Å  + \305\240 (8 rows) select f1 from test_binary1 where f1 = 'ppp'::blob collate 'binary'; @@ -178,8 +178,8 @@ LINE 1: ...elect f1 from test_binary1 where f1 = 'ppp'::blob collate 'u... alter table test_binary1 add column f2 tinyblob collate 'binary'; insert into test_binary1 (f2) values('ppp'),('PpP'),('PPP'),('AAA'),('Aaa'),('aaa'),('Å '),('S'); select distinct f2 from test_binary1 order by f2; - f2 ------ + f2 +---------- AAA Aaa PPP @@ -187,7 +187,7 @@ select distinct f2 from test_binary1 order by f2; S aaa ppp - Å  + \305\240 (9 rows) diff --git a/contrib/dolphin/expected/test_condition.out b/contrib/dolphin/expected/test_condition.out index c6a40c3ae..19c092272 100644 --- a/contrib/dolphin/expected/test_condition.out +++ b/contrib/dolphin/expected/test_condition.out @@ -5753,9 +5753,9 @@ select ifnull(tyint, vch) from typeset; (1 row) select ifnull(tyint, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(tyint, txt) from typeset; @@ -5831,9 +5831,9 @@ select ifnull(smint, vch) from typeset; (1 row) select ifnull(smint, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(smint, txt) from typeset; @@ -5903,9 +5903,9 @@ select ifnull(anint, vch) from typeset; (1 row) select ifnull(anint, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(anint, txt) from typeset; @@ -5969,9 +5969,9 @@ select ifnull(bgint, vch) from typeset; (1 row) select ifnull(bgint, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(bgint, txt) from typeset; @@ -6029,9 +6029,9 @@ select ifnull(dcmal, vch) from typeset; (1 row) select ifnull(dcmal, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(dcmal, txt) from typeset; @@ -6083,9 +6083,9 
@@ select ifnull(nmric, vch) from typeset; (1 row) select ifnull(nmric, blb) from typeset; - ifnull --------- - 127 + ifnull +---------- + \x313237 (1 row) select ifnull(nmric, txt) from typeset; @@ -6131,9 +6131,9 @@ select ifnull(flt, vch) from typeset; (1 row) select ifnull(flt, blb) from typeset; - ifnull ---------- - 127.213 + ifnull +------------------ + \x3132372e323133 (1 row) select ifnull(flt, txt) from typeset; @@ -6145,43 +6145,43 @@ select ifnull(flt, txt) from typeset; select ifnull(bt, dt) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, tmstp) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, tm) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, ch) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, vch) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, blb) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(bt, txt) from typeset; ifnull -------- - \x7F + \x7f (1 row) select ifnull(dt, tmstp) from typeset; @@ -6209,9 +6209,9 @@ select ifnull(dt, vch) from typeset; (1 row) select ifnull(dt, blb) from typeset; - ifnull ------------- - 2001-04-19 + ifnull +------------------------ + \x323030312d30342d3139 (1 row) select ifnull(dt, txt) from typeset; @@ -6239,9 +6239,9 @@ select ifnull(tmstp, vch) from typeset; (1 row) select ifnull(tmstp, blb) from typeset; - ifnull ------------------------- - 2001-04-19 00:00:00-07 + ifnull +------------------------------------------------ + \x323030312d30342d31392030303a30303a30302d3037 (1 row) select ifnull(tmstp, txt) from typeset; @@ -6263,9 +6263,9 @@ select ifnull(tm, vch) from typeset; (1 row) select ifnull(tm, blb) from typeset; - ifnull ----------- - 22:23:44 + ifnull +-------------------- + \x32323a32333a3434 (1 row) select ifnull(tm, txt) from typeset; @@ -6281,9 +6281,9 @@ select ifnull(ch, vch) from typeset; (1 row) select ifnull(ch, blb) from typeset; - ifnull 
--------------------------------- - 2001-04-19 22:23:44 + ifnull +---------------------------------------------------------------- + \x323030312d30342d31392032323a32333a34342020202020202020202020 (1 row) select ifnull(ch, txt) from typeset; @@ -6293,9 +6293,9 @@ select ifnull(ch, txt) from typeset; (1 row) select ifnull(vch, blb) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vch, txt) from typeset; @@ -6305,9 +6305,9 @@ select ifnull(vch, txt) from typeset; (1 row) select ifnull(blb, txt) from typeset; - ifnull -------------- - \x123EB\x12 + ifnull +-------------- + \x1233454212 (1 row) select ifnull(bin, smint) from typeset; @@ -6385,13 +6385,13 @@ select ifnull(bin, vch) from typeset; select ifnull(bin, blb) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, txt) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(vbin, smint) from typeset; @@ -6467,15 +6467,15 @@ select ifnull(vbin, vch) from typeset; (1 row) select ifnull(vbin, blb) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, txt) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select pg_typeof(ifnull(tyint, smint)) from typeset; diff --git a/contrib/dolphin/output/b_proto_jdbc.source b/contrib/dolphin/output/b_proto_jdbc.source index f0faee639..340104708 100644 --- a/contrib/dolphin/output/b_proto_jdbc.source +++ b/contrib/dolphin/output/b_proto_jdbc.source @@ -170,9 +170,9 @@ c33:CHAR:null update failed:invalid input syntax for type tinyint: "abc" delete failed:invalid input syntax for type tinyint: "abc" select * from t3; - c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | 
c11 | c12 | c13 | c15 | c16 | c18 | c19 | c20 | c21 | c22 | c23 | c24 | c25 | c26 | c27 | c28 | c29 | c30 | c31 | c32 | c33 -----+------+----+----+----+----+----+----+-----+-----+------------+-----+------------+---------+----------+------+------------+----------+----------------------------+-------------------------+------+----------+------+------+----------+--------------------+------+--------+------------+-----+----- - 1 | 2000 | 1 | 4 | 5 | 6 | 7 | 8 | 9.9 | 10 | 0001100011 | t | char | varchar | nvarchar | 2023 | 2023-02-27 | 14:46:30 | 2023-03-07 00:16:16.666-08 | 2023-03-07 16:16:16.666 | blob | 626C6F62 | blob | blob | (1000,0) | ((1000,0),(200,3)) | text | 20.000 | {"k": "v"} | a | a + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | c11 | c12 | c13 | c15 | c16 | c18 | c19 | c20 | c21 | c22 | c23 | c24 | c25 | c26 | c27 | c28 | c29 | c30 | c31 | c32 | c33 +----+------+----+----+----+----+----+----+-----+-----+------------+-----+------------+---------+----------+------+------------+----------+----------------------------+-------------------------+------------+----------+------------+------------+----------+--------------------+------+--------+------------+-----+----- + 1 | 2000 | 1 | 4 | 5 | 6 | 7 | 8 | 9.9 | 10 | 0001100011 | t | char | varchar | nvarchar | 2023 | 2023-02-27 | 14:46:30 | 2023-03-07 00:16:16.666-08 | 2023-03-07 16:16:16.666 | \x626c6f62 | 626C6F62 | \x626c6f62 | \x626c6f62 | (1000,0) | ((1000,0),(200,3)) | text | 20.000 | {"k": "v"} | a | a (1 row) \! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/proto_jdbc/class/mysql-connector-java-5.1.47.jar:@abs_builddir@/proto_jdbc/class/. 
MySQLJdbcAutoCommitTest localhost $dp db_jdbc proto_user Gauss@123 diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 34ff74477..51e20b91a 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -896,10 +896,8 @@ Datum rawin(PG_FUNCTION_ARGS) } } -// output interface of RAW type -Datum rawout(PG_FUNCTION_ARGS) +Datum normal_rawout(PG_FUNCTION_ARGS) { - if (!GetSessionContext()->enableBCmptMode) { /*fcinfo->fncollation is set to 0 when calling Macro FuncCall1, *so the collation value needs to be reset. */ @@ -932,6 +930,13 @@ Datum rawout(PG_FUNCTION_ARGS) } pfree_ext(ans); PG_RETURN_CSTRING(out_string); +} + +// output interface of RAW type +Datum rawout(PG_FUNCTION_ARGS) +{ + if (!GetSessionContext()->enableBCmptMode) { + return normal_rawout(fcinfo); } else { return dolphin_blob_rawout(fcinfo); } @@ -8611,26 +8616,16 @@ Datum tinyblob_rawin(PG_FUNCTION_ARGS) Datum dolphin_blob_rawout(PG_FUNCTION_ARGS) { - bytea* vlena = PG_GETARG_BYTEA_PP(0); - char* result = NULL; - char* rp = NULL; - - char* vp = NULL; - int len; - int i; - - len = 1 + VARSIZE_ANY_EXHDR(vlena); /* empty string has 1 char */ - rp = result = (char*)palloc(len); - vp = VARDATA_ANY(vlena); - for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { - *rp++ = *vp; + if (strcmp(u_sess->attr.attr_common.application_name, "gs_dump") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gs_dumpall") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gsql") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gs_probackup") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gs_rewind") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gs_clean") == 0) { + return byteaout(fcinfo); + } else { + return normal_rawout(fcinfo); } - *rp = '\0'; - - /* free memory if allocated by the toaster */ - PG_FREE_IF_COPY(vlena, 0); - - PG_RETURN_CSTRING(result); } 
Datum mediumblob_rawin(PG_FUNCTION_ARGS) -- Gitee From f456b82504ad35f1e2caff2d8a3c3455e22045b9 Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 2 Nov 2023 16:57:18 +0800 Subject: [PATCH 037/434] Fix like bug. --- .../expected/pad_char_to_full_length.out | 40 +++++------ .../dolphin/expected/test_mysql_operator.out | 69 +++++++++++++++---- contrib/dolphin/plugin_parser/gram.y | 36 +++------- contrib/dolphin/plugin_utils/adt/like.cpp | 10 +-- contrib/dolphin/sql/test_mysql_operator.sql | 24 ++++--- 5 files changed, 102 insertions(+), 77 deletions(-) diff --git a/contrib/dolphin/expected/pad_char_to_full_length.out b/contrib/dolphin/expected/pad_char_to_full_length.out index e385cc714..c37e9a122 100644 --- a/contrib/dolphin/expected/pad_char_to_full_length.out +++ b/contrib/dolphin/expected/pad_char_to_full_length.out @@ -90,10 +90,8 @@ select * from t where name like 'test %'; select * from t where name not like 'test %'; name ------------------------------------------------------------------------------------------------------ - test - test test -(3 rows) +(1 row) select * from t where name like 'Test %'; name @@ -129,17 +127,17 @@ select * from t where name not like 'test %'; (1 row) select * from t where name like 'Test %'; - name ------------------------------------------------------------------------------------------------------- - test - test -(2 rows) + name +------ +(0 rows) select * from t where name not like 'Test %'; name ------------------------------------------------------------------------------------------------------ + test + test test -(1 row) +(3 rows) select cast('a' as char(10)), 'a'::char(10); varchar | varchar @@ -406,17 +404,17 @@ select * from t_col where name not like 'test %'; (1 row) select * from t_col where name like 'Test %'; - name ------------------------------------------------------------------------------------------------------- - test - test -(2 rows) + name +------ +(0 rows) select * from t_col where name not like 'Test %'; 
name ------------------------------------------------------------------------------------------------------ + test + test test -(1 row) +(3 rows) set dolphin.b_compatibility_mode = true; select * from t_col where name like 'test %'; @@ -433,17 +431,17 @@ select * from t_col where name not like 'test %'; (1 row) select * from t_col where name like 'Test %'; - name ------------------------------------------------------------------------------------------------------- - test - test -(2 rows) + name +------ +(0 rows) select * from t_col where name not like 'Test %'; name ------------------------------------------------------------------------------------------------------ + test + test test -(1 row) +(3 rows) set dolphin.b_compatibility_mode = false; set try_vector_engine_strategy='force'; diff --git a/contrib/dolphin/expected/test_mysql_operator.out b/contrib/dolphin/expected/test_mysql_operator.out index 72404178f..4465ab4ff 100644 --- a/contrib/dolphin/expected/test_mysql_operator.out +++ b/contrib/dolphin/expected/test_mysql_operator.out @@ -1555,13 +1555,15 @@ drop table testforint2_p5; set dolphin.b_compatibility_mode = 0; drop schema test_op_xor cascade; reset current_schema; -create schema like_test; -set current_schema to 'like_test'; set dolphin.b_compatibility_mode = 1; +create schema like_test CHARACTER SET ='utf8' COLLATE ='utf8mb4_general_ci'; +set current_schema to 'like_test'; +set b_format_behavior_compat_options to 'enable_multi_charset'; +set names 'utf8mb4' collate 'utf8_bin'; select 'a' like 'A'; ?column? ---------- - t + f (1 row) ---正常报错,likeå³è¾¹ç¼ºå‚æ•° @@ -1576,6 +1578,49 @@ select 'a' like 'A' 'a'; f (1 row) +set names 'utf8mb4' collate 'utf8mb4_general_ci'; +select 'a' like 'A'; + ?column? +---------- + t +(1 row) + +select 'a'::char(10) like 'A'; + ?column? +---------- + t +(1 row) + +select 'a'::varchar(10) like 'A'; + ?column? +---------- + t +(1 row) + +select 'a'::VARCHAR2(10) like 'A'; + ?column? 
+---------- + t +(1 row) + +select 'a'::NVARCHAR2(10) like 'A'; + ?column? +---------- + t +(1 row) + +select 'a'::text like 'A'; + ?column? +---------- + t +(1 row) + +select 'a'::clob like 'A'; + ?column? +---------- + t +(1 row) + select 100 like 100; ?column? ---------- @@ -1892,7 +1937,7 @@ select * from hotel natural inner join price where name like binary 'b/%' escape ----+------+-----+------+-------+------+-----------+------- (0 rows) -select * from hotel natural inner join price where name not like 'b%'; +select * from hotel natural inner join price where name not like 'b%' order by 1; id | name | cin | cout | hotel | room | hotelname | price ----+------------+------------+------------+------------+------+------------+------- 1 | Alice | 2022-05-07 | 2022-05-08 | Vienna | 5 | Vienna | 500 @@ -1986,11 +2031,11 @@ select avg(cout like '2022%') from hotel group by hotel ; 1.00000000000000000000 (2 rows) -select sum(cout like '2022%') from hotel group by hotel ; - sum ------ - 2 - 3 +select hotel, sum(cout like '2022%') from hotel group by hotel order by 1; + hotel | sum +------------+----- + Holiday | 2 + Vienna | 3 (2 rows) select count(cout like binary '2022%') from hotel group by hotel ; @@ -2048,12 +2093,12 @@ create table price(hotelname char(10),price int) with (orientation = column); insert into price values ('Vienna',500), ('Holiday',700); -select * from hotel natural inner join price where name like 'b%'; +select * from hotel natural inner join price where name like 'b%' order by 1; id | name | cin | cout | hotel | room | hotelname | price ----+------------+------------+------------+------------+------+------------+------- 3 | Bob | 2022-06-17 | 2022-06-25 | Holiday | 1 | Vienna | 500 - 5 | Band | 2022-03-06 | 2022-03-09 | Vienna | 3 | Vienna | 500 3 | Bob | 2022-06-17 | 2022-06-25 | Holiday | 1 | Holiday | 700 + 5 | Band | 2022-03-06 | 2022-03-09 | Vienna | 3 | Vienna | 500 5 | Band | 2022-03-06 | 2022-03-09 | Vienna | 3 | Holiday | 700 (4 rows) @@ 
-2072,7 +2117,7 @@ select * from hotel natural inner join price where name like binary 'b/%' escape ----+------+-----+------+-------+------+-----------+------- (0 rows) -select * from hotel natural inner join price where name not like 'b%'; +select * from hotel natural inner join price where name not like 'b%' order by 1; id | name | cin | cout | hotel | room | hotelname | price ----+------------+------------+------------+------------+------+------------+------- 1 | Alice | 2022-05-07 | 2022-05-08 | Vienna | 5 | Vienna | 500 diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 3c7683228..482ec26c5 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -32697,10 +32697,7 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } } | a_expr LIKE a_expr { - if (GetSessionContext()->enableBCmptMode) - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~*", $1, $3, @2); - else - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~", $1, $3, @2); + $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~", $1, $3, @2); } | a_expr LIKE a_expr ESCAPE a_expr { @@ -32714,17 +32711,11 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } n->over = NULL; n->location = @2; n->call_func = false; - if (GetSessionContext()->enableBCmptMode) - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~*", $1, (Node *) n, @2); - else - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~", $1, (Node *) n, @2); + $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~", $1, (Node *) n, @2); } | a_expr NOT_LIKE a_expr %prec NOT_LIKE - { - if (GetSessionContext()->enableBCmptMode) - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~*", $1, $3, @2); - else - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~", $1, $3, @2); + { + $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~", $1, $3, @2); } | a_expr NOT_LIKE a_expr ESCAPE a_expr %prec NOT_LIKE { @@ -32738,10 +32729,7 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } n->over = NULL; n->location = @2; 
n->call_func = false; - if (GetSessionContext()->enableBCmptMode) - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~*", $1, (Node *) n, @2); - else - $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~", $1, (Node *) n, @2); + $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "!~~", $1, (Node *) n, @2); } | a_expr ILIKE a_expr { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "~~*", $1, $3, @2); } @@ -35625,18 +35613,12 @@ subquery_Op: | OPERATOR '(' any_operator ')' { $$ = $3; } | LIKE - { - if (GetSessionContext()->enableBCmptMode) - $$ = list_make1(makeString("~~*")); - else - $$ = list_make1(makeString("~~")); + { + $$ = list_make1(makeString("~~")); } | NOT_LIKE %prec NOT_LIKE - { - if (GetSessionContext()->enableBCmptMode) - $$ = list_make1(makeString("!~~*")); - else - $$ = list_make1(makeString("!~~")); + { + $$ = list_make1(makeString("!~~")); } | LIKE BINARY { $$ = list_make1(makeString("~~")); } diff --git a/contrib/dolphin/plugin_utils/adt/like.cpp b/contrib/dolphin/plugin_utils/adt/like.cpp index 0e448bd07..08911681e 100644 --- a/contrib/dolphin/plugin_utils/adt/like.cpp +++ b/contrib/dolphin/plugin_utils/adt/like.cpp @@ -571,11 +571,7 @@ Datum bpchartextlike(PG_FUNCTION_ARGS) int slen, plen; s = VARDATA_ANY(str); -#ifdef DOLPHIN slen = SQL_MODE_PAD_CHAR_TO_FULL_LENGTH() ? VARSIZE_ANY_EXHDR(str) : bcTruelen(str); -#else - slen = bcTruelen(str); -#endif p = VARDATA_ANY(pat); plen = VARSIZE_ANY_EXHDR(pat); @@ -594,15 +590,11 @@ Datum bpchartextnlike(PG_FUNCTION_ARGS) int slen, plen; s = VARDATA_ANY(str); - slen = bcTruelen(str); + slen = SQL_MODE_PAD_CHAR_TO_FULL_LENGTH() ? 
VARSIZE_ANY_EXHDR(str) : bcTruelen(str); p = VARDATA_ANY(pat); plen = VARSIZE_ANY_EXHDR(pat); -#ifdef DOLPHIN result = (generic_match_text_with_collation(s, slen, p, plen, PG_GET_COLLATION()) != LIKE_TRUE); -#else - result = (generic_match_text_with_collation(s, slen, p, plen, PG_GET_COLLATION()) == LIKE_FALSE); -#endif PG_RETURN_BOOL(result); } diff --git a/contrib/dolphin/sql/test_mysql_operator.sql b/contrib/dolphin/sql/test_mysql_operator.sql index a69baeac5..9da30f718 100644 --- a/contrib/dolphin/sql/test_mysql_operator.sql +++ b/contrib/dolphin/sql/test_mysql_operator.sql @@ -623,10 +623,11 @@ set dolphin.b_compatibility_mode = 0; drop schema test_op_xor cascade; reset current_schema; - -create schema like_test; -set current_schema to 'like_test'; set dolphin.b_compatibility_mode = 1; +create schema like_test CHARACTER SET ='utf8' COLLATE ='utf8mb4_general_ci'; +set current_schema to 'like_test'; +set b_format_behavior_compat_options to 'enable_multi_charset'; +set names 'utf8mb4' collate 'utf8_bin'; select 'a' like 'A'; ---正常报错,likeå³è¾¹ç¼ºå‚æ•° @@ -634,7 +635,14 @@ select 'a' like; ---ä¸æŠ¥é”™ï¼Œ'A' 'a' 被当æˆ'Aa'å¤„ç† select 'a' like 'A' 'a'; - +set names 'utf8mb4' collate 'utf8mb4_general_ci'; +select 'a' like 'A'; +select 'a'::char(10) like 'A'; +select 'a'::varchar(10) like 'A'; +select 'a'::VARCHAR2(10) like 'A'; +select 'a'::NVARCHAR2(10) like 'A'; +select 'a'::text like 'A'; +select 'a'::clob like 'A'; select 100 like 100; select -100 like 100; @@ -727,7 +735,7 @@ select * from hotel natural inner join price where name like 'b%'; select * from hotel natural inner join price where name like binary 'b%'; select * from hotel natural inner join price where name like 'b/%' escape '/'; select * from hotel natural inner join price where name like binary 'b/%' escape '/'; -select * from hotel natural inner join price where name not like 'b%'; +select * from hotel natural inner join price where name not like 'b%' order by 1; select * from hotel natural inner 
join price where name not like binary 'b%'; select * from hotel natural inner join price where name not like 'b/%' escape '/'; select * from hotel natural inner join price where name not like binary 'b/%' escape '/'; @@ -750,7 +758,7 @@ select count(cout like '2022%') from hotel group by hotel ; select max(cout like '2022%') from hotel group by hotel ; select min(cout like '2021%') from hotel group by hotel ; select avg(cout like '2022%') from hotel group by hotel ; -select sum(cout like '2022%') from hotel group by hotel ; +select hotel, sum(cout like '2022%') from hotel group by hotel order by 1; select count(cout like binary '2022%') from hotel group by hotel ; select max(cout like binary '2022%') from hotel group by hotel ; select min(cout like binary '2021%') from hotel group by hotel ; @@ -768,11 +776,11 @@ create table price(hotelname char(10),price int) with (orientation = column); insert into price values ('Vienna',500), ('Holiday',700); -select * from hotel natural inner join price where name like 'b%'; +select * from hotel natural inner join price where name like 'b%' order by 1; select * from hotel natural inner join price where name like binary 'b%'; select * from hotel natural inner join price where name like 'b/%' escape '/'; select * from hotel natural inner join price where name like binary 'b/%' escape '/'; -select * from hotel natural inner join price where name not like 'b%'; +select * from hotel natural inner join price where name not like 'b%' order by 1; select * from hotel natural inner join price where name not like binary 'b%'; select * from hotel natural inner join price where name not like 'b/%' escape '/'; select * from hotel natural inner join price where name not like binary 'b/%' escape '/'; -- Gitee From 7870e029b2838513393396fc891c02d1752d49de Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 3 Nov 2023 10:31:23 +0800 Subject: [PATCH 038/434] Fix show create view bug. 
--- contrib/dolphin/expected/show_create.out | 22 +++++++++++++++++-- .../dolphin/plugin_utils/adt/ruleutils.cpp | 21 ++++++++++++------ contrib/dolphin/sql/show_create.sql | 10 ++++++++- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/contrib/dolphin/expected/show_create.out b/contrib/dolphin/expected/show_create.out index c4b4935a3..b56b484f7 100644 --- a/contrib/dolphin/expected/show_create.out +++ b/contrib/dolphin/expected/show_create.out @@ -486,6 +486,24 @@ show create view test_get_def.tt19v; | SELECT ('foo'::text = ANY (ARRAY['abc'::text, 'def'::text, 'foo'::text])) AS c1, ('foo'::text = ANY ((SELECT ARRAY['abc'::text, 'def'::text, 'foo'::text] AS `array`)::text[])) AS c2; | | (1 row) +create algorithm = merge view with_c_check_option_view as select * from generated_test where a < 10 with cascaded check option; +create algorithm = merge view with_l_check_option_view as select * from generated_test where a < 10 with local check option; +show create view with_c_check_option_view; + View | Create View | character_set_client | collation_connection +--------------------------+----------------------------------------------------------------------------------------------------------------+----------------------+---------------------- + with_c_check_option_view | CREATE OR REPLACE DEFINER = test_showcreate SQL SECURITY DEFINER VIEW test_get_def.with_c_check_option_view AS+| UTF8 | en_US.UTF-8 + | SELECT * FROM generated_test WHERE (generated_test.a < 10) WITH CASCADED CHECK OPTION; | | +(1 row) + +show create view with_l_check_option_view; + View | Create View | character_set_client | collation_connection +--------------------------+----------------------------------------------------------------------------------------------------------------+----------------------+---------------------- + with_l_check_option_view | CREATE OR REPLACE DEFINER = test_showcreate SQL SECURITY DEFINER VIEW test_get_def.with_l_check_option_view AS+| UTF8 | en_US.UTF-8 + 
| SELECT * FROM generated_test WHERE (generated_test.a < 10) WITH LOCAL CHECK OPTION; | | +(1 row) + +drop view with_c_check_option_view; +drop view with_l_check_option_view; -- table view exchange show create test show create view generated_test; ERROR: 'test_get_def.generated_test' is not VIEW @@ -615,7 +633,7 @@ show create table my_property_normal; show create view my_property_secure; View | Create View | character_set_client | collation_connection --------------------+--------------------------------------------------------------------------------------------------------+----------------------+---------------------- - my_property_secure | CREATE OR REPLACE DEFINER = test_showcreate SQL SECUIRTY DEFINER VIEW test_get_def.my_property_secure +| UTF8 | en_US.UTF-8 + my_property_secure | CREATE OR REPLACE DEFINER = test_showcreate SQL SECURITY DEFINER VIEW test_get_def.my_property_secure +| UTF8 | en_US.UTF-8 | WITH (security_barrier=true) AS +| | | SELECT * FROM customer WHERE (customer.name = (`current_user`())::text); | | (1 row) @@ -623,7 +641,7 @@ show create view my_property_secure; show create table my_property_secure; View | Create View | character_set_client | collation_connection --------------------+--------------------------------------------------------------------------------------------------------+----------------------+---------------------- - my_property_secure | CREATE OR REPLACE DEFINER = test_showcreate SQL SECUIRTY DEFINER VIEW test_get_def.my_property_secure +| UTF8 | en_US.UTF-8 + my_property_secure | CREATE OR REPLACE DEFINER = test_showcreate SQL SECURITY DEFINER VIEW test_get_def.my_property_secure +| UTF8 | en_US.UTF-8 | WITH (security_barrier=true) AS +| | | SELECT * FROM customer WHERE (customer.name = (`current_user`())::text); | | (1 row) diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index bef6e3d2c..d087fa0b4 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp 
+++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -13052,9 +13052,9 @@ static void get_viewdefinfo_oid(Oid viewOid, StringInfoData *buf) Datum isinvoker = OidFunctionCall2(ARRAY_CONTAIN_FUNC_OID, reloptions, OidFunctionCall3(ANYARRAYINFUNCOID, arrinvoker, TEXTOID, 0)); if (DatumGetBool(isinvoker)) { - appendStringInfo(buf, "SQL SECUIRTY INVOKER "); + appendStringInfo(buf, "SQL SECURITY INVOKER "); } else { - appendStringInfo(buf, "SQL SECUIRTY DEFINER "); + appendStringInfo(buf, "SQL SECURITY DEFINER "); } } if (classForm->relpersistence != RELPERSISTENCE_PERMANENT) { @@ -13080,23 +13080,30 @@ static void get_viewdefinfo_oid(Oid viewOid, StringInfoData *buf) appendStringInfo(buf, "\n WITH (%s) ", viewoption); } } + appendStringInfoString(buf, "AS\n"); viewdef = pg_get_viewdef_worker(viewOid, 0, -1); - appendStringInfo(buf, "AS\n%s", viewdef); + Size viewdef_len = strlen(viewdef); + if (viewdef[viewdef_len - 1] == ';') { + /* remove last ';', case we may need to add 'WITH XXX' option */ + viewdef[viewdef_len - 1] = 0; + } + appendStringInfoString(buf, viewdef); /* with local check option OR with cascade check option OR empty */ if (isnull == false) { Datum checkopt = OidFunctionCall2(ARRAY_CONTAIN_FUNC_OID, reloptions, OidFunctionCall3(ANYARRAYINFUNCOID, arrlocal, TEXTOID, 0)); - if (DatumGetBool(checkopt) && buf->len > 1) { - appendStringInfo(buf - 1, " WITH LOCAL CHECK OPTION;"); + if (DatumGetBool(checkopt)) { + appendStringInfo(buf, " WITH LOCAL CHECK OPTION"); } else { checkopt = OidFunctionCall2(ARRAY_CONTAIN_FUNC_OID, reloptions, OidFunctionCall3(ANYARRAYINFUNCOID, arrcascade, TEXTOID, 0)); - if (DatumGetBool(checkopt) && buf->len > 1) { - appendStringInfo(buf - 1, " WITH CASCADED CHECK OPTION;"); + if (DatumGetBool(checkopt)) { + appendStringInfo(buf, " WITH CASCADED CHECK OPTION"); } } } + appendStringInfoString(buf, ";"); ReleaseSysCache(tuple); pfree_ext(viewdef); } diff --git a/contrib/dolphin/sql/show_create.sql 
b/contrib/dolphin/sql/show_create.sql index 93ce14e0f..583bd3b38 100644 --- a/contrib/dolphin/sql/show_create.sql +++ b/contrib/dolphin/sql/show_create.sql @@ -163,6 +163,14 @@ select 'foo'::text = any(array['abc','def','foo']::text[]) c1, 'foo'::text = any((select array['abc','def','foo']::text[])::text[]) c2; show create view tt19v; show create view test_get_def.tt19v; + +create algorithm = merge view with_c_check_option_view as select * from generated_test where a < 10 with cascaded check option; +create algorithm = merge view with_l_check_option_view as select * from generated_test where a < 10 with local check option; +show create view with_c_check_option_view; +show create view with_l_check_option_view; +drop view with_c_check_option_view; +drop view with_l_check_option_view; + -- table view exchange show create test show create view generated_test; show create table tt19v; @@ -462,4 +470,4 @@ drop schema test_get_def cascade; RESET ROLE; DROP USER test_showcreate; drop schema show_create cascade; -reset current_schema; \ No newline at end of file +reset current_schema; -- Gitee From e23e20267c4a8b3e12c224a61c6f80f66091057d Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 3 Nov 2023 14:31:44 +0800 Subject: [PATCH 039/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddayofxx=E3=80=81date('xx'),=20timestamp('')?= =?UTF-8?q?=E7=AD=89=E5=9C=BA=E6=99=AF=E4=B8=8B=E8=BE=93=E5=85=A5=E4=B8=BA?= =?UTF-8?q?=E9=9D=9E=E6=B3=95=E5=80=BC=E8=BE=93=E5=87=BA=E4=B8=8EMysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddayofxx=E6=89=A7=E8=A1=8C=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=E6=88=96=E8=80=85date=E3=80=81timestamp=E5=9C=A8=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=80=BC=E6=97=B6=E5=80=99=E6=98=BE=E7=A4=BA=E4=B8=BA?= =?UTF-8?q?=E9=9D=9ENULL=E5=80=BC=E5=AF=BC=E8=87=B4=E5=92=8CMySQL=E4=B8=8D?= 
=?UTF-8?q?=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20dayof?= =?UTF-8?q?xx=E6=9C=89=E4=B8=A4=E4=B8=AA=E9=97=AE=E9=A2=98=EF=BC=8C?= =?UTF-8?q?=E4=B8=80=E6=98=AF=E6=98=BE=E7=A4=BA=E5=80=BC=E4=B8=BA=E9=9D=9E?= =?UTF-8?q?NULL=EF=BC=8C=E8=80=8C=E6=98=AF=E9=83=A8=E5=88=86=E5=9C=BA?= =?UTF-8?q?=E6=99=AF=E5=8A=9F=E8=83=BD=E6=9C=AA=E5=AE=9E=E7=8E=B0=E3=80=82?= =?UTF-8?q?=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20?= =?UTF-8?q?=E6=98=BE=E7=A4=BANULL=E7=9A=84=E6=96=B9=E6=A1=88=E4=B8=BA?= =?UTF-8?q?=E5=85=81=E8=AE=B8Input=E5=87=BD=E6=95=B0=E8=BF=94=E5=9B=9ENull?= =?UTF-8?q?=E5=80=BC=EF=BC=8Cdayofxx=E7=9A=84=E6=96=B9=E6=A1=88=E4=B8=BA?= =?UTF-8?q?=E9=80=82=E9=85=8D=E6=8A=A5=E9=94=99=E5=9C=BA=E6=99=AF=20?= =?UTF-8?q?=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:=20https://e.gitee.com/opengaussorg/dashboard=3Fissue?= =?UTF-8?q?=3DI898UO=20=E3=80=90=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C?= =?UTF-8?q?=E6=8A=A5=E5=91=8A=E3=80=91:=20=E8=AF=B7=E9=99=84=E4=B8=8A?= =?UTF-8?q?=E8=87=AA=E9=AA=8C=E7=BB=93=E6=9E=9C(=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E6=88=96=E8=80=85=E6=88=AA=E5=9B=BE)=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7?= =?UTF-8?q?=E8=A1=A5=E5=85=85fastcheck=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF?= =?UTF-8?q?=20=E6=98=AF=E5=90=A6=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8?= =?UTF-8?q?docs=E4=BB=93=E5=BA=93=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20?= =?UTF-8?q?=20=20=20->=20=E6=97=A0=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= 
=?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= =?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= =?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs.out | 170 +++++++++++++++++- .../b_compatibility_time_funcs3.out | 65 +++---- contrib/dolphin/expected/partition_test1.out | 3 +- contrib/dolphin/expected/test_dayofweek.out | 91 ++++++++++ .../include/plugin_parser/parse_type.h | 6 + contrib/dolphin/include/plugin_utils/date.h | 9 +- contrib/dolphin/include/plugin_utils/fmgr.h | 13 ++ .../dolphin/include/plugin_utils/timestamp.h | 10 +- .../test_trigger_ddl_import_and_export.source | 4 +- .../test_trigger_ddl_import_and_export.source | 24 +-- .../dolphin/plugin_parser/parse_coerce.cpp | 45 ++++- contrib/dolphin/plugin_parser/parse_type.cpp | 38 ++++ contrib/dolphin/plugin_utils/adt/date.cpp | 54 +++++- .../dolphin/plugin_utils/adt/timestamp.cpp | 135 +++++++++++++- contrib/dolphin/plugin_utils/fmgr/fmgr.cpp | 74 ++++++++ .../b_compatibility_time_funcs.sql | 23 +++ .../b_compatibility_time_funcs3.sql | 1 + contrib/dolphin/sql/test_dayofweek.sql | 14 ++ 18 files changed, 696 insertions(+), 83 deletions(-) diff --git 
a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 32546cee4..4cc8c6b45 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -645,6 +645,7 @@ CONTEXT: referenced column: hour 0 (1 row) +set dolphin.b_compatibility_mode = true; -- date_bool&time_bool select date_bool('0000-00-00'); date_bool @@ -665,7 +666,7 @@ LINE 1: select date_bool('0000-00-01'); CONTEXT: referenced column: date_bool date_bool ----------- - f + (1 row) select time_bool('00:00:01'); @@ -681,7 +682,7 @@ LINE 1: select date_bool('0000-01-00'); CONTEXT: referenced column: date_bool date_bool ----------- - f + (1 row) select time_bool('00:01:00'); @@ -709,7 +710,7 @@ LINE 1: select date_bool('0001-00-00'); CONTEXT: referenced column: date_bool date_bool ----------- - f + (1 row) select time_bool('01:00:00'); @@ -766,6 +767,169 @@ select time_bool('-838:59:59'); t (1 row) +-- 异常显示空测试 +select timestamp('2022-05'); +WARNING: invalid input syntax for type timestamp: "2022-05" +CONTEXT: referenced column: timestamp +WARNING: date/time field value out of range +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select timestamp('2022-05-05'); + timestamp +--------------------- + 2022-05-05 00:00:00 +(1 row) + +select timestamp('2022-05-05 1'); + timestamp +--------------------- + 2022-05-05 01:00:00 +(1 row) + +select timestamp('2023-13-15 1'); +WARNING: invalid input syntax for type timestamp: "2023-13-15 1" +CONTEXT: referenced column: timestamp +WARNING: date/time field value out of range +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select timestamp('2023-13-15 1:65'); +WARNING: date/time field value out of range: "2023-13-15 1:65" +CONTEXT: referenced column: timestamp +WARNING: date/time field value out of range +CONTEXT: referenced 
column: timestamp + timestamp +----------- + +(1 row) + +select timestamp('2022-05-05 1:55:61'); +WARNING: date/time field value out of range: "2022-05-05 1:55:61" +CONTEXT: referenced column: timestamp +WARNING: date/time field value out of range +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select date('2022-15-05'); +WARNING: Incorrect datetime value: "2022-15-05" +CONTEXT: referenced column: date + date +------ + +(1 row) + +select date('2022-05-35'); +WARNING: Incorrect datetime value: "2022-05-35" +CONTEXT: referenced column: date + date +------ + +(1 row) + +select date('2022-05-05 01'); + date +------------ + 2022-05-05 +(1 row) + +select date('2022-05-05 01-20-30'); + date +------------ + 2022-05-05 +(1 row) + +select date('2022-05-05 20:59'); + date +------------ + 2022-05-05 +(1 row) + +select date('2022-05-05 20:60'); +WARNING: Incorrect datetime value: "2022-05-05 20:60" +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-15-05' as date); +WARNING: date/time field value out of range: "2022-15-05" +LINE 1: select cast('2022-15-05' as date); + ^ +HINT: Perhaps you need a different "datestyle" setting. +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-35' as date); +WARNING: date/time field value out of range: "2022-05-35" +LINE 1: select cast('2022-05-35' as date); + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-05 01' as date); +WARNING: invalid input syntax for type date: "2022-05-05 01" +LINE 1: select cast('2022-05-05 01' as date); + ^ +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-05 20-70' as date); +WARNING: time zone displacement out of range: "2022-05-05 20-70" +LINE 1: select cast('2022-05-05 20-70' as date); + ^ +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-05 20:70' as date); +WARNING: date/time field value out of range: "2022-05-05 20:70" +LINE 1: select cast('2022-05-05 20:70' as date); + ^ +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-05 20:60' as date); +WARNING: date/time field value out of range: "2022-05-05 20:60" +LINE 1: select cast('2022-05-05 20:60' as date); + ^ +CONTEXT: referenced column: date + date +------ + +(1 row) + +select cast('2022-05-05 20:59' as date); + date +------------ + 2022-05-05 +(1 row) + reset dolphin.sql_mode; drop schema b_time_funcs cascade; NOTICE: drop cascades to table func_test diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index d02aee709..564b36a35 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -400,6 +400,7 @@ CONTEXT: referenced column: to_seconds (1 row) -- è¿›ä½å’Œéžæ³•值 +set dolphin.b_compatibility_mode = true; set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; create table test1(a time); insert into test1 values('23:65:66'); @@ -529,28 +530,24 @@ select time'23:55:56.1234'; select cast('23:65:66' as time); WARNING: date/time field value out of range: "23:65:66" -LINE 1: select cast('23:65:66' as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:06:06 + time 
+------ + (1 row) select cast('23:65'as time); WARNING: date/time field value out of range: "23:65" -LINE 1: select cast('23:65'as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:05:00 + time +------ + (1 row) select cast('23-65' as time); WARNING: time zone displacement out of range: "23-65" -LINE 1: select cast('23-65' as time); - ^ +CONTEXT: referenced column: time +WARNING: time zone displacement out of range: "23-65" CONTEXT: referenced column: time time ---------- @@ -565,12 +562,10 @@ select cast('23:59:59.8888' as time); select cast('23:65:66.8888' as time); WARNING: date/time field value out of range: "23:65:66.8888" -LINE 1: select cast('23:65:66.8888' as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:06:07 + time +------ + (1 row) select cast('0:0:0' as time); @@ -581,8 +576,8 @@ select cast('0:0:0' as time); select cast('-1:-1:-1' as time); WARNING: time zone displacement out of range: "-1:-1:-1" -LINE 1: select cast('-1:-1:-1' as time); - ^ +CONTEXT: referenced column: time +WARNING: time zone displacement out of range: "-1:-1:-1" CONTEXT: referenced column: time time ----------- @@ -728,28 +723,24 @@ select time'23:55:56.1234'; select cast('23:65:66' as time); WARNING: date/time field value out of range: "23:65:66" -LINE 1: select cast('23:65:66' as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:06:06 + time +------ + (1 row) select cast('23:65'as time); WARNING: date/time field value out of range: "23:65" -LINE 1: select cast('23:65'as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:05:00 + time +------ + (1 row) select cast('23-65' as time); WARNING: time zone displacement out of range: "23-65" -LINE 1: select cast('23-65' as time); - ^ +CONTEXT: referenced column: time +WARNING: time zone displacement out of range: "23-65" CONTEXT: referenced column: time time ---------- @@ -764,12 +755,10 @@ select cast('23:59:59.8888' as time); select cast('23:65:66.8888' as time); 
WARNING: date/time field value out of range: "23:65:66.8888" -LINE 1: select cast('23:65:66.8888' as time); - ^ CONTEXT: referenced column: time - time ----------- - 24:06:07 + time +------ + (1 row) select cast('0:0:0' as time); @@ -780,8 +769,8 @@ select cast('0:0:0' as time); select cast('-1:-1:-1' as time); WARNING: time zone displacement out of range: "-1:-1:-1" -LINE 1: select cast('-1:-1:-1' as time); - ^ +CONTEXT: referenced column: time +WARNING: time zone displacement out of range: "-1:-1:-1" CONTEXT: referenced column: time time ----------- diff --git a/contrib/dolphin/expected/partition_test1.out b/contrib/dolphin/expected/partition_test1.out index 0fb9cd5b7..4b890de32 100644 --- a/contrib/dolphin/expected/partition_test1.out +++ b/contrib/dolphin/expected/partition_test1.out @@ -383,6 +383,7 @@ explain select * from test_subpart where ((980 < c and ((980 < c and c < 1000) o Iterations: 3, Sub Iterations: 9 -> Partitioned Bitmap Heap Scan on test_subpart (cost=13.22..29.32 rows=22 width=16) Recheck Cond: ((((980 < c) AND (c < 1000) AND (980 < c)) OR ((2180 < c) AND (c < 2200) AND (980 < c))) OR ((2180 < c) AND (c < 2200))) + Filter: (((980 < c) AND (((980 < c) AND (c < 1000)) OR ((2180 < c) AND (c < 2200)))) OR ((2180 < c) AND (c < 2200))) Selected Partitions: 1..3 Selected Subpartitions: ALL -> BitmapOr (cost=13.22..13.22 rows=40 width=0) @@ -399,7 +400,7 @@ explain select * from test_subpart where ((980 < c and ((980 < c and c < 1000) o Index Cond: ((2180 < c) AND (c < 2200)) Selected Partitions: 1..3 Selected Subpartitions: ALL -(20 rows) +(21 rows) select * from test_subpart where ((980 < c and c < 1000) or (2180 < c and c < 2200)); a | b | c | d diff --git a/contrib/dolphin/expected/test_dayofweek.out b/contrib/dolphin/expected/test_dayofweek.out index 4ccf33d6c..df399b9b2 100644 --- a/contrib/dolphin/expected/test_dayofweek.out +++ b/contrib/dolphin/expected/test_dayofweek.out @@ -202,6 +202,97 @@ select * from t_datetime; 0000-00-00 00:00:00 (3 
rows) +set dolphin.b_compatibility_mode = true; +select dayofmonth('20:38:40'); +WARNING: invalid input syntax for type timestamp: "20:38:40" +LINE 1: select dayofmonth('20:38:40'); + ^ +CONTEXT: referenced column: dayofmonth + dayofmonth +------------ + +(1 row) + +select dayofmonth(time'20:38:40'); + dayofmonth +------------ +--?.* +(1 row) + +select dayofmonth(203840); +WARNING: timestamp out of range +CONTEXT: referenced column: dayofmonth + dayofmonth +------------ + +(1 row) + +select dayofmonth(time'203840'); + dayofmonth +------------ +--?.* +(1 row) + +select dayofyear('20:38:40'); +WARNING: invalid input syntax for type timestamp: "20:38:40" +LINE 1: select dayofyear('20:38:40'); + ^ +CONTEXT: referenced column: dayofyear + dayofyear +----------- + +(1 row) + +select dayofyear(time'20:38:40'); + dayofyear +----------- +--?.* +(1 row) + +select dayofyear(203840); +WARNING: timestamp out of range +CONTEXT: referenced column: dayofyear + dayofyear +----------- + +(1 row) + +select dayofyear(time'203840'); + dayofyear +----------- +--?.* +(1 row) + +select quarter('20:38:40'); +WARNING: invalid input syntax for type timestamp: "20:38:40" +LINE 1: select quarter('20:38:40'); + ^ +CONTEXT: referenced column: quarter + quarter +--------- + +(1 row) + +select quarter(time'20:38:40'); + quarter +--------- +--?.* +(1 row) + +select quarter(203840); +WARNING: timestamp out of range +CONTEXT: referenced column: quarter + quarter +--------- + +(1 row) + +select quarter(time'203840'); + quarter +--------- +--?.* +(1 row) + reset current_schema; drop schema if exists test_dayofweek cascade; NOTICE: drop cascades to 2 other objects diff --git a/contrib/dolphin/include/plugin_parser/parse_type.h b/contrib/dolphin/include/plugin_parser/parse_type.h index cd17400ab..6f208c86b 100644 --- a/contrib/dolphin/include/plugin_parser/parse_type.h +++ b/contrib/dolphin/include/plugin_parser/parse_type.h @@ -52,10 +52,16 @@ extern Oid typeTypeRelid(Type typ); extern Oid 
typeTypeCollation(Type typ); extern Datum stringTypeDatum(Type tp, char* string, int32 atttypmod, bool can_ignore = false); #ifdef DOLPHIN +extern Datum stringTypeDatumCompatibleNullResult(Type tp, char* string, int32 atttypmod, bool can_ignore, + CoercionContext ccontext, bool* result_isnull); char* makeEnumTypeName(const char* relname, const char *colname, const char* schemaname); void DefineAnonymousEnum(TypeName * typname, Oid collations); extern Datum GetEnumDefineStr(Oid enumOid); extern bool IsAnonymousEnum(Oid enumOid); +extern Datum InputFunctionCallCompatibleNullResult(FmgrInfo* flinfo, char* str, Oid typioparam, int32 typmod, + bool can_ignore, Oid collation, CoercionContext ccontext, bool* result_isnull); +extern Datum OidInputFunctionCallCompatibleNullResult(Oid functionId, char* str, Oid typioparam, int32 typmod, + bool can_ignore, CoercionContext ccontext, bool* result_isnull); #endif extern Oid typeidTypeRelid(Oid type_id); diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index bbf37d393..85f09601e 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -55,6 +55,7 @@ #define TIME_MAX_FRAC 999999 #define DATE_ALL_ZERO_VALUE (-2451508) #define JDATE_ALL_ZERO_VALUE (DATE_ALL_ZERO_VALUE + POSTGRES_EPOCH_JDATE) +#define MONTH_TO_QUARTER_RADIX 3 #endif /* for b compatibility type*/ @@ -84,7 +85,7 @@ extern void adjust_time_range(pg_tm *tm, fsec_t &fsec, bool &warnings); extern TimeADT time_in_with_flag(char *str, unsigned int date_flag); extern bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag); extern bool date_add_interval(DateADT date, Interval *span, DateADT *result); -extern Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst); +extern Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_error_type); extern "C" Datum time_float(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum 
date_enum(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum timestamp_enum(PG_FUNCTION_ARGS); @@ -98,12 +99,6 @@ typedef struct DateTimeFormat const char *time_format; }DateTimeFormat; -typedef enum -{ - TIME_CORRECT = 0, - TIME_IGNORED_INCORRECT, - TIME_INCORRECT -}TimeErrorType; typedef enum { diff --git a/contrib/dolphin/include/plugin_utils/fmgr.h b/contrib/dolphin/include/plugin_utils/fmgr.h index a6fd7a5c4..5a806120a 100644 --- a/contrib/dolphin/include/plugin_utils/fmgr.h +++ b/contrib/dolphin/include/plugin_utils/fmgr.h @@ -27,6 +27,8 @@ #include "fmgr/fmgr_core.h" #include "lib/stringinfo.h" #include "access/tupdesc.h" +#include "nodes/primnodes.h" + #ifndef FRONTEND_PARSER @@ -168,6 +170,7 @@ typedef struct FunctionCallInfoData { RefcusorInfoData refcursor_data; UDFInfoType udfInfo; StartWithFuncEvalInfo swinfo; + CoercionContext ccontext; FunctionCallInfoData() { @@ -182,6 +185,7 @@ typedef struct FunctionCallInfoData { nargs = 0; isnull = false; can_ignore = false; + ccontext = COERCION_UNKNOWN; } } FunctionCallInfoData; @@ -663,4 +667,13 @@ inline bool eval_simple_op(Datatype dataVal1, Datatype dataVal2) extern void CopyCursorInfoData(Cursor_Data* target_data, Cursor_Data* source_data); +#ifdef DOLPHIN +extern bool is_allow_null_result(CoercionContext ccontext); +extern void CheckNullResultCompatibleNullResult(Oid oid, bool isnull, char* str, CoercionContext ccontext); +extern Datum InputFunctionCallCompatibleNullResult(FmgrInfo* flinfo, char* str, Oid typioparam, int32 typmod, + bool can_ignore, Oid collation, CoercionContext ccontext, bool* result_isnull); +extern Datum OidInputFunctionCallCompatibleNullResult(Oid functionId, char* str, Oid typioparam, int32 typmod, + bool can_ignore, CoercionContext ccontext, bool* result_isnull); +#endif + #endif /* FMGR_H */ diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index e8a77bcbe..4b763f0ba 100644 --- 
a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -135,9 +135,17 @@ extern bool datetime_in_with_sql_mode_internal(char *str, struct pg_tm *tm, fsec extern bool datetime_in_range(Timestamp datetime); extern int128 timestamp_int128(Timestamp timestamp); extern int128 timestamptz_int128(TimestampTz timestampTz); -extern Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst); +extern Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst); extern TimestampTz time2timestamptz(TimeADT timeVal); extern TimestampTz timetz2timestamptz(TimeTzADT* timetzVal); +typedef enum { + TIME_CORRECT = 0, + TIME_IGNORED_INCORRECT, + TIME_INCORRECT +} TimeErrorType; + + +Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErrorType* time_error_type); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); #endif diff --git a/contrib/dolphin/input/test_trigger_ddl_import_and_export.source b/contrib/dolphin/input/test_trigger_ddl_import_and_export.source index aaa9c52c9..53c573ea8 100644 --- a/contrib/dolphin/input/test_trigger_ddl_import_and_export.source +++ b/contrib/dolphin/input/test_trigger_ddl_import_and_export.source @@ -1,8 +1,8 @@ --create trigger测试 drop database if exists dump_trigger_db; drop database if exists restore_trigger_db; -create database dump_trigger_db with dbcompatibility = 'B'; -create database restore_trigger_db with dbcompatibility = 'B'; +create database dump_trigger_db with dbcompatibility = 'B' lc_collate 'C'; +create database restore_trigger_db with dbcompatibility = 'B' lc_collate 'C'; \c dump_trigger_db --test definer,if not exists create table t_create_trigger_001(c int); diff --git a/contrib/dolphin/output/test_trigger_ddl_import_and_export.source b/contrib/dolphin/output/test_trigger_ddl_import_and_export.source index 6287a8c35..63f83e39b 100644 --- a/contrib/dolphin/output/test_trigger_ddl_import_and_export.source +++ 
b/contrib/dolphin/output/test_trigger_ddl_import_and_export.source @@ -3,8 +3,8 @@ drop database if exists dump_trigger_db; NOTICE: database "dump_trigger_db" does not exist, skipping drop database if exists restore_trigger_db; NOTICE: database "restore_trigger_db" does not exist, skipping -create database dump_trigger_db with dbcompatibility = 'B'; -create database restore_trigger_db with dbcompatibility = 'B'; +create database dump_trigger_db with dbcompatibility = 'B' lc_collate 'C'; +create database restore_trigger_db with dbcompatibility = 'B' lc_collate 'C'; \c dump_trigger_db --test definer,if not exists create table t_create_trigger_001(c int); @@ -44,11 +44,11 @@ show lc_collate; (1 row) show create trigger trigger001; - Trigger | sql_mode | SQL Original Statement | character_set_client | collation_connection | Database Collation -------------+------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------+-------------------- - trigger001 | sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length | CREATE DEFINER = test_trigger_definer TRIGGER trigger001 AFTER INSERT ON t_create_trigger_001 FOR EACH ROW DECLARE begin+| UTF8 | C | C - | | insert into t_create_trigger_002(c) values(3); +| | | - | | end; | | | + Trigger | sql_mode | SQL Original Statement | character_set_client | collation_connection | Database Collation +------------+------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------+-------------------- + trigger001 | 
sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length | CREATE DEFINER = test_trigger_definer TRIGGER trigger001 AFTER INSERT ON t_create_trigger_001 FOR EACH ROW DECLARE begin+| UTF8 | C | C + | | insert into t_create_trigger_002(c) values(3); +| | | + | | end; | | | (1 row) --导入导出 @@ -71,11 +71,11 @@ show lc_collate; (1 row) show create trigger trigger001; - Trigger | sql_mode | SQL Original Statement | character_set_client | collation_connection | Database Collation -------------+------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------+-------------------- - trigger001 | sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length | CREATE DEFINER = test_trigger_definer TRIGGER trigger001 AFTER INSERT ON t_create_trigger_001 FOR EACH ROW DECLARE begin+| UTF8 | C | C - | | insert into t_create_trigger_002(c) values(3); +| | | - | | end; | | | + Trigger | sql_mode | SQL Original Statement | character_set_client | collation_connection | Database Collation +------------+------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------+-------------------- + trigger001 | sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length | CREATE DEFINER = test_trigger_definer TRIGGER trigger001 AFTER INSERT ON t_create_trigger_001 FOR EACH ROW DECLARE begin+| UTF8 | C | C + | | insert into t_create_trigger_002(c) values(3); +| | | + | | end; | | | (1 row) \c postgres diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp 
b/contrib/dolphin/plugin_parser/parse_coerce.cpp index b4e729121..281b9d985 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -512,6 +512,25 @@ static Datum stringTypeDatum_with_collation(Type tp, char* string, int32 atttypm return result; } +#ifdef DOLPHIN +static Datum stringTypeDatumCompatibleNullResult_with_collation(Type tp, char* string, int32 atttypmod, + bool can_ignore, Oid collation, CoercionContext ccontext, bool* result_isnull) +{ + Datum result; + int tmp_encoding = get_valid_charset_by_collation(collation); + int db_encoding = GetDatabaseEncoding(); + + if (tmp_encoding == db_encoding) { + return stringTypeDatumCompatibleNullResult(tp, string, atttypmod, can_ignore, ccontext, result_isnull); + } + + DB_ENCODING_SWITCH_TO(tmp_encoding); + result = stringTypeDatumCompatibleNullResult(tp, string, atttypmod, can_ignore, ccontext, result_isnull); + DB_ENCODING_SWITCH_BACK(db_encoding); + return result; +} +#endif + #ifdef DOLPHIN static bool hasTextCoercePath(Oid* srcoid, Oid destoid, CoercionContext ccontext, bool* changed) { @@ -697,14 +716,28 @@ Node* coerce_type(ParseState* pstate, Node* node, Oid inputTypeId, Oid targetTyp * We assume here that UNKNOWN's internal representation is the same * as CSTRING. 
*/ - if (!con->constisnull) { - newcon->constvalue = stringTypeDatum_with_collation(targetType, DatumGetCString(con->constvalue), - inputTypeMod, pstate != NULL && pstate->p_has_ignore, con->constcollid); +#ifdef DOLPHIN + if (ENABLE_B_CMPT_MODE) { + if (!con->constisnull) { + newcon->constvalue = stringTypeDatumCompatibleNullResult_with_collation(targetType, + DatumGetCString(con->constvalue), inputTypeMod, pstate != NULL && pstate->p_has_ignore, + con->constcollid, ccontext, &newcon->constisnull); + } else { + newcon->constvalue = stringTypeDatumCompatibleNullResult(targetType, NULL, + inputTypeMod, pstate != NULL && pstate->p_has_ignore, ccontext, &newcon->constisnull); + } } else { - newcon->constvalue = - stringTypeDatum(targetType, NULL, inputTypeMod, pstate != NULL && pstate->p_has_ignore); +#endif + if (!con->constisnull) { + newcon->constvalue = stringTypeDatum_with_collation(targetType, DatumGetCString(con->constvalue), + inputTypeMod, pstate != NULL && pstate->p_has_ignore, con->constcollid); + } else { + newcon->constvalue = + stringTypeDatum(targetType, NULL, inputTypeMod, pstate != NULL && pstate->p_has_ignore); + } +#ifdef DOLPHIN } - +#endif cancel_parser_errposition_callback(&pcbstate); result = (Node*)newcon; diff --git a/contrib/dolphin/plugin_parser/parse_type.cpp b/contrib/dolphin/plugin_parser/parse_type.cpp index b6196fe74..d78849fce 100644 --- a/contrib/dolphin/plugin_parser/parse_type.cpp +++ b/contrib/dolphin/plugin_parser/parse_type.cpp @@ -1012,6 +1012,44 @@ Datum stringTypeDatum(Type tp, char* string, int32 atttypmod, bool can_ignore) return result; } + + +#ifdef DOLPHIN +Datum stringTypeDatumCompatibleNullResult(Type tp, char* string, int32 atttypmod, bool can_ignore, + CoercionContext ccontext, bool* result_isnull) +{ + Form_pg_type typform = (Form_pg_type)GETSTRUCT(tp); + Oid typinput = typform->typinput; + Oid typioparam = getTypeIOParam(tp); + Datum result; + result = OidInputFunctionCallCompatibleNullResult(typinput, string, 
typioparam, atttypmod, can_ignore, + ccontext, result_isnull); +#ifdef RANDOMIZE_ALLOCATED_MEMORY + + /* + * For pass-by-reference data types, repeat the conversion to see if the + * input function leaves any uninitialized bytes in the result. We can + * only detect that reliably if RANDOMIZE_ALLOCATED_MEMORY is enabled, so + * we don't bother testing otherwise. The reason we don't want any + * instability in the input function is that comparison of Const nodes + * relies on bytewise comparison of the datums, so if the input function + * leaves garbage then subexpressions that should be identical may not get + * recognized as such. See pgsql-hackers discussion of 2008-04-04. + */ + if (string && !typform->typbyval) { + Datum result2; + + result2 = OidInputFunctionCallCompatibleNullResult(typinput, string, typioparam, atttypmod, + ccontext, result_isnull); + if (!datumIsEqual(result, result2, typform->typbyval, typform->typlen)) { + elog(WARNING, "type %s has unstable input conversion for \"%s\"", NameStr(typform->typname), string); + } + } +#endif + return result; +} +#endif + /* given a typeid, return the type's typrelid (associated relation, if any) */ Oid typeidTypeRelid(Oid type_id) { diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index dd430c526..89724b8c4 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -454,15 +454,23 @@ Datum b_db_statement_start_time(PG_FUNCTION_ARGS) Datum date_in(PG_FUNCTION_ARGS) #ifdef DOLPHIN { - return date_internal(fcinfo, false); + Datum result; + TimeErrorType time_error_type = TIME_CORRECT; + result = date_internal(fcinfo, false, &time_error_type); + if ((fcinfo->ccontext == COERCION_IMPLICIT || fcinfo->ccontext == COERCION_EXPLICIT) && + time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return result; } Datum date_cast(PG_FUNCTION_ARGS) { - return date_internal(fcinfo, true); + TimeErrorType time_error_type = 
TIME_CORRECT; + return date_internal(fcinfo, true, &time_error_type); } -Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst) +Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_error_type) #endif { char* str = PG_GETARG_CSTRING(0); @@ -509,6 +517,9 @@ Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst) /* * if reporting warning in DateTimeParseError, return 1970-01-01 */ +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); } if (dterr == 0) { @@ -521,6 +532,9 @@ Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst) } if (dterr != 0) { DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, is_date_sconst); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); } switch (dtype) { @@ -2768,13 +2782,45 @@ Datum time_part(PG_FUNCTION_ARGS) break; } + case DTK_DAY: + { + int tz; + if (timestamp2tm(GetCurrentTimestamp(), &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + result = tm->tm_mday; + break; + } + + case DTK_DOY: + { + int tz; + if (timestamp2tm(GetCurrentTimestamp(), &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + result = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - date2j(tm->tm_year, 1, 1) + 1); + break; + } + + case DTK_QUARTER: + { + int tz; + if (timestamp2tm(GetCurrentTimestamp(), &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + result = (tm->tm_mon - 1) / MONTH_TO_QUARTER_RADIX + 1; + break; + } #endif + case DTK_TZ: case DTK_TZ_MINUTE: case DTK_TZ_HOUR: +#ifndef DOLPHIN case DTK_DAY: - case DTK_MONTH: case DTK_QUARTER: +#endif + case DTK_MONTH: case DTK_YEAR: case DTK_DECADE: case DTK_CENTURY: diff --git 
a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index c23652223..eba8c59ef 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -331,8 +331,15 @@ extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); #endif /* b format datetime and timestamp type */ +#ifdef DOLPHIN +static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, + bool can_ignore, TimeErrorType* time_error_type); +static int64 integer_b_format_timestamp(bool hasTz, int64 ts, + bool can_ignore, TimeErrorType* time_error_type); +#else static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore); static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore); +#endif static void fillZeroBeforeNumericTimestamp(char *str, char *buf); /* common code for timestamptypmodin and timestamptztypmodin */ @@ -869,7 +876,12 @@ int NumberTimestamp(char *str, pg_tm *tm, fsec_t *fsec) return dterr; } +#ifdef DOLPHIN +static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore, + TimeErrorType* time_error_type) +#else static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore) +#endif { Timestamp result; struct pg_tm tt, *tm = &tt; @@ -902,6 +914,9 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t if (int32_b_format_time_internal(tm, true, time, &fsec) || int32_b_format_date_internal(tm, date, true)){ ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif return TIMESTAMP_ZERO; } @@ -911,49 +926,100 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t if (tm2timestamp(tm, fsec, &tz, &result) != 0) { ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), 
errmsg("timestamp out of range"))); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif return TIMESTAMP_ZERO; } } else { if (tm2timestamp(tm, fsec, NULL, &result) != 0) { ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif return TIMESTAMP_ZERO; } } return result; } +#ifdef DOLPHIN +static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore, TimeErrorType* time_error_type) +{ + TimestampTz result; + result = int64_b_format_timestamp_internal(hasTz, ts, 0, can_ignore, time_error_type); + PG_RETURN_TIMESTAMP(result); +} +#else static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore) { TimestampTz result; result = int64_b_format_timestamp_internal(hasTz, ts, 0, can_ignore); PG_RETURN_TIMESTAMP(result); } +#endif + #ifdef DOLPHIN +Datum timestamp_to_datum(PG_FUNCTION_ARGS, bool hasTz, int64 ts) +{ + TimeErrorType time_error_type = TIME_CORRECT; + int64 result = integer_b_format_timestamp(hasTz, ts, fcinfo->can_ignore, &time_error_type); + if (fcinfo->ccontext == COERCION_IMPLICIT && time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + PG_RETURN_TIMESTAMP(result); +} + Datum int8_b_format_datetime(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); - PG_RETURN_TIMESTAMP(integer_b_format_timestamp(false, ts, fcinfo->can_ignore)); + return timestamp_to_datum(fcinfo, false, ts); } Datum int8_b_format_timestamp(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); - PG_RETURN_TIMESTAMP(integer_b_format_timestamp(true, ts, fcinfo->can_ignore)); + return timestamp_to_datum(fcinfo, true, ts); } Datum int16_b_format_datetime(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); - PG_RETURN_TIMESTAMP(integer_b_format_timestamp(false, ts, fcinfo->can_ignore)); + return timestamp_to_datum(fcinfo, false, ts); } Datum int16_b_format_timestamp(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); - 
PG_RETURN_TIMESTAMP(integer_b_format_timestamp(true, ts, fcinfo->can_ignore)); + return timestamp_to_datum(fcinfo, true, ts); } -#endif + +Datum int32_b_format_datetime(PG_FUNCTION_ARGS) +{ + int64 ts = PG_GETARG_INT64(0); + return timestamp_to_datum(fcinfo, false, ts); +} + +Datum int32_b_format_timestamp(PG_FUNCTION_ARGS) +{ + int64 ts = PG_GETARG_INT64(0); + return timestamp_to_datum(fcinfo, true, ts); +} + +Datum int64_b_format_datetime(PG_FUNCTION_ARGS) +{ + int64 ts = PG_GETARG_INT64(0); + return timestamp_to_datum(fcinfo, false, ts); +} + +Datum int64_b_format_timestamp(PG_FUNCTION_ARGS) +{ + int64 ts = PG_GETARG_INT64(0); + return timestamp_to_datum(fcinfo, true, ts); +} + +#else Datum int32_b_format_datetime(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); @@ -977,6 +1043,7 @@ Datum int64_b_format_timestamp(PG_FUNCTION_ARGS) int64 ts = PG_GETARG_INT64(0); PG_RETURN_TIMESTAMP(integer_b_format_timestamp(true, ts, fcinfo->can_ignore)); } +#endif /* timestamp_out() * Convert a timestamp to external form. @@ -1494,6 +1561,29 @@ static void check_timestamp_overflow(Timestamp* time, bool isWithTz) * Convert a string to internal form. 
*/ Datum timestamptz_in(PG_FUNCTION_ARGS) +#ifdef DOLPHIN +{ + TimeErrorType time_error_type = TIME_CORRECT; + Datum result = timestamptz_internal(fcinfo, false, &time_error_type); + if (time_error_type == TIME_INCORRECT && + (fcinfo->ccontext == COERCION_IMPLICIT || fcinfo->ccontext == COERCION_EXPLICIT)) { + PG_RETURN_NULL(); + } + return result; +} + +Datum timestamptz_implicit(PG_FUNCTION_ARGS) +{ + TimeErrorType time_error_type = TIME_CORRECT; + Datum result = timestamptz_internal(fcinfo, true, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return result; +} + +Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErrorType* time_error_type) +#endif { char* str = PG_GETARG_CSTRING(0); @@ -1529,11 +1619,14 @@ Datum timestamptz_in(PG_FUNCTION_ARGS) } else { dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", fcinfo->can_ignore); + DateTimeParseError(dterr, str, "timestamp", is_timestamptz_sconst || fcinfo->can_ignore); /* * if error ignorable, function DateTimeParseError reports warning instead, then return current timestamp. 
*/ - PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif + PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } if (dterr == 0) { if (nf == 1 && ftype[0] == DTK_NUMBER) { @@ -1546,7 +1639,10 @@ Datum timestamptz_in(PG_FUNCTION_ARGS) } } if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", fcinfo->can_ignore); + DateTimeParseError(dterr, str, "timestamp", is_timestamptz_sconst || fcinfo->can_ignore); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } switch (dtype) { @@ -5157,6 +5253,10 @@ Datum timestamptz_part(PG_FUNCTION_ARGS) break; case DTK_QUARTER: +#ifdef DOLPHIN + if (timestamp == TIMESTAMP_ZERO) + PG_RETURN_NULL(); +#endif result = (tm->tm_mon - 1) / 3 + 1; break; @@ -5226,6 +5326,10 @@ Datum timestamptz_part(PG_FUNCTION_ARGS) break; case DTK_DOY: +#ifdef DOLPHIN + if (timestamp == TIMESTAMP_ZERO) + PG_RETURN_NULL(); +#endif if (timestamp2tm(timestamp, &tz, tm, &fsec, NULL, NULL) != 0) ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); result = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - date2j(tm->tm_year, 1, 1) + 1); @@ -7819,9 +7923,22 @@ Datum timestamp_param1(PG_FUNCTION_ARGS) if (datetime >= B_FORMAT_TIMESTAMP_MIN_VALUE && datetime <= B_FORMAT_TIMESTAMP_MAX_VALUE) PG_RETURN_TIMESTAMP(datetime); - ereport(ERROR, + if (ENABLE_B_CMPT_MODE) { + /* + * the case for invalid timestamp value compatible with b db: + * select timestamp('xxxx') : should return null + * insert xxx values (timestamp('xxxx')) : should throw exeception on strict mode, + * and should return null on non-strict mode + */ + int level = (fcinfo->can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR; + ereport(level, + (errcode(ERRCODE_INVALID_DATETIME_FORMAT), + errmsg("date/time field value out of range"))); + } else { + ereport(ERROR, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("date/time field value out of range"))); + } PG_RETURN_NULL(); } diff --git a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp index 4568e8481..d99f0150b 100644 --- a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp +++ b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp @@ -2353,6 +2353,80 @@ Datum OidInputFunctionCall(Oid functionId, char* str, Oid typioparam, int32 typm return InputFunctionCall(&flinfo, str, typioparam, typmod, can_ignore); } + +#ifdef DOLPHIN +bool is_allow_null_result(CoercionContext ccontext) +{ + return ccontext == COERCION_IMPLICIT || ccontext == COERCION_EXPLICIT; +} + +void CheckNullResultCompatibleNullResult(Oid oid, bool isnull, char* str, CoercionContext ccontext) +{ + if (str == NULL) { + if (!isnull) { + ereport(ERROR, (errmodule(MOD_EXECUTOR), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("input function %u returned non-NULL", oid))); + } + } else { + if (isnull && !is_allow_null_result(ccontext)) { + ereport(ERROR, (errmodule(MOD_EXECUTOR), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("input function %u returned NULL", oid))); + } + } +} + +Datum InputFunctionCallCompatibleNullResult(FmgrInfo* flinfo, char* str, Oid typioparam, int32 typmod, + bool can_ignore, Oid collation, CoercionContext ccontext, bool* result_isnull) +{ + FunctionCallInfoData fcinfo; + Datum result; + bool pushed = false; + + if (str == NULL && flinfo->fn_strict) { + return (Datum)0; /* just return null result */ + } + + SPI_STACK_LOG("push cond", NULL, NULL); + pushed = SPI_push_conditional(); + + InitFunctionCallInfoData(fcinfo, flinfo, 3, InvalidOid, NULL, NULL); + + fcinfo.arg[0] = CStringGetDatum(str); + fcinfo.arg[1] = ObjectIdGetDatum(typioparam); + fcinfo.arg[2] = Int32GetDatum(typmod); + fcinfo.argnull[0] = (str == 
NULL); + fcinfo.argnull[1] = false; + fcinfo.argnull[2] = false; + fcinfo.can_ignore = can_ignore; + fcinfo.fncollation = collation; + fcinfo.ccontext = ccontext; + result = FunctionCallInvoke(&fcinfo); + + /* Should get null result if and only if str is NULL */ + CheckNullResultCompatibleNullResult(fcinfo.flinfo->fn_oid, fcinfo.isnull, str, ccontext); + + if (result_isnull != NULL) { + *result_isnull = fcinfo.isnull; + } + + SPI_STACK_LOG("pop cond", NULL, NULL); + SPI_pop_conditional(pushed); + + return result; +} + +Datum OidInputFunctionCallCompatibleNullResult(Oid functionId, char* str, Oid typioparam, int32 typmod, + bool can_ignore, CoercionContext ccontext, bool* result_isnull) +{ + FmgrInfo flinfo; + + fmgr_info(functionId, &flinfo); + return InputFunctionCallCompatibleNullResult(&flinfo, str, typioparam, typmod, can_ignore, InvalidOid, + ccontext, result_isnull); +} +#endif + + char* OidOutputFunctionCall(Oid functionId, Datum val) { FmgrInfo flinfo; diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql index 0ff35fb10..07dd7f59b 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql @@ -269,6 +269,8 @@ select hour(''); select hour('abc'); select hour('1234abc'); +set dolphin.b_compatibility_mode = true; + -- date_bool&time_bool select date_bool('0000-00-00'); select time_bool('00:00:00'); @@ -289,6 +291,27 @@ select date_bool('2020-12-31 BC'); select time_bool('838:59:59'); select time_bool('-838:59:59'); +-- 异常显示空测试 +select timestamp('2022-05'); +select timestamp('2022-05-05'); +select timestamp('2022-05-05 1'); +select timestamp('2023-13-15 1'); +select timestamp('2023-13-15 1:65'); +select timestamp('2022-05-05 1:55:61'); +select date('2022-15-05'); +select date('2022-05-35'); +select date('2022-05-05 01'); +select date('2022-05-05 01-20-30'); +select date('2022-05-05 
20:59'); +select date('2022-05-05 20:60'); +select cast('2022-15-05' as date); +select cast('2022-05-35' as date); +select cast('2022-05-05 01' as date); +select cast('2022-05-05 20-70' as date); +select cast('2022-05-05 20:70' as date); +select cast('2022-05-05 20:60' as date); +select cast('2022-05-05 20:59' as date); + reset dolphin.sql_mode; drop schema b_time_funcs cascade; diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index d2135e8c2..f5b8fb16b 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -193,6 +193,7 @@ select to_seconds('-838:59:59'); select to_seconds(-8385959); -- è¿›ä½å’Œéžæ³•值 +set dolphin.b_compatibility_mode = true; set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; create table test1(a time); insert into test1 values('23:65:66'); diff --git a/contrib/dolphin/sql/test_dayofweek.sql b/contrib/dolphin/sql/test_dayofweek.sql index e64f0e56d..75b507864 100644 --- a/contrib/dolphin/sql/test_dayofweek.sql +++ b/contrib/dolphin/sql/test_dayofweek.sql @@ -40,5 +40,19 @@ insert ignore into t_datetime values('14:14:14'); select * from t_timestamp; select * from t_datetime; +set dolphin.b_compatibility_mode = true; +select dayofmonth('20:38:40'); +select dayofmonth(time'20:38:40'); +select dayofmonth(203840); +select dayofmonth(time'203840'); +select dayofyear('20:38:40'); +select dayofyear(time'20:38:40'); +select dayofyear(203840); +select dayofyear(time'203840'); +select quarter('20:38:40'); +select quarter(time'20:38:40'); +select quarter(203840); +select quarter(time'203840'); + reset current_schema; drop schema if exists test_dayofweek cascade; -- Gitee From 5ac717d5d41d47ef8fd71cbe5dd0daf70e480cf9 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 3 Nov 2023 16:59:14 +0800 Subject: [PATCH 040/434] 
=?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddate=E5=85=A5=E5=8F=82=E6=B5=8B=E8=AF=95-?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B=E4=B8=BAtime=E6=97=B6?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E6=89=A7=E8=A1=8C=E5=A4=B1=E8=B4=A5=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddate=E5=85=A5=E5=8F=82=E6=B5=8B=E8=AF=95-?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B=E4=B8=BAtime=E6=97=B6?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E6=89=A7=E8=A1=8C=E5=A4=B1=E8=B4=A5.=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E4=B9=8B=E5=89=8Dday=E5=87=BD=E6=95=B0=E6=B2=A1=E6=9C=89time?= =?UTF-8?q?=E7=9A=84=E5=85=A5=E5=8F=82=EF=BC=8C=E8=B5=B0=E4=BA=86time->tim?= =?UTF-8?q?estamp->number->day=E7=9A=84=E6=B5=81=E7=A8=8B=EF=BC=8Ctime?= =?UTF-8?q?=E5=88=B0timestamp=E4=B8=A2=E5=A4=B1=E4=BA=86=E6=97=A5=E6=9C=9F?= =?UTF-8?q?=E4=BF=A1=E6=81=AF=EF=BC=8C=E5=AF=BC=E8=87=B4=E7=BB=93=E6=9E=9C?= =?UTF-8?q?=E4=B8=8D=E5=AF=B9=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E6=96=B9=E6=A1=88=E3=80=91:=20=E9=87=8D=E6=96=B0=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E4=B8=80=E4=B8=AAtime=E5=8F=82=E6=95=B0=E7=9A=84day?= =?UTF-8?q?=E5=87=BD=E6=95=B0=EF=BC=8C=E8=BF=94=E5=9B=9E=E5=BD=93=E5=89=8D?= =?UTF-8?q?=E7=9A=84=E6=97=B6=E9=97=B4=E5=A4=A9=E6=95=B0=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:=20https://e.gitee.com/opengaussorg/dashboard=3Fissue?= =?UTF-8?q?=3DI8BHOO=20=E3=80=90=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C?= =?UTF-8?q?=E6=8A=A5=E5=91=8A=E3=80=91:=20=E8=AF=B7=E9=99=84=E4=B8=8A?= =?UTF-8?q?=E8=87=AA=E9=AA=8C=E7=BB=93=E6=9E=9C(=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E6=88=96=E8=80=85=E6=88=AA=E5=9B=BE)=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7?= 
=?UTF-8?q?=E8=A1=A5=E5=85=85fastcheck=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF?= =?UTF-8?q?=20=E6=98=AF=E5=90=A6=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8?= =?UTF-8?q?docs=E4=BB=93=E5=BA=93=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20?= =?UTF-8?q?=20=20=20->=20=E6=97=A0=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= =?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= =?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/test_dayofweek.out | 18 ++++++++++++++++++ contrib/dolphin/plugin_utils/adt/timestamp.cpp | 13 +++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 2 ++ contrib/dolphin/sql/test_dayofweek.sql | 3 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 3 +++ 5 files changed, 39 insertions(+) diff --git a/contrib/dolphin/expected/test_dayofweek.out b/contrib/dolphin/expected/test_dayofweek.out index df399b9b2..f1b6088a8 100644 --- 
a/contrib/dolphin/expected/test_dayofweek.out +++ b/contrib/dolphin/expected/test_dayofweek.out @@ -293,6 +293,24 @@ select quarter(time'203840'); --?.* (1 row) +select day('10:10:10'); + day +----- + 10 +(1 row) + +select day('2002-01-01 11:12:12'::time); + day +----- +--?.* +(1 row) + +select dayofmonth('2002-01-01 11:12:12'::time); + dayofmonth +------------ +--?.* +(1 row) + reset current_schema; drop schema if exists test_dayofweek cascade; NOTICE: drop cascades to 2 other objects diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index eba8c59ef..ee2ca3d49 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -276,6 +276,8 @@ PG_FUNCTION_INFO_V1_PUBLIC(b_db_date_numeric); extern "C" DLL_PUBLIC Datum b_db_date_numeric(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(dayofmonth_text); extern "C" DLL_PUBLIC Datum dayofmonth_text(PG_FUNCTION_ARGS); +extern "C" DLL_PUBLIC Datum dayofmonth_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(dayofmonth_time); PG_FUNCTION_INFO_V1_PUBLIC(dayofmonth_numeric); extern "C" DLL_PUBLIC Datum dayofmonth_numeric(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(week_text); @@ -8422,6 +8424,17 @@ Datum dayofmonth_numeric(PG_FUNCTION_ARGS) PG_RETURN_INT32(result_tm->tm_mday); } +Datum dayofmonth_time(PG_FUNCTION_ARGS) +{ + pg_tm tt, *tm = &tt; + fsec_t fsec; + int tz; + if (timestamp2tm(GetCurrentTimestamp(), &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + PG_RETURN_INT32(tm->tm_mday); +} + /* b_db_sumdays() * @param year specified year * @param month specified month diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index c16d246db..1dbbed42f 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ 
b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -10,6 +10,8 @@ DROP CAST IF EXISTS (TEXT AS time); DROP FUNCTION IF EXISTS pg_catalog.time_cast_implicit(TEXT); DROP FUNCTION IF EXISTS pg_catalog.text_time_explicit(TEXT); +DROP FUNCTION IF EXISTS pg_catalog.day(time without time zone); + DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); diff --git a/contrib/dolphin/sql/test_dayofweek.sql b/contrib/dolphin/sql/test_dayofweek.sql index 75b507864..f82e578e1 100644 --- a/contrib/dolphin/sql/test_dayofweek.sql +++ b/contrib/dolphin/sql/test_dayofweek.sql @@ -53,6 +53,9 @@ select quarter('20:38:40'); select quarter(time'20:38:40'); select quarter(203840); select quarter(time'203840'); +select day('10:10:10'); +select day('2002-01-01 11:12:12'::time); +select dayofmonth('2002-01-01 11:12:12'::time); reset current_schema; drop schema if exists test_dayofweek cascade; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 7827d9169..757bf33fe 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -20,6 +20,9 @@ CREATE OR REPLACE FUNCTION pg_catalog.text_time_explicit(TEXT) RETURNS time with CREATE CAST(TEXT AS time) WITH FUNCTION time_cast_implicit(TEXT) AS ASSIGNMENT; +DROP FUNCTION IF EXISTS pg_catalog.day(time without time zone); +CREATE OR REPLACE FUNCTION pg_catalog.day(time without time zone) RETURNS int4 LANGUAGE C STABLE RETURNS NULL ON NULL INPUT as '$libdir/dolphin', 'dayofmonth_time'; + --CREATE TIME_TIMESTAMP'S COMPARATION FUNCTION DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamp (time, timestamp without time zone) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.time_eq_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE 
C STABLE STRICT as '$libdir/dolphin', 'time_eq_timestamp'; -- Gitee From 7b7283153e27584c5c9857d673206cbb928de28e Mon Sep 17 00:00:00 2001 From: Mijamind Date: Sun, 5 Nov 2023 16:05:25 +0800 Subject: [PATCH 041/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91spq=E5=A4=9A=E6=9C=BA=E7=B4=A2=E5=BC=95?= =?UTF-8?q?=E5=88=9B=E5=BB=BA=E6=B7=BB=E5=8A=A0root=5Fctid=E5=AD=97?= =?UTF-8?q?=E6=AE=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../spq_optimizer_util/translate/CTranslatorUtils.h | 2 +- contrib/spq_plugin/src/guc_spq.cpp | 10 ++++++++++ .../libnaucrates/include/naucrates/dxl/xml/dxltokens.h | 1 + .../spq_optimizer/libnaucrates/src/xml/dxltokens.cpp | 1 + .../translate/CTranslatorRelcacheToDXL.cpp | 9 +++++++++ .../spq_optimizer_util/translate/CTranslatorUtils.cpp | 10 ++++++++-- contrib/spq_plugin/src/spqplugin.cpp | 8 +++++++- 7 files changed, 37 insertions(+), 4 deletions(-) diff --git a/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorUtils.h b/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorUtils.h index 83f82a5f7..f96c92962 100644 --- a/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorUtils.h +++ b/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorUtils.h @@ -15,7 +15,7 @@ #ifndef SPQDXL_CTranslatorUtils_H #define SPQDXL_CTranslatorUtils_H -#define SPQDXL_SYSTEM_COLUMNS 10 +#define SPQDXL_SYSTEM_COLUMNS 11 #include "postgres.h" diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 450e7e4e0..d8b63a429 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1120,6 +1120,16 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_transaction", + "Enable spq execution in transaction.", + NULL, + &u_sess->attr.attr_spq.spq_enable_transaction, + false, + PGC_USERSET, + 0, + NULL, + 
NULL, + NULL); } static void InitSpqConfigureNamesInt() diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/dxltokens.h b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/dxltokens.h index 888436b60..044651031 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/dxltokens.h +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/dxl/xml/dxltokens.h @@ -362,6 +362,7 @@ enum Edxltoken EdxltokenXmaxColName, EdxltokenCmaxColName, EdxltokenTableOidColName, + EdxltokenRootCtidColName, EdxltokenXCNidAttribute, EdxltokenBidAttribute, EdxltokenUidAttribute, diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/dxltokens.cpp b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/dxltokens.cpp index 3a0078795..cd074819c 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/dxltokens.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/xml/dxltokens.cpp @@ -408,6 +408,7 @@ CDXLTokens::Init(CMemoryPool *mp) {EdxltokenXCNidAttribute, SPQOS_WSZ_LIT("xc_node_id")}, {EdxltokenBidAttribute, SPQOS_WSZ_LIT("tablebucketid")}, {EdxltokenUidAttribute, SPQOS_WSZ_LIT("gs_tuple_uid")}, + {EdxltokenRootCtidColName, SPQOS_WSZ_LIT("_root_ctid")}, {EdxltokenActionColId, SPQOS_WSZ_LIT("ActionCol")}, {EdxltokenCtidColId, SPQOS_WSZ_LIT("CtidCol")}, diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp index 4faadf2ad..0930dafb4 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp @@ -2651,6 +2651,13 @@ CTranslatorRelcacheToDXL::GenerateStatsForSystemCols( distinct_remaining = num_rows; break; } + case RootSelfItemPointerAttributeNumber: // _root_ctid + { + is_col_stats_missing = false; 
+ freq_remaining = CDouble(1.0); + distinct_remaining = num_rows; + break; + } default: break; } @@ -3286,7 +3293,9 @@ CTranslatorRelcacheToDXL::RetrieveRelKeysets(CMemoryPool *mp, OID oid, } ULONG ctid_pos = GetAttributePosition(SelfItemPointerAttributeNumber, attno_mapping); + ULONG root_ctid_pos = GetAttributePosition(RootSelfItemPointerAttributeNumber, attno_mapping); key_set->Append(SPQOS_NEW(mp) ULONG(ctid_pos)); + key_set->Append(SPQOS_NEW(mp) ULONG(root_ctid_pos)); key_sets->Append(key_set); } diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorUtils.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorUtils.cpp index 583416938..8404ba4a1 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorUtils.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorUtils.cpp @@ -807,6 +807,8 @@ CTranslatorUtils::GetSystemColName(AttrNumber attno) case UidAttributeNumber: return CDXLTokens::GetDXLTokenStr(EdxltokenUidAttribute); + case RootSelfItemPointerAttributeNumber: + return CDXLTokens::GetDXLTokenStr(EdxltokenRootCtidColName); default: SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiPlStmt2DXLConversion, @@ -848,7 +850,9 @@ CTranslatorUtils::GetSystemColType(CMemoryPool *mp, AttrNumber attno) case MaxCommandIdAttributeNumber: // cid type return SPQOS_NEW(mp) CMDIdSPQDB(IMDId::EmdidGeneral, SPQDB_CID); - + case RootSelfItemPointerAttributeNumber: + // tid type + return SPQOS_NEW(mp) CMDIdSPQDB(IMDId::EmdidGeneral, SPQDB_TID); case XC_NodeIdAttributeNumber: return SPQOS_NEW(mp) CMDIdSPQDB(IMDId::EmdidGeneral, SPQDB_NID); case BucketIdAttributeNumber: @@ -896,7 +900,9 @@ CTranslatorUtils::GetSystemColLength(AttrNumber attno) return 2; case UidAttributeNumber: return 8; - + case RootSelfItemPointerAttributeNumber: + // tid type + return 6; default: SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiPlStmt2DXLConversion, SPQOS_WSZ_LIT("Invalid attribute number")); diff --git 
a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index c7eb5d6f3..8b7a32a17 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -159,7 +159,7 @@ static bool should_spq_planner(Query *parse) return false; } - if (IsTransactionBlock()) { + if (!u_sess->attr.attr_spq.spq_enable_transaction && IsTransactionBlock()) { elog(DEBUG1, "sql in transaction can`t run on spq node"); return false; } @@ -190,6 +190,12 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b // if build spq plan fail go back result = spq_planner(parse, boundParams); if (result == nullptr) { + if (unlikely(cursorOptions & CURSOR_OPT_SPQ_FORCE)) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("expected a spq plan but get a normal gauss plan."))); + } t_thrd.spq_ctx.spq_role = ROLE_UTILITY; } else { set_default_stream(); -- Gitee From 1911e9b08b70acefe21872dd9f89a98f43636335 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Tue, 7 Nov 2023 15:40:25 +0800 Subject: [PATCH 042/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dstr=5Fto=5Fdate=E5=85=A5=E5=8F=82=E4=B8=BA?= =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=97=B6=E8=A1=A8=E7=8E=B0=E5=92=8Cmysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82?= =?UTF-8?q?=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dstr=5Fto=5Fdate=E5=85=A5=E5=8F=82=E4=B8=BA?= =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=97=B6=E8=A1=A8=E7=8E=B0=E5=92=8Cmysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E5=85=B6=E5=AE=9E=E6=A0=B9=E6=9C=AC=E5=8E=9F=E5=9B=A0=E6=98=AF?= =?UTF-8?q?mysql=205.7.44=E7=89=88=E6=9C=AC=E7=9A=84=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E6=A0=A1=E9=AA=8C=E6=96=B9=E5=BC=8F=E6=9C=89=E5=8F=98=E5=8A=A8?= 
=?UTF-8?q?=EF=BC=8C=E5=AF=BC=E8=87=B4=E9=83=A8=E5=88=86=E9=9D=9E=E6=B3=95?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E7=9A=84=E7=8E=B0=E8=B1=A1=E5=92=8Cmysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E3=80=82=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=8F=82=E8=80=83mysql?= =?UTF-8?q?=205.7.44=E7=89=88=E6=9C=AC=E7=9A=84=E5=8F=82=E6=95=B0=E6=A0=A1?= =?UTF-8?q?=E9=AA=8C=E5=B0=B1=E8=A1=8C=E4=BF=AE=E6=94=B9=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:https://e.gitee.com/opengaussorg/dashboard=3Fissue=3D?= =?UTF-8?q?I8D400=20=E3=80=90=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C=E6=8A=A5?= =?UTF-8?q?=E5=91=8A=E3=80=91:=20=E8=AF=B7=E9=99=84=E4=B8=8A=E8=87=AA?= =?UTF-8?q?=E9=AA=8C=E7=BB=93=E6=9E=9C(=E5=86=85=E5=AE=B9=E6=88=96?= =?UTF-8?q?=E8=80=85=E6=88=AA=E5=9B=BE)=20=E6=98=AF=E5=90=A6=E5=8F=AF?= =?UTF-8?q?=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95=E7=94=A8?= =?UTF-8?q?=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7=E8=A1=A5?= =?UTF-8?q?=E5=85=85fastcheck=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF=20?= =?UTF-8?q?=E6=98=AF=E5=90=A6=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8docs?= =?UTF-8?q?=E4=BB=93=E5=BA=93=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20=20=20?= =?UTF-8?q?=20->=20=E6=97=A0=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= 
=?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= =?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= =?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 585 ++++++++++++++++++ .../dolphin/include/plugin_utils/datetime.h | 3 + .../dolphin/include/plugin_utils/timestamp.h | 6 + .../dolphin/plugin_utils/adt/timestamp.cpp | 23 +- .../b_compatibility_time_funcs3.sql | 86 +++ 5 files changed, 700 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index 564b36a35..0c8a0a21b 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -793,6 +793,591 @@ CONTEXT: referenced column: addtime 00:00:24 (1 row) +-- str_to_date +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +select str_to_date('20231103','%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023113','%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03','%Y-%m-%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023,11,03','%Y,%m,%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('03,11,2023','%d,%m,%Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03 17','%Y-%m-%d'); +WARNING: Truncated incorrect date 
value: '2023-11-03 17' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d'); +WARNING: Truncated incorrect date value: '2023-11-03 17:30:00' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-13-03','%Y-%m-%d'); +WARNING: Incorrect date value: '2023-13-03' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023-November-3', '%Y-%M-%e'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('November,3,2023', '%M,%e,%Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('November 3 2023', '%M %d %Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('Friday, November 3, 2023', '%W,%M %e, %Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('20231103 173050','%Y%m%d %h%i%s'); +WARNING: Incorrect datetime value: '20231103 173050' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103 1730','%Y%m%d %h%i'); +WARNING: Incorrect datetime value: '20231103 1730' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103 17','%Y%m%d %h'); +WARNING: Incorrect datetime value: '20231103 17' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d %H:%i:%s'); + str_to_date +--------------------- + 2023-11-03 17:30:00 +(1 row) + +select str_to_date('2023,11,03 17:30:10', '%Y,%m,%d %h:%i:%s'); +WARNING: Incorrect datetime value: '2023,11,03 17:30:10' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('03,11,2023 
dummy','%d,%m,%Y'); +WARNING: Truncated incorrect date value: '03,11,2023 dummy' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('20231103 1730 dummy','%Y%m%d %h%i'); +WARNING: Truncated incorrect datetime value: '20231103 1730 dummy' +CONTEXT: referenced column: str_to_date +WARNING: Incorrect datetime value: '20231103 1730 dummy' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103', '%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('0000-1-1 00:00:00', '%Y-%m-%d'); +WARNING: Truncated incorrect date value: '0000-1-1 00:00:00' +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0000-1-1 00:00:00' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023','%Y'); +WARNING: Incorrect date value: '2023' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('0000','%Y'); +WARNING: Incorrect date value: '0000' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('9999','%Y'); +WARNING: Incorrect date value: '9999' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('11','%h'); + str_to_date +------------- + 11:00:00 +(1 row) + +select str_to_date('13','%h'); +WARNING: Incorrect time value: '13' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('1730','%h%i'); +WARNING: Incorrect time value: '1730' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('173005','%h%i%s'); +WARNING: Incorrect time value: '173005' for 
function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('November,11,2023', '%M %e %Y'); +WARNING: Incorrect date value: 'November,11,2023' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('Friday, November 3, 2023', '%W %M %e %Y'); +WARNING: Incorrect date value: 'Friday, November 3, 2023' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('10000-11-03', '%Y-%m-%d'); +WARNING: Incorrect date value: '10000-11-03' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('202311', '%Y%m%d'); +WARNING: Incorrect date value: '202311' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('0000-00-00', '%Y-%m-%d'); +WARNING: Incorrect date value: '0000-00-00' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('0000-01-00', '%Y-%m-%d'); +WARNING: Incorrect date value: '0000-01-00' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('0000-00-01', '%Y-%m-%d'); +WARNING: Incorrect date value: '0000-00-01' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023-02-31', '%Y-%m-%d'); +WARNING: Incorrect date value: '2023-02-31' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('31/11/22', '%d/%m/%Y'); +WARNING: Incorrect date value: '31/11/22' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); +WARNING: 
Incorrect date value: '2023-02-31' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); +WARNING: Incorrect datetime value: '2004.09.12 10:61:59' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); +WARNING: Incorrect date value: '0000-00-00' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes'; +select str_to_date('20231103','%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023113','%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03','%Y-%m-%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023,11,03','%Y,%m,%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('03,11,2023','%d,%m,%Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03 17','%Y-%m-%d'); +WARNING: Truncated incorrect date value: '2023-11-03 17' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d'); +WARNING: Truncated incorrect date value: '2023-11-03 17:30:00' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('2023-13-03','%Y-%m-%d'); +WARNING: Incorrect date value: '2023-13-03' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023-November-3', '%Y-%M-%e'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('November,3,2023', '%M,%e,%Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('November 3 
2023', '%M %d %Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('Friday, November 3, 2023', '%W,%M %e, %Y'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('20231103 173050','%Y%m%d %h%i%s'); +WARNING: Incorrect datetime value: '20231103 173050' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103 1730','%Y%m%d %h%i'); +WARNING: Incorrect datetime value: '20231103 1730' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103 17','%Y%m%d %h'); +WARNING: Incorrect datetime value: '20231103 17' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d %H:%i:%s'); + str_to_date +--------------------- + 2023-11-03 17:30:00 +(1 row) + +select str_to_date('2023,11,03 17:30:10', '%Y,%m,%d %h:%i:%s'); +WARNING: Incorrect datetime value: '2023,11,03 17:30:10' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('03,11,2023 dummy','%d,%m,%Y'); +WARNING: Truncated incorrect date value: '03,11,2023 dummy' +CONTEXT: referenced column: str_to_date + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('20231103 1730 dummy','%Y%m%d %h%i'); +WARNING: Truncated incorrect datetime value: '20231103 1730 dummy' +CONTEXT: referenced column: str_to_date +WARNING: Incorrect datetime value: '20231103 1730 dummy' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('20231103', '%Y%m%d'); + str_to_date +------------- + 2023-11-03 +(1 row) + +select str_to_date('0000-1-1 00:00:00', '%Y-%m-%d'); +WARNING: Truncated incorrect date value: '0000-1-1 00:00:00' +CONTEXT: referenced column: str_to_date + str_to_date 
+------------- + 0000-01-01 +(1 row) + +select str_to_date('2023','%Y'); + str_to_date +------------- + 2023-00-00 +(1 row) + +select str_to_date('0000','%Y'); + str_to_date +------------- + 0000-00-00 +(1 row) + +select str_to_date('9999','%Y'); + str_to_date +------------- + 9999-00-00 +(1 row) + +select str_to_date('11','%h'); + str_to_date +------------- + 11:00:00 +(1 row) + +select str_to_date('13','%h'); +WARNING: Incorrect time value: '13' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('1730','%h%i'); +WARNING: Incorrect time value: '1730' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('173005','%h%i%s'); +WARNING: Incorrect time value: '173005' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('November,11,2023', '%M %e %Y'); +WARNING: Incorrect date value: 'November,11,2023' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('Friday, November 3, 2023', '%W %M %e %Y'); +WARNING: Incorrect date value: 'Friday, November 3, 2023' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('10000-11-03', '%Y-%m-%d'); +WARNING: Incorrect date value: '10000-11-03' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +select str_to_date('202311', '%Y%m%d'); + str_to_date +------------- + 2023-11-00 +(1 row) + +select str_to_date('0000-00-00', '%Y-%m-%d'); + str_to_date +------------- + 0000-00-00 +(1 row) + +select str_to_date('0000-01-00', '%Y-%m-%d'); + str_to_date +------------- + 0000-01-00 +(1 row) + +select str_to_date('0000-00-01', '%Y-%m-%d'); + str_to_date +------------- + 0000-00-01 +(1 row) + +select 
str_to_date('2023-02-31', '%Y-%m-%d'); +WARNING: Incorrect date value: '2023-02-31' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('31/11/22', '%d/%m/%Y'); +WARNING: Incorrect date value: '31/11/22' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); +WARNING: Incorrect date value: '2023-02-31' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); +WARNING: Incorrect datetime value: '2004.09.12 10:61:59' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date +------------- + +(1 row) + +SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); + str_to_date +------------- + 0000-00-00 +(1 row) + drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/include/plugin_utils/datetime.h b/contrib/dolphin/include/plugin_utils/datetime.h index ca83944e8..fe123bd03 100644 --- a/contrib/dolphin/include/plugin_utils/datetime.h +++ b/contrib/dolphin/include/plugin_utils/datetime.h @@ -124,6 +124,9 @@ extern bool cstring_to_datetime(const char* str, time_flags flags, int &tm_type extern void DateTimeParseErrorWithFlag(int dterr, const char* str, const char* datatype, bool can_ignore = false, bool is_error = false); extern void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype, int level); + +bool CheckDateRange(const pg_tm *tm, bool not_zero_date, time_flags flags); + #endif extern bool datetime_add_nanoseconds_with_round(pg_tm *tm, fsec_t &fsec, int nano); extern bool cstring_to_tm(const char *expr, pg_tm *tm, fsec_t &fsec, int* tzp = NULL, int* invalid_tz = NULL); diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h 
b/contrib/dolphin/include/plugin_utils/timestamp.h index 4b763f0ba..3e3e43270 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -148,6 +148,12 @@ typedef enum { Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErrorType* time_error_type); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); + +static inline bool non_zero_date(const pg_tm *ltime) +{ + return ltime->tm_year || ltime->tm_mon || ltime->tm_mday; +} + #endif extern Datum datetime_text(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index ee2ca3d49..d2876e3c9 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -10058,6 +10058,22 @@ static inline Datum make_text_result(int return_type ,struct pg_tm *tm, fsec_t f return CStringGetTextDatum(buf); } +time_flags sql_mode_to_time_flags() +{ + if (SQL_MODE_NO_ZERO_DATE()) { + return TIME_NO_ZERO_IN_DATE; + } else { + return TIME_FUZZY_DATE; + } +} + +static bool date_should_be_null(int target_type, const pg_tm* time, time_flags fuzzy_date) +{ + return (fuzzy_date & TIME_NO_ZERO_IN_DATE) != 0 && + (target_type != DTK_TIME) && + (time->tm_year == 0 || time->tm_mon == 0 || time->tm_mday == 0); +} + /** * compatibility of str_to_date */ @@ -10333,7 +10349,8 @@ Datum str_to_date(PG_FUNCTION_ARGS) // a simple quick range check if (tm->tm_mon > MONTHS_PER_YEAR || tm->tm_mday > DAYNUM_BIGMON || - tm->tm_hour >= HOURS_PER_DAY || tm->tm_min >= MINS_PER_HOUR || tm->tm_sec >= SECS_PER_MINUTE) + tm->tm_hour >= HOURS_PER_DAY || tm->tm_min >= MINS_PER_HOUR || tm->tm_sec >= SECS_PER_MINUTE || + !CheckDateRange(tm, non_zero_date(tm), sql_mode_to_time_flags())) goto err; if (return_type == DTK_TIME && tm->tm_mday) { @@ -10341,9 +10358,9 @@ Datum str_to_date(PG_FUNCTION_ARGS) tm->tm_mday = 0; } - // range check - if 
(!final_range_check(return_type, tm, &fsec)) + if (date_should_be_null(return_type, tm, sql_mode_to_time_flags())) { goto err; + } // make the text result result = make_text_result(return_type, tm, fsec, buf); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index f5b8fb16b..81acdef17 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -289,5 +289,91 @@ select cast('-1:-1:-1' as time); select cast('23:55:56.1234' as time); select addtime('12aaa43', '12aa43'); +-- str_to_date +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +select str_to_date('20231103','%Y%m%d'); +select str_to_date('2023113','%Y%m%d'); +select str_to_date('2023-11-03','%Y-%m-%d'); +select str_to_date('2023,11,03','%Y,%m,%d'); +select str_to_date('03,11,2023','%d,%m,%Y'); +select str_to_date('2023-11-03 17','%Y-%m-%d'); +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d'); +select str_to_date('2023-13-03','%Y-%m-%d'); +select str_to_date('2023-November-3', '%Y-%M-%e'); +select str_to_date('November,3,2023', '%M,%e,%Y'); +select str_to_date('November 3 2023', '%M %d %Y'); +select str_to_date('Friday, November 3, 2023', '%W,%M %e, %Y'); +select str_to_date('20231103 173050','%Y%m%d %h%i%s'); +select str_to_date('20231103 1730','%Y%m%d %h%i'); +select str_to_date('20231103 17','%Y%m%d %h'); +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d %H:%i:%s'); +select str_to_date('2023,11,03 17:30:10', '%Y,%m,%d %h:%i:%s'); +select str_to_date('03,11,2023 dummy','%d,%m,%Y'); +select str_to_date('20231103 1730 dummy','%Y%m%d %h%i'); +select str_to_date('20231103', '%Y%m%d'); +select str_to_date('0000-1-1 00:00:00', '%Y-%m-%d'); +select str_to_date('2023','%Y'); +select str_to_date('0000','%Y'); +select str_to_date('9999','%Y'); +select 
str_to_date('11','%h'); +select str_to_date('13','%h'); +select str_to_date('1730','%h%i'); +select str_to_date('173005','%h%i%s'); +select str_to_date('November,11,2023', '%M %e %Y'); +select str_to_date('Friday, November 3, 2023', '%W %M %e %Y'); +select str_to_date('10000-11-03', '%Y-%m-%d'); +select str_to_date('202311', '%Y%m%d'); +select str_to_date('0000-00-00', '%Y-%m-%d'); +select str_to_date('0000-01-00', '%Y-%m-%d'); +select str_to_date('0000-00-01', '%Y-%m-%d'); +select str_to_date('2023-02-31', '%Y-%m-%d'); +SELECT STR_TO_DATE('31/11/22', '%d/%m/%Y'); +SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); +SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); +SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); + + +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes'; +select str_to_date('20231103','%Y%m%d'); +select str_to_date('2023113','%Y%m%d'); +select str_to_date('2023-11-03','%Y-%m-%d'); +select str_to_date('2023,11,03','%Y,%m,%d'); +select str_to_date('03,11,2023','%d,%m,%Y'); +select str_to_date('2023-11-03 17','%Y-%m-%d'); +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d'); +select str_to_date('2023-13-03','%Y-%m-%d'); +select str_to_date('2023-November-3', '%Y-%M-%e'); +select str_to_date('November,3,2023', '%M,%e,%Y'); +select str_to_date('November 3 2023', '%M %d %Y'); +select str_to_date('Friday, November 3, 2023', '%W,%M %e, %Y'); +select str_to_date('20231103 173050','%Y%m%d %h%i%s'); +select str_to_date('20231103 1730','%Y%m%d %h%i'); +select str_to_date('20231103 17','%Y%m%d %h'); +select str_to_date('2023-11-03 17:30:00','%Y-%m-%d %H:%i:%s'); +select str_to_date('2023,11,03 17:30:10', '%Y,%m,%d %h:%i:%s'); +select str_to_date('03,11,2023 dummy','%d,%m,%Y'); +select str_to_date('20231103 1730 dummy','%Y%m%d %h%i'); +select str_to_date('20231103', '%Y%m%d'); +select str_to_date('0000-1-1 00:00:00', '%Y-%m-%d'); +select str_to_date('2023','%Y'); +select str_to_date('0000','%Y'); +select 
str_to_date('9999','%Y'); +select str_to_date('11','%h'); +select str_to_date('13','%h'); +select str_to_date('1730','%h%i'); +select str_to_date('173005','%h%i%s'); +select str_to_date('November,11,2023', '%M %e %Y'); +select str_to_date('Friday, November 3, 2023', '%W %M %e %Y'); +select str_to_date('10000-11-03', '%Y-%m-%d'); +select str_to_date('202311', '%Y%m%d'); +select str_to_date('0000-00-00', '%Y-%m-%d'); +select str_to_date('0000-01-00', '%Y-%m-%d'); +select str_to_date('0000-00-01', '%Y-%m-%d'); +select str_to_date('2023-02-31', '%Y-%m-%d'); +SELECT STR_TO_DATE('31/11/22', '%d/%m/%Y'); +SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); +SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); +SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); + drop schema b_time_funcs3 cascade; reset current_schema; -- Gitee From 3d254d5aedd6b9dcf1e6944f04445ef952cf2149 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Tue, 7 Nov 2023 18:01:48 +0800 Subject: [PATCH 043/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dselect=20timestamp'xxxx'=E5=9C=BA=E6=99=AF?= =?UTF-8?q?=EF=BC=8C=E5=AF=B9=E4=BA=8E=E9=9D=9E=E6=B3=95=E5=80=BC=EF=BC=8C?= =?UTF-8?q?OG=E6=98=BE=E7=A4=BA=E4=B8=BA0=EF=BC=8CMysql=E4=B8=BA=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=EF=BC=8C=E4=B8=A4=E8=80=85=E8=A1=8C=E4=B8=BA=E4=B8=8D?= =?UTF-8?q?=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E5=9C=A8gram.?= =?UTF-8?q?y=E6=96=87=E4=BB=B6=E4=B8=ADselect=20timestamp''=E7=9A=84?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E4=B8=8B=E6=98=BE=E5=BC=8F=E8=B0=83=E7=94=A8?= =?UTF-8?q?timestamp=5Fcast=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86?= =?UTF-8?q?=E6=9E=90=E3=80=91:=20=E5=85=B6=E5=AE=9E=E4=B8=BB=E8=A6=81?= =?UTF-8?q?=E6=98=AF=E6=AD=A4mr=E6=BC=8F=E6=94=B9https://gitee.com/opengau?= =?UTF-8?q?ss/Plugin/pulls/1055=20=E6=BC=8F=E6=94=B9gram.y=E6=96=87?= =?UTF-8?q?=E4=BB=B6=E4=BA=86=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9?= 
=?UTF-8?q?=E6=A1=88=E3=80=91:=20=E4=BF=AE=E6=94=B9gram.y=E5=9C=A8timestam?= =?UTF-8?q?p''=E7=9A=84=E5=9C=BA=E6=99=AF=E4=B8=8B=E6=98=BE=E5=BC=8F?= =?UTF-8?q?=E8=B0=83=E7=94=A8timestamp=5Fcast=EF=BC=8C=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E7=9A=84=E4=B8=BA=E9=85=8D=E5=A5=97=E4=BF=AE=E6=94=B9=E3=80=82?= =?UTF-8?q?=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:=20https://e.gitee.com/opengaussorg/dashboard=3Fissue?= =?UTF-8?q?=3DI8ED23=20=E3=80=90=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C?= =?UTF-8?q?=E6=8A=A5=E5=91=8A=E3=80=91:=20=E8=AF=B7=E9=99=84=E4=B8=8A?= =?UTF-8?q?=E8=87=AA=E9=AA=8C=E7=BB=93=E6=9E=9C(=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E6=88=96=E8=80=85=E6=88=AA=E5=9B=BE)=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7?= =?UTF-8?q?=E8=A1=A5=E5=85=85fastcheck=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF?= =?UTF-8?q?=20=E6=98=AF=E5=90=A6=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=EF=BC=8C=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8?= =?UTF-8?q?docs=E4=BB=93=E5=BA=93=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20?= =?UTF-8?q?=20=20=20->=20=E6=97=A0=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= 
=?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= =?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../builtin_funcs/b_compatibility_time_funcs.out | 10 ++++++++++ contrib/dolphin/plugin_parser/gram.y | 15 ++++++++++++--- contrib/dolphin/plugin_parser/parse_target.cpp | 3 ++- contrib/dolphin/plugin_utils/adt/timestamp.cpp | 2 +- .../builtin_funcs/b_compatibility_time_funcs.sql | 4 ++++ 5 files changed, 29 insertions(+), 5 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 4cc8c6b45..7a26aa7d2 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -930,6 +930,16 @@ select cast('2022-05-05 20:59' as date); 2022-05-05 (1 row) +select timestamp'2022-05'; +ERROR: invalid input syntax for type timestamp: "2022-05" +CONTEXT: referenced column: timestamp +select timestamp'2022-15-05 16:20:10'; +ERROR: date/time field value out of range: "2022-15-05 16:20:10" +HINT: Perhaps you need a different "datestyle" setting. 
+CONTEXT: referenced column: timestamp +select timestamp'2022-05-05 16:60:10'; +ERROR: date/time field value out of range: "2022-05-05 16:60:10" +CONTEXT: referenced column: timestamp reset dolphin.sql_mode; drop schema b_time_funcs cascade; NOTICE: drop cascades to table func_test diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 482ec26c5..22f568daa 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -36946,9 +36946,18 @@ AexprConst_without_Sconst: Iconst } | TIMESTAMP SCONST { - TypeName * tmp = SystemTypeName("timestamp"); - tmp->location = @1; - $$ = makeStringConstCast($2, @2, tmp); + FuncCall *n = makeNode(FuncCall); + n->funcname = SystemFuncName("timestamp_cast"); + n->colname = pstrdup("timestamp"); + n->args = list_make4(makeStringConst($2, @2), makeIntConst(-1, -1), makeIntConst(-1, -1), makeBoolAConst(FALSE, -1)); + n->agg_order = NIL; + n->agg_star = FALSE; + n->agg_distinct = FALSE; + n->func_variadic = FALSE; + n->over = NULL; + n->location = @1; + n->call_func = false; + $$ = (Node *)n; } | TIMESTAMP WITH_TIME ZONE SCONST { diff --git a/contrib/dolphin/plugin_parser/parse_target.cpp b/contrib/dolphin/plugin_parser/parse_target.cpp index 4d097974b..a39d17600 100644 --- a/contrib/dolphin/plugin_parser/parse_target.cpp +++ b/contrib/dolphin/plugin_parser/parse_target.cpp @@ -1649,7 +1649,8 @@ static int FigureColnameInternal(Node* node, char** name) #ifdef DOLPHIN /* to make the last displayed column name as the type name instead of the function name. 
*/ if (strcmp(strVal(llast(((FuncCall*)node)->funcname)), "time_cast") == 0 || - strcmp(strVal(llast(((FuncCall*)node)->funcname)), "date_cast") == 0) { + strcmp(strVal(llast(((FuncCall*)node)->funcname)), "date_cast") == 0 || + strcmp(strVal(llast(((FuncCall*)node)->funcname)), "timestamp_cast") == 0) { return 1; } #endif diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index d2876e3c9..58f376bff 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -562,7 +562,7 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) /* * this case is used for timestamp format is specified. */ - if (4 == PG_NARGS()) { + if (4 == PG_NARGS() && !is_date_sconst) { timestamp_fmt = PG_GETARG_CSTRING(3); if (timestamp_fmt == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("specified timestamp format is null"))); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql index 07dd7f59b..157d71ff7 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql @@ -312,6 +312,10 @@ select cast('2022-05-05 20:70' as date); select cast('2022-05-05 20:60' as date); select cast('2022-05-05 20:59' as date); +select timestamp'2022-05'; +select timestamp'2022-15-05 16:20:10'; +select timestamp'2022-05-05 16:60:10'; + reset dolphin.sql_mode; drop schema b_time_funcs cascade; -- Gitee From 2e63f2ec1010d54fa0c838663ba3012fad6cd5ab Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 8 Nov 2023 19:05:21 +0800 Subject: [PATCH 044/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dopenguass=E7=9A=84select=20time('xxxx')?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E4=B8=8B=E5=9C=A8=E9=9D=9E=E6=B3=95=E5=85=A5?= 
=?UTF-8?q?=E5=8F=82=E7=9A=84=E6=97=B6=E5=80=99=E8=A1=A8=E7=8E=B0=E5=92=8C?= =?UTF-8?q?mysql=E4=B8=8D=E4=B8=80=E8=87=B4=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=B8=BB=E8=A6=81=E6=98=AF?= =?UTF-8?q?=E5=9C=A8time=5Fin=E4=B8=AD=E5=88=A4=E6=96=AD=E5=A6=82=E6=9E=9C?= =?UTF-8?q?=E6=98=AFselect=20time('')=E8=80=8C=E4=B8=94=E5=80=BC=E4=B9=9F?= =?UTF-8?q?=E6=98=AF=E9=9D=9E=E6=B3=95=E7=9A=84=EF=BC=8C=E9=82=A3=E4=B9=88?= =?UTF-8?q?=E8=BF=94=E5=9B=9E=E4=B8=80=E4=B8=8B=E9=9D=9E=E6=B3=95=E5=80=BC?= =?UTF-8?q?=EF=BC=88B=5FFORMAT=5FTIME=5FINVALID=5FVALUE=5FTAG=EF=BC=89?= =?UTF-8?q?=EF=BC=8C=E5=86=8D=E5=9C=A8check=5Fb=5Fformat=5Ftime=5Frange=5F?= =?UTF-8?q?with=5Fereport=E4=B8=AD=E5=88=A4=E6=96=AD=E5=A6=82=E6=9E=9C?= =?UTF-8?q?=E8=BF=94=E5=9B=9E=E5=80=BC=E4=B8=BA=E9=9D=9E=E6=B3=95=E7=9A=84?= =?UTF-8?q?=E8=AF=9D=EF=BC=8C=E5=B0=B1=E8=BF=94=E5=9B=9ENULL=20=E3=80=90?= =?UTF-8?q?=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20=E4=B9=8B?= =?UTF-8?q?=E5=89=8Dselect=20time('')=E5=9C=BA=E6=99=AF=E8=B5=B0=E7=9A=84?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E4=B8=BAtime=5Fmysql->convert=5Fto=5Fdatetim?= =?UTF-8?q?e=5Ftime->....->time=5Fin,time=5Fin=E4=B8=8D=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E8=BF=94=E5=9B=9ENULL=E5=AF=BC=E8=87=B4=E7=9A=84=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20?= =?UTF-8?q?=E4=B8=BB=E8=A6=81=E6=98=AF=E5=9C=A8time=5Fin=E4=B8=AD=E5=88=A4?= =?UTF-8?q?=E6=96=AD=E5=A6=82=E6=9E=9C=E6=98=AFselect=20time('')=E8=80=8C?= =?UTF-8?q?=E4=B8=94=E5=80=BC=E4=B9=9F=E6=98=AF=E9=9D=9E=E6=B3=95=E7=9A=84?= =?UTF-8?q?=EF=BC=8C=E9=82=A3=E4=B9=88=E8=BF=94=E5=9B=9E=E4=B8=80=E4=B8=8B?= =?UTF-8?q?=E9=9D=9E=E6=B3=95=E5=80=BC=EF=BC=88B=5FFORMAT=5FTIME=5FINVALID?= =?UTF-8?q?=5FVALUE=5FTAG=EF=BC=89=EF=BC=8C=E5=86=8D=E5=9C=A8check=5Fb=5Ff?= =?UTF-8?q?ormat=5Ftime=5Frange=5Fwith=5Fereport=E4=B8=AD=E5=88=A4?= =?UTF-8?q?=E6=96=AD=E5=A6=82=E6=9E=9C=E8=BF=94=E5=9B=9E=E5=80=BC=E4=B8=BA?= 
=?UTF-8?q?=E9=9D=9E=E6=B3=95=E7=9A=84=E8=AF=9D=EF=BC=8C=E5=B0=B1=E8=BF=94?= =?UTF-8?q?=E5=9B=9ENULL=E3=80=82=E5=9B=A0=E4=B8=BAinsert=E7=9A=84?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E4=B8=8Btime=5Fin=E5=B7=B2=E7=BB=8F=E8=BF=94?= =?UTF-8?q?=E5=9B=9E0=E6=88=96=E8=80=85=E6=8A=9B=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=E4=BA=86=EF=BC=8C=E6=89=80=E4=BB=A5=E4=B8=8D=E5=BD=B1=E5=93=8D?= =?UTF-8?q?insert=E7=9A=84=E5=9C=BA=E6=99=AF=E3=80=82=20=E3=80=90=E5=85=B3?= =?UTF-8?q?=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e?= =?UTF-8?q?.gitee.com/opengaussorg/dashboard=3Fissue=3DI8ETZ8=20=E3=80=90?= =?UTF-8?q?=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C=E6=8A=A5=E5=91=8A=E3=80=91?= =?UTF-8?q?:=20=E8=AF=B7=E9=99=84=E4=B8=8A=E8=87=AA=E9=AA=8C=E7=BB=93?= =?UTF-8?q?=E6=9E=9C(=E5=86=85=E5=AE=B9=E6=88=96=E8=80=85=E6=88=AA?= =?UTF-8?q?=E5=9B=BE)=20=E6=98=AF=E5=90=A6=E5=8F=AF=E4=BB=A5=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=EF=BC=8C?= =?UTF-8?q?=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7=E8=A1=A5=E5=85=85fastcheck?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99=E4=BF=AE=E6=94=B9=EF=BC=8C?= =?UTF-8?q?=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8docs=E4=BB=93=E5=BA=93?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20=20=20=20->=20=E4=B9=8B?= =?UTF-8?q?=E5=89=8D=E5=9C=A8https://gitee.com/opengauss/docs/pulls/5885/f?= =?UTF-8?q?iles=E5=B7=B2=E7=BB=8F=E4=BF=AE=E6=94=B9=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E8=80=83=E8=99=91=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(?= =?UTF-8?q?=E7=B3=BB=E7=BB=9F=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5?= =?UTF-8?q?=E5=BF=97=E6=8C=81=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE?= =?UTF-8?q?=E6=94=B9=E6=89=A7=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC?= =?UTF-8?q?=E5=BC=8F)=20=20->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF?= =?UTF-8?q?=E5=90=A6=E8=80=83=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9?= =?UTF-8?q?=E7=AD=89=E6=89=A9=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20?= 
=?UTF-8?q?=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=BC=82=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6?= =?UTF-8?q?=E5=8F=91=E5=9C=BA=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC?= =?UTF-8?q?=E5=AE=B9/=E6=80=A7=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20?= =?UTF-8?q?=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9?= =?UTF-8?q?=E5=85=B6=E4=BB=96=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1?= =?UTF-8?q?=E5=93=8D=20=20=20->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90?= =?UTF-8?q?=E5=85=B6=E4=BB=96=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 131 ++++++++++++++++-- contrib/dolphin/include/plugin_utils/date.h | 5 +- .../dolphin/include/plugin_utils/timestamp.h | 3 +- contrib/dolphin/plugin_utils/adt/date.cpp | 69 +++++---- .../dolphin/plugin_utils/adt/timestamp.cpp | 12 +- .../b_compatibility_time_funcs3.sql | 20 +++ 6 files changed, 195 insertions(+), 45 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index 0c8a0a21b..1a9117097 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -546,8 +546,6 @@ CONTEXT: referenced column: time select cast('23-65' as time); WARNING: time zone displacement out of range: "23-65" -CONTEXT: referenced column: time -WARNING: time zone displacement out of range: "23-65" CONTEXT: referenced column: time time ---------- @@ -576,8 +574,6 @@ select cast('0:0:0' as time); select cast('-1:-1:-1' as time); WARNING: time zone displacement out of range: "-1:-1:-1" -CONTEXT: referenced column: time -WARNING: time zone displacement out of range: "-1:-1:-1" CONTEXT: referenced column: time time ----------- @@ -600,6 +596,70 @@ CONTEXT: referenced column: addtime 00:00:24 
(1 row) +select time('23:65:66'); +WARNING: date/time field value out of range: "23:65:66" +CONTEXT: referenced column: time +WARNING: time field value out of range +CONTEXT: referenced column: time + time +------ + +(1 row) + +select time('23:65'); +WARNING: date/time field value out of range: "23:65" +CONTEXT: referenced column: time +WARNING: time field value out of range +CONTEXT: referenced column: time + time +------ + +(1 row) + +select time('23-65'); +WARNING: time zone displacement out of range: "23-65" +CONTEXT: referenced column: time + time +---------- + 00:00:23 +(1 row) + +select time('23:59:59.8888'); + time +--------------- + 23:59:59.8888 +(1 row) + +select time('23:65:66.8888'); +WARNING: date/time field value out of range: "23:65:66.8888" +CONTEXT: referenced column: time +WARNING: time field value out of range +CONTEXT: referenced column: time + time +------ + +(1 row) + +select time('0:0:0'); + time +---------- + 00:00:00 +(1 row) + +select time('-1:-1:-1'); +WARNING: time zone displacement out of range: "-1:-1:-1" +CONTEXT: referenced column: time + time +----------- + -00:00:01 +(1 row) + +select time('23:55:56.1234'); + time +--------------- + 23:55:56.1234 +(1 row) + set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test1 values('23:65:66'); ERROR: date/time field value out of range: "23:65:66" @@ -739,8 +799,6 @@ CONTEXT: referenced column: time select cast('23-65' as time); WARNING: time zone displacement out of range: "23-65" -CONTEXT: referenced column: time -WARNING: time zone displacement out of range: "23-65" CONTEXT: referenced column: time time ---------- @@ -769,8 +827,6 @@ select cast('0:0:0' as time); select cast('-1:-1:-1' as time); WARNING: time zone displacement out of range: "-1:-1:-1" -CONTEXT: referenced column: time -WARNING: time zone displacement out of range: "-1:-1:-1" CONTEXT: referenced column: time time ----------- @@ -1378,6 +1434,65 @@ SELECT 
STR_TO_DATE('0000-00-00','%Y-%m-%d'); 0000-00-00 (1 row) +set dolphin.b_compatibility_mode = false; +select time('23:65:66'); +WARNING: date/time field value out of range: "23:65:66" +CONTEXT: referenced column: time + time +---------- + 24:06:06 +(1 row) + +select time('23:65'); +WARNING: date/time field value out of range: "23:65" +CONTEXT: referenced column: time + time +---------- + 24:05:00 +(1 row) + +select time('23-65'); +WARNING: time zone displacement out of range: "23-65" +CONTEXT: referenced column: time + time +---------- + 00:00:23 +(1 row) + +select time('23:59:59.8888'); + time +--------------- + 23:59:59.8888 +(1 row) + +select time('23:65:66.8888'); +WARNING: date/time field value out of range: "23:65:66.8888" +CONTEXT: referenced column: time + time +--------------- + 24:06:06.8888 +(1 row) + +select time('0:0:0'); + time +---------- + 00:00:00 +(1 row) + +select time('-1:-1:-1'); +WARNING: time zone displacement out of range: "-1:-1:-1" +CONTEXT: referenced column: time + time +----------- + -00:00:01 +(1 row) + +select time('23:55:56.1234'); + time +--------------- + 23:55:56.1234 +(1 row) + drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 85f09601e..75b200bdc 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -28,6 +28,7 @@ #define B_FORMAT_DATE_NUMBER_MIN_LEN 5 #define B_FORMAT_TIME_BOUND INT64CONST(839) #define B_FORMAT_TIME_MAX_VALUE INT64CONST(B_FORMAT_TIME_BOUND * INT64CONST(3600000000) - 1000000) +#define B_FORMAT_TIME_INVALID_VALUE_TAG (-B_FORMAT_TIME_MAX_VALUE-1) #define B_FORMAT_TIME_NUMBER_MAX_LEN 7 #define B_FORMAT_DATE_INT_MIN 101 #define B_FORMAT_MAX_DATE 99991231 @@ -78,7 +79,7 @@ extern int tm2time(struct pg_tm* tm, fsec_t fsec, TimeADT* result); extern int time2tm(TimeADT time, struct pg_tm* tm, 
fsec_t* fsec); extern int timetz2tm(TimeTzADT* time, struct pg_tm* tm, fsec_t* fsec, int* tzp); extern bool cstring_to_time(const char *str, pg_tm *tm, fsec_t &fsec, int &timeSign, int &tm_type, bool &warnings, bool *null_func_result); -extern void check_b_format_time_range_with_ereport(TimeADT &time); +extern void check_b_format_time_range_with_ereport(TimeADT &time, bool can_ignore = false, bool* result_isnull = NULL); extern void check_b_format_date_range_with_ereport(DateADT &date); extern Oid convert_to_datetime_date(Datum value, Oid valuetypid, Timestamp *datetime, DateADT *date); extern void adjust_time_range(pg_tm *tm, fsec_t &fsec, bool &warnings); @@ -109,7 +110,7 @@ typedef enum }TimeCastType; -extern TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int is_time_sconst, Datum* datum_internal); +extern Datum time_internal(PG_FUNCTION_ARGS, char* str, int is_time_sconst, TimeErrorType* time_error_type); char* parser_function_input(Datum txt, Oid oid); #endif diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 3e3e43270..8cd514a58 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -119,7 +119,8 @@ extern bool datetime_sub_days(Timestamp datetime, int days, Timestamp *result, b extern bool datetime_sub_interval(Timestamp datetime, Interval *span, Timestamp *result, bool is_add_func = false); #ifdef DOLPHIN -Oid convert_to_datetime_time(Datum value, Oid valuetypid, Timestamp *datetime, TimeADT *time); +Oid convert_to_datetime_time(Datum value, Oid valuetypid, Timestamp *datetime, TimeADT *time, + bool can_ignore = false, bool* result_isnull = NULL); extern void check_b_format_datetime_range_with_ereport(Timestamp &datetime); extern void datetime_in_with_flag_internal(const char *str, struct pg_tm *tm, fsec_t* fsec, unsigned int date_flag); extern bool MaybeRound(struct pg_tm *tm, fsec_t *fsec); diff --git 
a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 89724b8c4..04d6e4a36 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -1666,9 +1666,12 @@ Datum abstime_date(PG_FUNCTION_ARGS) Datum time_in(PG_FUNCTION_ARGS) { #ifdef DOLPHIN - Datum datum_internal; char* input_str = PG_GETARG_CSTRING(0); - time_internal(fcinfo, input_str, TIME_IN, &datum_internal); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, input_str, TIME_IN, &time_error_type); + if (time_error_type == TIME_INCORRECT && ENABLE_B_CMPT_MODE) { + PG_RETURN_TIMEADT(B_FORMAT_TIME_INVALID_VALUE_TAG); + } return datum_internal; } @@ -1678,10 +1681,9 @@ Datum time_in(PG_FUNCTION_ARGS) */ Datum time_cast(PG_FUNCTION_ARGS) { - Datum datum_internal; char* input_str = PG_GETARG_CSTRING(0); - time_internal(fcinfo, input_str, TIME_CAST, &datum_internal); - return datum_internal; + TimeErrorType time_error_type = TIME_CORRECT; + return time_internal(fcinfo, input_str, TIME_CAST, &time_error_type); } @@ -1707,15 +1709,16 @@ char* parser_function_input(Datum txt, Oid oid) */ Datum text_time_explicit(PG_FUNCTION_ARGS) { - Datum datum_internal; char* input_str = parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]); - if (time_internal(fcinfo, input_str, TEXT_TIME_EXPLICIT, &datum_internal) == TIME_INCORRECT) { + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, input_str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); } return datum_internal; } -TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, Datum* datum_internal) +Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorType* time_error_type) { #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); @@ -1762,8 +1765,8 @@ TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int 
time_cast_type, Dat char *adjusted = adjust_b_format_time(str, &timeSign, &D, &hasD); /* check if empty */ if (strlen(adjusted) == 0) { - *datum_internal = TimeADTGetDatum(0); - return TIME_INCORRECT; + *time_error_type = TIME_INCORRECT; + PG_RETURN_TIMEADT(0); } dterr = ParseDateTime(adjusted, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr == 0) { @@ -1782,37 +1785,36 @@ TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, Dat */ char* field_str = field[0]; if (field_str == NULL) { - *datum_internal = TimeADTGetDatum(0); - return TIME_INCORRECT; + *time_error_type = TIME_INCORRECT; + PG_RETURN_TIMEADT(0); } if (*field_str == '+') { field_str++; } int trunc_val = getStartingDigits(field_str); if (trunc_val < 0 || trunc_val >= 60) { - *datum_internal = TimeADTGetDatum(0); - return TIME_INCORRECT; + *time_error_type = TIME_INCORRECT; + PG_RETURN_TIMEADT(0); } - *datum_internal = TimeADTGetDatum(trunc_val * TIME_MS_TO_S_RADIX * TIME_MS_TO_S_RADIX); - return TIME_INCORRECT; + *time_error_type = TIME_INCORRECT; + PG_RETURN_TIMEADT(trunc_val * TIME_MS_TO_S_RADIX * TIME_MS_TO_S_RADIX); } else if (SQL_MODE_NOT_STRICT_ON_INSERT()) { /* for case insert unavailable data, need to set the unavailable data to 0 to compatible with M */ DateTimeParseError(dterr, str, "time", true); if (IsResetUnavailableDataTime(dterr, !SQL_MODE_STRICT() && !CMD_TAG_IS_SELECT())) { - *datum_internal = TimeADTGetDatum(0); - return TIME_INCORRECT; + *time_error_type = TIME_IGNORED_INCORRECT; + PG_RETURN_TIMEADT(0); } else { tm = &tt; // switch to M*'s parsing result } } else { - if (time_cast_type == TEXT_TIME_EXPLICIT) { + if (time_cast_type == TEXT_TIME_EXPLICIT || time_cast_type == TIME_IN) { DateTimeParseError(dterr, str, "time", true); tm = &tt; // switch to M*'s parsing result if (dterr != DTERR_TZDISP_OVERFLOW) { - return TIME_INCORRECT; + *time_error_type = TIME_INCORRECT; } - } - if (time_cast_type == TIME_CAST) { + } else if (time_cast_type 
== TIME_CAST) { DateTimeParseErrorWithFlag(dterr, str, "time", fcinfo->can_ignore, !SQL_MODE_STRICT()); tm = &tt; // switch to M*'s parsing result } else { @@ -1830,9 +1832,7 @@ TimeErrorType time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, Dat tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, typmod); result *= timeSign; - *datum_internal = TimeADTGetDatum(result); - return TIME_CORRECT; - + PG_RETURN_TIMEADT(result); #else char* str = PG_GETARG_CSTRING(0); @@ -4151,13 +4151,18 @@ Datum makedate(PG_FUNCTION_ARGS) /* * Check whether the TimeADT value is within the specified range: * [-838:59:59, 838:59:59] (the time range is from MySQL). - * Error will be reported if the TimeADT value exceeds the range. + * for case select time('xxx') : should return null if the TimeADT value exceeds the range + * and for other case : Error will be reported if the TimeADT value exceeds the range. */ -void check_b_format_time_range_with_ereport(TimeADT &time) +void check_b_format_time_range_with_ereport(TimeADT &time, bool can_ignore, bool* result_isnull) { + int level = (can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR; if (time < -B_FORMAT_TIME_MAX_VALUE || time > B_FORMAT_TIME_MAX_VALUE) { - ereport(ERROR, (errcode(ERRCODE_DATETIME_FIELD_OVERFLOW), + ereport(level, (errcode(ERRCODE_DATETIME_FIELD_OVERFLOW), errmsg("time field value out of range"))); + if (result_isnull != NULL) { + *result_isnull = true; + } } } @@ -4937,8 +4942,14 @@ Datum time_mysql(PG_FUNCTION_ARGS) Timestamp datetime; Oid val_type; + bool result_isnull = false; val_type = get_fn_expr_argtype(fcinfo->flinfo, 0); - val_type = convert_to_datetime_time(PG_GETARG_DATUM(0), val_type, &datetime, &time); + val_type = convert_to_datetime_time(PG_GETARG_DATUM(0), val_type, &datetime, &time, + fcinfo->can_ignore, &result_isnull); + + if (result_isnull) { + PG_RETURN_NULL(); + } switch (val_type) { case TIMEOID: { diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 58f376bff..5d8f59458 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -7060,7 +7060,8 @@ void convert_to_datetime(Datum value, Oid valuetypid, Timestamp *datetime) * Error will be reported if the above range is exceeded. * @return: Actual time type oid. */ -Oid convert_cstring_to_datetime_time(const char* str, Timestamp *datetime, TimeADT *time) +Oid convert_cstring_to_datetime_time(const char* str, Timestamp *datetime, TimeADT *time, + bool can_ignore, bool* result_isnull) { size_t len = strlen(str); const char *start; @@ -7085,7 +7086,7 @@ Oid convert_cstring_to_datetime_time(const char* str, Timestamp *datetime, TimeA /* Not a timestamp. 
Try to convert str to time*/ *time = DatumGetTimeADT( DirectFunctionCall3(time_in, CStringGetDatum(start), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); - check_b_format_time_range_with_ereport(*time); + check_b_format_time_range_with_ereport(*time, can_ignore, result_isnull); return TIMEOID; } @@ -7174,12 +7175,13 @@ Oid convert_unknown_to_datetime_time(const char* str, Timestamp *datetime, TimeA * Error will be reported if the above range is exceeded. * @return: Actual time type oid. */ -Oid convert_to_datetime_time(Datum value, Oid valuetypid, Timestamp *datetime, TimeADT *time) +Oid convert_to_datetime_time(Datum value, Oid valuetypid, Timestamp *datetime, TimeADT *time, + bool can_ignore, bool* result_isnull) { switch (valuetypid) { case UNKNOWNOID: case CSTRINGOID: { - return convert_cstring_to_datetime_time(DatumGetCString(value), datetime, time); + return convert_cstring_to_datetime_time(DatumGetCString(value), datetime, time, can_ignore, result_isnull); } case CLOBOID: case NVARCHAR2OID: @@ -7187,7 +7189,7 @@ Oid convert_to_datetime_time(Datum value, Oid valuetypid, Timestamp *datetime, T case VARCHAROID: case TEXTOID: { char *str = TextDatumGetCString(value); - return convert_cstring_to_datetime_time(str, datetime, time); + return convert_cstring_to_datetime_time(str, datetime, time, can_ignore, result_isnull); } case TIMESTAMPOID: case TIMESTAMPTZOID: diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index 81acdef17..d10f05cc4 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -241,6 +241,15 @@ select cast('0:0:0' as time); select cast('-1:-1:-1' as time); select cast('23:55:56.1234' as time); select addtime('12aaa43', '12aa43'); +select time('23:65:66'); +select time('23:65'); +select time('23-65'); +select time('23:59:59.8888'); +select 
time('23:65:66.8888'); +select time('0:0:0'); +select time('-1:-1:-1'); +select time('23:55:56.1234'); + set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test1 values('23:65:66'); @@ -375,5 +384,16 @@ SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); + +set dolphin.b_compatibility_mode = false; +select time('23:65:66'); +select time('23:65'); +select time('23-65'); +select time('23:59:59.8888'); +select time('23:65:66.8888'); +select time('0:0:0'); +select time('-1:-1:-1'); +select time('23:55:56.1234'); + drop schema b_time_funcs3 cascade; reset current_schema; -- Gitee From 71177278893972bc99d88b595211a97bced3f9dc Mon Sep 17 00:00:00 2001 From: luekman Date: Wed, 8 Nov 2023 16:55:48 +0800 Subject: [PATCH 045/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E7=BC=BA=E9=99=B7SIG?= =?UTF-8?q?NED/UNSIGNED=20INTEGER=E7=B1=BB=E5=9E=8B=E8=BD=AC=E6=8D=A2?= =?UTF-8?q?=E4=B8=8D=E5=85=BC=E5=AE=B9=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/uint_cast3.out | 72 +++++++++++++++++++++++++ contrib/dolphin/plugin_parser/gram.y | 34 +++++++++++- contrib/dolphin/sql/uint_cast3.sql | 17 +++++- 3 files changed, 121 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index b0eeb7c25..68fac2789 100644 --- a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -566,5 +566,77 @@ select * from t_uint; (1 row) drop table t_uint; +select cast(1 as signed integer); + int8 +------ + 1 +(1 row) + +select cast(1 as unsigned integer); + uint8 +------- + 1 +(1 row) + +select convert(1 , signed integer); + int8 +------ + 1 +(1 row) + +select convert(1 , unsigned integer); + uint8 +------- + 1 +(1 row) + +select cast(1 as integer signed); + int8 +------ 
+ 1 +(1 row) + +select cast(1 as integer unsigned); + uint8 +------- + 1 +(1 row) + +select convert(1 , integer signed); + int8 +------ + 1 +(1 row) + +select convert(1 , integer unsigned); + uint8 +------- + 1 +(1 row) + +select cast(1 as signed); + int8 +------ + 1 +(1 row) + +select cast(1 as unsigned); + uint8 +------- + 1 +(1 row) + +select convert(1 , signed); + int8 +------ + 1 +(1 row) + +select convert(1 , unsigned); + uint8 +------- + 1 +(1 row) + drop schema uint_cast3 cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 22f568daa..2ae995fc6 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -31336,7 +31336,7 @@ unsigned_list: | unsigned_list opt_unsigned ; -opt_unsigned: +opt_unsigned: %prec UMINUS UNSIGNED | ZEROFILL ; @@ -34043,6 +34043,30 @@ func_expr_common_subexpr: { $$ = makeTypeCast($3, $5, @1); } + | CONVERT '(' a_expr ',' UNSIGNED INTEGER ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' SIGNED INTEGER ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } + | CONVERT '(' a_expr ',' INTEGER UNSIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' INTEGER SIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } + | CONVERT '(' a_expr ',' UNSIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' SIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } | CURRENT_TIME { FuncCall *n = makeNode(FuncCall); @@ -34580,6 +34604,14 @@ func_expr_common_subexpr: { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } | CAST '(' a_expr AS SIGNED ')' { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } + | CAST '(' a_expr AS UNSIGNED INTEGER ')' + { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } + | CAST '(' a_expr AS SIGNED INTEGER ')' + { $$ = makeTypeCast($3, 
SystemTypeName("int8"), @1); } + | CAST '(' a_expr AS INTEGER UNSIGNED ')' + { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } + | CAST '(' a_expr AS INTEGER SIGNED ')' + { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } | EXTRACT '(' extract_list ')' { FuncCall *n = makeNode(FuncCall); diff --git a/contrib/dolphin/sql/uint_cast3.sql b/contrib/dolphin/sql/uint_cast3.sql index 6eab983be..ea2f39eb2 100644 --- a/contrib/dolphin/sql/uint_cast3.sql +++ b/contrib/dolphin/sql/uint_cast3.sql @@ -110,5 +110,20 @@ insert into t_uint values('-0', '-0', '-0', '-0'); select * from t_uint; drop table t_uint; +select cast(1 as signed integer); +select cast(1 as unsigned integer); +select convert(1 , signed integer); +select convert(1 , unsigned integer); + +select cast(1 as integer signed); +select cast(1 as integer unsigned); +select convert(1 , integer signed); +select convert(1 , integer unsigned); + +select cast(1 as signed); +select cast(1 as unsigned); +select convert(1 , signed); +select convert(1 , unsigned); + drop schema uint_cast3 cascade; -reset current_schema; \ No newline at end of file +reset current_schema; -- Gitee From cdb2a9b18ce9a56ce3e504ea29638116879087d1 Mon Sep 17 00:00:00 2001 From: he-shaoyu Date: Mon, 6 Nov 2023 14:24:31 +0800 Subject: [PATCH 046/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=8D=A2=E8=A1=8C?= =?UTF-8?q?=E7=AC=A6=E4=B8=BAunix=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/conv_cast_test.out | 311 +++++++++--------- contrib/dolphin/sql/conv_cast_test.sql | 330 ++++++++++---------- 2 files changed, 311 insertions(+), 330 deletions(-) diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 0dbf76b01..536902f63 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -1,268 +1,264 @@ -create schema conv_cast_test; -set current_schema to 
'conv_cast_test'; - -select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); +create schema conv_cast_test; +set current_schema to 'conv_cast_test'; +select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-366666666666666666666666666666666666666, 10, 8); +select conv(-366666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-266666666666666666666666666666666666666, 10, 8); +select conv(-266666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105729,10,8); +select conv(-170141183460469231731687303715884105729,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105728,10,8); +select conv(-170141183460469231731687303715884105728,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105727,10,8); +select conv(-170141183460469231731687303715884105727,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551617,10,8); +select conv(-18446744073709551617,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551616,10,8); +select conv(-18446744073709551616,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551615,10,8); +select conv(-18446744073709551615,10,8); conv ------ 1 (1 row) -select conv(-9223372036854775809,10,8); +select conv(-9223372036854775809,10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv(-9223372036854775808,10,8); +select conv(-9223372036854775808,10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv(-9223372036854775807,10,8); +select 
conv(-9223372036854775807,10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv(-123456,10,8); +select conv(-123456,10,8); conv ------------------------ 1777777777777777416700 (1 row) -select conv(-1,10,8); +select conv(-1,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(0,10,8); +select conv(0,10,8); conv ------ 0 (1 row) -select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(366666666666666666666666666666666666666, 10, 8); +select conv(366666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(266666666666666666666666666666666666666, 10, 8); +select conv(266666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105729,10,8); +select conv(170141183460469231731687303715884105729,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105728,10,8); +select conv(170141183460469231731687303715884105728,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105727,10,8); +select conv(170141183460469231731687303715884105727,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551617,10,8); +select conv(18446744073709551617,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551616,10,8); +select conv(18446744073709551616,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551615,10,8); +select conv(18446744073709551615,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(9223372036854775809,10,8); 
+select conv(9223372036854775809,10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv(9223372036854775808,10,8); +select conv(9223372036854775808,10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv(9223372036854775807,10,8); +select conv(9223372036854775807,10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv(123456,10,8); +select conv(123456,10,8); conv -------- 361100 (1 row) -select conv(1,10,8); +select conv(1,10,8); conv ------ 1 (1 row) - -select time'-2 34:25:59'::float4; +select time'-2 34:25:59'::float4; float4 --------- -822559 (1 row) -select time'-838:59:59'::float4; +select time'-838:59:59'::float4; float4 -------------- -8.38596e+06 (1 row) -select time'0'::float4; +select time'0'::float4; float4 -------- 0 (1 row) -select time'-0'::float4; +select time'-0'::float4; float4 -------- 0 (1 row) -select time'-2 34:25:59'::float8; +select time'-2 34:25:59'::float8; float8 --------- -822559 (1 row) -select time'-838:59:59'::float8; +select time'-838:59:59'::float8; float8 ---------- -8385959 (1 row) -select time'0'::float8; +select time'0'::float8; float8 -------- 0 (1 row) -select time'-0'::float8; +select time'-0'::float8; float8 -------- 0 (1 row) - -select 127::tinyint::bit(64)::tinyint; +select 127::tinyint::bit(64)::tinyint; int1 ------ 127 (1 row) -select 32767::smallint::bit(64)::smallint; +select 32767::smallint::bit(64)::smallint; int2 ------- 32767 (1 row) -select 2147483647::int::bit(64)::int; +select 2147483647::int::bit(64)::int; int4 ------------ 2147483647 (1 row) -select 9223372036854775807::bigint::bit(64)::bigint; +select 9223372036854775807::bigint::bit(64)::bigint; int8 --------------------- 9223372036854775807 (1 row) - -select '255'::uint1::time; +select '255'::uint1::time; time ---------- 00:02:55 (1 row) -select '65535'::uint2::time; +select '65535'::uint2::time; time ---------- 06:55:35 (1 row) -select '4294967295'::uint4::time; +select 
'4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -270,7 +266,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) -select '18446744073709551615'::uint8::time; +select '18446744073709551615'::uint8::time; WARNING: invalid input syntax for type time: "18446744073709551615" CONTEXT: referenced column: time time @@ -278,8 +274,7 @@ CONTEXT: referenced column: time 00:00:00 (1 row) - -select '4294967295'::uint1::time; +select '4294967295'::uint1::time; WARNING: tinyint unsigned out of range CONTEXT: referenced column: time time @@ -287,7 +282,7 @@ CONTEXT: referenced column: time 00:02:55 (1 row) -select '4294967295'::uint2::time; +select '4294967295'::uint2::time; WARNING: smallint unsigned out of range CONTEXT: referenced column: time time @@ -295,7 +290,7 @@ CONTEXT: referenced column: time 06:55:35 (1 row) -select '4294967295'::uint4::time; +select '4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -303,7 +298,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) -select '4294967295'::uint8::time; +select '4294967295'::uint8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -311,8 +306,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) - -select '4294967295'::int1::time; +select '4294967295'::int1::time; WARNING: value "4294967295" is out of range for type tinyint LINE 1: select '4294967295'::int1::time; ^ @@ -322,21 +316,21 @@ CONTEXT: referenced column: time 00:01:27 (1 row) -select '4294967295'::int2::time; +select '4294967295'::int2::time; WARNING: value "4294967295" is out of range for type smallint LINE 1: select '4294967295'::int2::time; ^ CONTEXT: referenced column: time ERROR: time out of range CONTEXT: referenced column: time -select '4294967295'::int4::time; +select '4294967295'::int4::time; WARNING: value "4294967295" is out of 
range for type integer LINE 1: select '4294967295'::int4::time; ^ CONTEXT: referenced column: time ERROR: Incorrect time value CONTEXT: referenced column: time -select '4294967295'::int8::time; +select '4294967295'::int8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -344,286 +338,278 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) - -select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-366666666666666666666666666666666666666', 10, 8); +select conv('-366666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-266666666666666666666666666666666666666', 10, 8); +select conv('-266666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105729',10,8); +select conv('-170141183460469231731687303715884105729',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105728',10,8); +select conv('-170141183460469231731687303715884105728',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105727',10,8); +select conv('-170141183460469231731687303715884105727',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551617',10,8); +select conv('-18446744073709551617',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551616',10,8); +select conv('-18446744073709551616',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551615',10,8); +select conv('-18446744073709551615',10,8); conv ------ 1 (1 row) -select 
conv('-9223372036854775809',10,8); +select conv('-9223372036854775809',10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv('-9223372036854775808',10,8); +select conv('-9223372036854775808',10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv('-9223372036854775807',10,8); +select conv('-9223372036854775807',10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv('-123456',10,8); +select conv('-123456',10,8); conv ------------------------ 1777777777777777416700 (1 row) -select conv('-1',10,8); +select conv('-1',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('0',10,8); +select conv('0',10,8); conv ------ 0 (1 row) -select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('366666666666666666666666666666666666666', 10, 8); +select conv('366666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('266666666666666666666666666666666666666', 10, 8); +select conv('266666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105729',10,8); +select conv('170141183460469231731687303715884105729',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105728',10,8); +select conv('170141183460469231731687303715884105728',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105727',10,8); +select conv('170141183460469231731687303715884105727',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('18446744073709551617',10,8); +select conv('18446744073709551617',10,8); conv 
------------------------ 1777777777777777777777 (1 row) -select conv('18446744073709551616',10,8); +select conv('18446744073709551616',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('18446744073709551615',10,8); +select conv('18446744073709551615',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('9223372036854775809',10,8); +select conv('9223372036854775809',10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv('9223372036854775808',10,8); +select conv('9223372036854775808',10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv('9223372036854775807',10,8); +select conv('9223372036854775807',10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv('123456',10,8); +select conv('123456',10,8); conv -------- 361100 (1 row) -select conv('1',10,8); +select conv('1',10,8); conv ------ 1 (1 row) - -select ''::bit; +select ''::bit; bit ----- 0 (1 row) -select ''::bit(10); +select ''::bit(10); bit ------------ 0000000000 (1 row) -select ''::bit(64); +select ''::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) - -set dolphin.b_compatibility_mode to on; - -select ''::bit; +set dolphin.b_compatibility_mode to on; +select ''::bit; bit ----- 0 (1 row) -select ''::bit(10); +select ''::bit(10); bit ------------ 0000000000 (1 row) -select ''::bit(64); +select ''::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) - -select 8385959::char(30)::time; +select 8385959::char(30)::time; time ----------- 838:59:59 (1 row) -select -8385958.999999::char(30)::time(6); +select -8385958.999999::char(30)::time(6); ?column? ------------ -838:59:59 (1 row) -select -8385959::varchar(30)::time; +select -8385959::varchar(30)::time; ?column? 
------------ -838:59:59 (1 row) -select 8385958.999999::varchar(30)::time(6); +select 8385958.999999::varchar(30)::time(6); time ----------- 838:59:59 (1 row) - -select '838:59:59'::char(30)::time; +select '838:59:59'::char(30)::time; time ----------- 838:59:59 (1 row) -select '-838:59:58.999999'::char(30)::time(6); +select '-838:59:58.999999'::char(30)::time(6); time ------------ -838:59:59 (1 row) -select '-838:59:59'::varchar(30)::time; +select '-838:59:59'::varchar(30)::time; time ------------ -838:59:59 (1 row) -select '838:59:58.999999'::varchar(30)::time(6); +select '838:59:58.999999'::varchar(30)::time(6); time ----------- 838:59:59 (1 row) - -SELECT '20220101121212'::date; +SELECT '20220101121212'::date; date ------------ 2022-01-01 (1 row) -SELECT '20220101121212.5'::date; +SELECT '20220101121212.5'::date; date ------------ 2022-01-01 (1 row) - -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); WARNING: bigint unsigned out of range CONTEXT: referenced column: uint8 uint8 @@ -631,7 +617,7 @@ CONTEXT: referenced column: uint8 18446744073709551615 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 WARNING: bigint out of range @@ -641,13 +627,13 @@ CONTEXT: referenced column: int8 -1 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); uint8 ---------------------- 18446744073709551615 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); 
+select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 int8 @@ -655,19 +641,19 @@ CONTEXT: referenced column: int8 -1 (1 row) -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); uint8 --------------------- 9223372036854775807 (1 row) -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); int8 --------------------- 9223372036854775807 (1 row) -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); WARNING: bigint unsigned out of range CONTEXT: referenced column: uint8 uint8 @@ -675,7 +661,7 @@ CONTEXT: referenced column: uint8 18446744073709551615 (1 row) -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 WARNING: bigint out of range @@ -685,85 +671,81 @@ CONTEXT: referenced column: int8 -1 (1 row) - -select hex('9999-12-31 23:59:59'::datetime::bit(64)); +select hex('9999-12-31 23:59:59'::datetime::bit(64)); hex ------------------ 00005af105d18777 (1 row) -select hex('99991231235959'::datetime::bit(64)); +select hex('99991231235959'::datetime::bit(64)); hex ------------------ 00005af105d18777 (1 row) - -select 'true'::bool::bit; +select 'true'::bool::bit; bit ----- 1 (1 row) -select 'true'::bool::bit(10); +select 'true'::bool::bit(10); bit ------------ 0000000001 (1 row) -select 'true'::bool::bit(64); +select 'true'::bool::bit(64); bit 
------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000001 (1 row) -select 'true'::bool::float4; +select 'true'::bool::float4; float4 -------- 1 (1 row) -select 'true'::bool::float8; +select 'true'::bool::float8; float8 -------- 1 (1 row) - -select 'false'::bool::bit; +select 'false'::bool::bit; bit ----- 0 (1 row) -select 'false'::bool::bit(10); +select 'false'::bool::bit(10); bit ------------ 0000000000 (1 row) -select 'false'::bool::bit(64); +select 'false'::bool::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) -select 'false'::bool::float4; +select 'false'::bool::float4; float4 -------- 0 (1 row) -select 'false'::bool::float8; +select 'false'::bool::float8; float8 -------- 0 (1 row) - -create table test_date(a date); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; +create table test_date(a date); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -771,10 +753,10 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(0); +insert into test_date values(0); ERROR: Out of range value for date CONTEXT: referenced column: a -select 1::date; +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -782,18 +764,18 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); ERROR: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = 
sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; date ------------ 0000-00-00 (1 row) -insert into test_date values(0); -select 1::date; +insert into test_date values(0); +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -801,11 +783,11 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); ERROR: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -813,10 +795,10 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(0); +insert into test_date values(0); WARNING: Out of range value for date CONTEXT: referenced column: a -select 1::date; +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -824,18 +806,18 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); WARNING: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; date ------------ 0000-00-00 (1 row) -insert into test_date values(0); -select 1::date; +insert into test_date 
values(0); +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -843,11 +825,11 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); WARNING: Out of range value for date CONTEXT: referenced column: a -reset dolphin.sql_mode; -select * from test_date; +reset dolphin.sql_mode; +select * from test_date; a ------------ 0000-00-00 @@ -857,7 +839,6 @@ select * from test_date; 0000-00-00 (5 rows) - -drop schema conv_cast_test cascade; +drop schema conv_cast_test cascade; NOTICE: drop cascades to table test_date -reset current_schema; +reset current_schema; diff --git a/contrib/dolphin/sql/conv_cast_test.sql b/contrib/dolphin/sql/conv_cast_test.sql index 22d9a00c3..f4fbd2132 100755 --- a/contrib/dolphin/sql/conv_cast_test.sql +++ b/contrib/dolphin/sql/conv_cast_test.sql @@ -1,165 +1,165 @@ -create schema conv_cast_test; -set current_schema to 'conv_cast_test'; - -select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); -select conv(-366666666666666666666666666666666666666, 10, 8); -select conv(-266666666666666666666666666666666666666, 10, 8); -select conv(-170141183460469231731687303715884105729,10,8); -select conv(-170141183460469231731687303715884105728,10,8); -select conv(-170141183460469231731687303715884105727,10,8); -select conv(-18446744073709551617,10,8); -select conv(-18446744073709551616,10,8); -select conv(-18446744073709551615,10,8); -select conv(-9223372036854775809,10,8); -select conv(-9223372036854775808,10,8); -select conv(-9223372036854775807,10,8); -select conv(-123456,10,8); -select conv(-1,10,8); -select conv(0,10,8); -select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); -select conv(366666666666666666666666666666666666666, 10, 8); -select conv(266666666666666666666666666666666666666, 10, 8); -select conv(170141183460469231731687303715884105729,10,8); -select 
conv(170141183460469231731687303715884105728,10,8); -select conv(170141183460469231731687303715884105727,10,8); -select conv(18446744073709551617,10,8); -select conv(18446744073709551616,10,8); -select conv(18446744073709551615,10,8); -select conv(9223372036854775809,10,8); -select conv(9223372036854775808,10,8); -select conv(9223372036854775807,10,8); -select conv(123456,10,8); -select conv(1,10,8); - -select time'-2 34:25:59'::float4; -select time'-838:59:59'::float4; -select time'0'::float4; -select time'-0'::float4; -select time'-2 34:25:59'::float8; -select time'-838:59:59'::float8; -select time'0'::float8; -select time'-0'::float8; - -select 127::tinyint::bit(64)::tinyint; -select 32767::smallint::bit(64)::smallint; -select 2147483647::int::bit(64)::int; -select 9223372036854775807::bigint::bit(64)::bigint; - -select '255'::uint1::time; -select '65535'::uint2::time; -select '4294967295'::uint4::time; -select '18446744073709551615'::uint8::time; - -select '4294967295'::uint1::time; -select '4294967295'::uint2::time; -select '4294967295'::uint4::time; -select '4294967295'::uint8::time; - -select '4294967295'::int1::time; -select '4294967295'::int2::time; -select '4294967295'::int4::time; -select '4294967295'::int8::time; - -select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); -select conv('-366666666666666666666666666666666666666', 10, 8); -select conv('-266666666666666666666666666666666666666', 10, 8); -select conv('-170141183460469231731687303715884105729',10,8); -select conv('-170141183460469231731687303715884105728',10,8); -select conv('-170141183460469231731687303715884105727',10,8); -select conv('-18446744073709551617',10,8); -select conv('-18446744073709551616',10,8); -select conv('-18446744073709551615',10,8); -select conv('-9223372036854775809',10,8); -select conv('-9223372036854775808',10,8); -select conv('-9223372036854775807',10,8); -select conv('-123456',10,8); -select conv('-1',10,8); -select conv('0',10,8); 
-select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); -select conv('366666666666666666666666666666666666666', 10, 8); -select conv('266666666666666666666666666666666666666', 10, 8); -select conv('170141183460469231731687303715884105729',10,8); -select conv('170141183460469231731687303715884105728',10,8); -select conv('170141183460469231731687303715884105727',10,8); -select conv('18446744073709551617',10,8); -select conv('18446744073709551616',10,8); -select conv('18446744073709551615',10,8); -select conv('9223372036854775809',10,8); -select conv('9223372036854775808',10,8); -select conv('9223372036854775807',10,8); -select conv('123456',10,8); -select conv('1',10,8); - -select ''::bit; -select ''::bit(10); -select ''::bit(64); - -set dolphin.b_compatibility_mode to on; - -select ''::bit; -select ''::bit(10); -select ''::bit(64); - -select 8385959::char(30)::time; -select -8385958.999999::char(30)::time(6); -select -8385959::varchar(30)::time; -select 8385958.999999::varchar(30)::time(6); - -select '838:59:59'::char(30)::time; -select '-838:59:58.999999'::char(30)::time(6); -select '-838:59:59'::varchar(30)::time; -select '838:59:58.999999'::varchar(30)::time(6); - -SELECT '20220101121212'::date; -SELECT '20220101121212.5'::date; - -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select 
cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); - -select hex('9999-12-31 23:59:59'::datetime::bit(64)); -select hex('99991231235959'::datetime::bit(64)); - -select 'true'::bool::bit; -select 'true'::bool::bit(10); -select 'true'::bool::bit(64); -select 'true'::bool::float4; -select 'true'::bool::float8; - -select 'false'::bool::bit; -select 'false'::bool::bit(10); -select 'false'::bool::bit(64); -select 'false'::bool::float4; -select 'false'::bool::float8; - -create table test_date(a date); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -reset dolphin.sql_mode; -select * from test_date; - -drop schema conv_cast_test cascade; -reset current_schema; +create schema conv_cast_test; +set current_schema to 'conv_cast_test'; + +select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(-366666666666666666666666666666666666666, 10, 8); +select conv(-266666666666666666666666666666666666666, 10, 8); +select conv(-170141183460469231731687303715884105729,10,8); +select 
conv(-170141183460469231731687303715884105728,10,8); +select conv(-170141183460469231731687303715884105727,10,8); +select conv(-18446744073709551617,10,8); +select conv(-18446744073709551616,10,8); +select conv(-18446744073709551615,10,8); +select conv(-9223372036854775809,10,8); +select conv(-9223372036854775808,10,8); +select conv(-9223372036854775807,10,8); +select conv(-123456,10,8); +select conv(-1,10,8); +select conv(0,10,8); +select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(366666666666666666666666666666666666666, 10, 8); +select conv(266666666666666666666666666666666666666, 10, 8); +select conv(170141183460469231731687303715884105729,10,8); +select conv(170141183460469231731687303715884105728,10,8); +select conv(170141183460469231731687303715884105727,10,8); +select conv(18446744073709551617,10,8); +select conv(18446744073709551616,10,8); +select conv(18446744073709551615,10,8); +select conv(9223372036854775809,10,8); +select conv(9223372036854775808,10,8); +select conv(9223372036854775807,10,8); +select conv(123456,10,8); +select conv(1,10,8); + +select time'-2 34:25:59'::float4; +select time'-838:59:59'::float4; +select time'0'::float4; +select time'-0'::float4; +select time'-2 34:25:59'::float8; +select time'-838:59:59'::float8; +select time'0'::float8; +select time'-0'::float8; + +select 127::tinyint::bit(64)::tinyint; +select 32767::smallint::bit(64)::smallint; +select 2147483647::int::bit(64)::int; +select 9223372036854775807::bigint::bit(64)::bigint; + +select '255'::uint1::time; +select '65535'::uint2::time; +select '4294967295'::uint4::time; +select '18446744073709551615'::uint8::time; + +select '4294967295'::uint1::time; +select '4294967295'::uint2::time; +select '4294967295'::uint4::time; +select '4294967295'::uint8::time; + +select '4294967295'::int1::time; +select '4294967295'::int2::time; +select '4294967295'::int4::time; +select '4294967295'::int8::time; + +select 
conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('-366666666666666666666666666666666666666', 10, 8); +select conv('-266666666666666666666666666666666666666', 10, 8); +select conv('-170141183460469231731687303715884105729',10,8); +select conv('-170141183460469231731687303715884105728',10,8); +select conv('-170141183460469231731687303715884105727',10,8); +select conv('-18446744073709551617',10,8); +select conv('-18446744073709551616',10,8); +select conv('-18446744073709551615',10,8); +select conv('-9223372036854775809',10,8); +select conv('-9223372036854775808',10,8); +select conv('-9223372036854775807',10,8); +select conv('-123456',10,8); +select conv('-1',10,8); +select conv('0',10,8); +select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('366666666666666666666666666666666666666', 10, 8); +select conv('266666666666666666666666666666666666666', 10, 8); +select conv('170141183460469231731687303715884105729',10,8); +select conv('170141183460469231731687303715884105728',10,8); +select conv('170141183460469231731687303715884105727',10,8); +select conv('18446744073709551617',10,8); +select conv('18446744073709551616',10,8); +select conv('18446744073709551615',10,8); +select conv('9223372036854775809',10,8); +select conv('9223372036854775808',10,8); +select conv('9223372036854775807',10,8); +select conv('123456',10,8); +select conv('1',10,8); + +select ''::bit; +select ''::bit(10); +select ''::bit(64); + +set dolphin.b_compatibility_mode to on; + +select ''::bit; +select ''::bit(10); +select ''::bit(64); + +select 8385959::char(30)::time; +select -8385958.999999::char(30)::time(6); +select -8385959::varchar(30)::time; +select 8385958.999999::varchar(30)::time(6); + +select '838:59:59'::char(30)::time; +select '-838:59:58.999999'::char(30)::time(6); +select '-838:59:59'::varchar(30)::time; +select '838:59:58.999999'::varchar(30)::time(6); + +SELECT '20220101121212'::date; +SELECT 
'20220101121212.5'::date; + +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); + +select hex('9999-12-31 23:59:59'::datetime::bit(64)); +select hex('99991231235959'::datetime::bit(64)); + +select 'true'::bool::bit; +select 'true'::bool::bit(10); +select 'true'::bool::bit(64); +select 'true'::bool::float4; +select 'true'::bool::float8; + +select 'false'::bool::bit; +select 'false'::bool::bit(10); +select 'false'::bool::bit(64); +select 'false'::bool::float4; +select 'false'::bool::float8; + +create table test_date(a date); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date 
values(1); +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +reset dolphin.sql_mode; +select * from test_date; + +drop schema conv_cast_test cascade; +reset current_schema; -- Gitee From 50c726e205ee00b9612cbdcbd9f2ed5e287ca87e Mon Sep 17 00:00:00 2001 From: he-shaoyu Date: Mon, 6 Nov 2023 11:44:10 +0800 Subject: [PATCH 047/434] =?UTF-8?q?=E8=AE=BE=E7=BD=AE=E6=96=B0=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E6=8E=A7=E5=88=B6bconst=E8=A1=A8=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/bxconst_test.out | 105 +++++ contrib/dolphin/expected/conv_cast_test.out | 441 +++++++++++------- .../include/plugin_commands/mysqlmode.h | 4 +- .../dolphin/include/plugin_utils/varlena.h | 11 + contrib/dolphin/plugin_parser/parse_node.cpp | 11 +- contrib/dolphin/plugin_postgres.cpp | 3 +- contrib/dolphin/plugin_utils/adt/numeric.cpp | 2 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 1 + .../rollback_script/dolphin--3.0--2.0.sql | 11 + contrib/dolphin/sql/bxconst_test.sql | 49 ++ contrib/dolphin/sql/conv_cast_test.sql | 358 +++++++------- .../upgrade_script/dolphin--2.0--3.0.sql | 11 + 12 files changed, 673 insertions(+), 334 deletions(-) create mode 100644 contrib/dolphin/expected/bxconst_test.out create mode 100644 contrib/dolphin/include/plugin_utils/varlena.h create mode 100644 contrib/dolphin/sql/bxconst_test.sql diff --git a/contrib/dolphin/expected/bxconst_test.out b/contrib/dolphin/expected/bxconst_test.out new file mode 100644 index 000000000..76ee124d9 --- /dev/null +++ b/contrib/dolphin/expected/bxconst_test.out @@ -0,0 +1,105 @@ +create schema bxconst_test; +set current_schema to 'bxconst_test'; + +set dolphin.b_compatibility_mode to on; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select 
b'11100000111000'; + ?column? +---------------- + 11100000111000 +(1 row) + +select pg_typeof(b'11100000111000'); + pg_typeof +----------- + bit +(1 row) + +select x'4c'; + ?column? +---------- + 01001100 +(1 row) + +select pg_typeof(x'4c'); + pg_typeof +----------- + bit +(1 row) + +insert into t_bit values(b'11100000111000'), (x'4c'); +insert into t_bin values(b'11100000111000'), (x'4c'); + +select * from t_bit; + a +------------------ + 0011100000111000 + 0000000001001100 +(2 rows) + +select * from t_bin; + a +---------------- + \x313433393200 + \x373600000000 +(2 rows) + +drop table t_bit; +drop table t_bin; + +set dolphin.sql_mode = treat_bxconst_as_binary; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select b'11100000111000'; + ?column? +---------- + \x3838 +(1 row) + +select pg_typeof(b'11100000111000'); + pg_typeof +----------- + "binary" +(1 row) + +select x'4c'; + ?column? +---------- + \x4c +(1 row) + +select pg_typeof(x'4c'); + pg_typeof +----------- + "binary" +(1 row) + +insert into t_bit values(b'11000100110001'), (x'3130'); +insert into t_bin values(b'11000100110001'), (x'3130'); + +select * from t_bit; + a +------------------ + 0000000000000011 + 0000000000000010 +(2 rows) + +select * from t_bin; + a +---------------- + \x313100000000 + \x313000000000 +(2 rows) + +drop table t_bit; +drop table t_bin; + +reset dolphin.sql_mode; + +drop schema bxconst_test cascade; +reset current_schema; diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 0dbf76b01..6591f2f45 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -1,268 +1,264 @@ -create schema conv_cast_test; -set current_schema to 'conv_cast_test'; - -select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); +create schema conv_cast_test; +set current_schema to 'conv_cast_test'; +select 
conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-366666666666666666666666666666666666666, 10, 8); +select conv(-366666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-266666666666666666666666666666666666666, 10, 8); +select conv(-266666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105729,10,8); +select conv(-170141183460469231731687303715884105729,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105728,10,8); +select conv(-170141183460469231731687303715884105728,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-170141183460469231731687303715884105727,10,8); +select conv(-170141183460469231731687303715884105727,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551617,10,8); +select conv(-18446744073709551617,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551616,10,8); +select conv(-18446744073709551616,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(-18446744073709551615,10,8); +select conv(-18446744073709551615,10,8); conv ------ 1 (1 row) -select conv(-9223372036854775809,10,8); +select conv(-9223372036854775809,10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv(-9223372036854775808,10,8); +select conv(-9223372036854775808,10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv(-9223372036854775807,10,8); +select conv(-9223372036854775807,10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv(-123456,10,8); +select conv(-123456,10,8); conv ------------------------ 1777777777777777416700 (1 
row) -select conv(-1,10,8); +select conv(-1,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(0,10,8); +select conv(0,10,8); conv ------ 0 (1 row) -select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(366666666666666666666666666666666666666, 10, 8); +select conv(366666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(266666666666666666666666666666666666666, 10, 8); +select conv(266666666666666666666666666666666666666, 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105729,10,8); +select conv(170141183460469231731687303715884105729,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105728,10,8); +select conv(170141183460469231731687303715884105728,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(170141183460469231731687303715884105727,10,8); +select conv(170141183460469231731687303715884105727,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551617,10,8); +select conv(18446744073709551617,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551616,10,8); +select conv(18446744073709551616,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(18446744073709551615,10,8); +select conv(18446744073709551615,10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv(9223372036854775809,10,8); +select conv(9223372036854775809,10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv(9223372036854775808,10,8); +select conv(9223372036854775808,10,8); conv ------------------------ 
1000000000000000000000 (1 row) -select conv(9223372036854775807,10,8); +select conv(9223372036854775807,10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv(123456,10,8); +select conv(123456,10,8); conv -------- 361100 (1 row) -select conv(1,10,8); +select conv(1,10,8); conv ------ 1 (1 row) - -select time'-2 34:25:59'::float4; +select time'-2 34:25:59'::float4; float4 --------- -822559 (1 row) -select time'-838:59:59'::float4; +select time'-838:59:59'::float4; float4 -------------- -8.38596e+06 (1 row) -select time'0'::float4; +select time'0'::float4; float4 -------- 0 (1 row) -select time'-0'::float4; +select time'-0'::float4; float4 -------- 0 (1 row) -select time'-2 34:25:59'::float8; +select time'-2 34:25:59'::float8; float8 --------- -822559 (1 row) -select time'-838:59:59'::float8; +select time'-838:59:59'::float8; float8 ---------- -8385959 (1 row) -select time'0'::float8; +select time'0'::float8; float8 -------- 0 (1 row) -select time'-0'::float8; +select time'-0'::float8; float8 -------- 0 (1 row) - -select 127::tinyint::bit(64)::tinyint; +select 127::tinyint::bit(64)::tinyint; int1 ------ 127 (1 row) -select 32767::smallint::bit(64)::smallint; +select 32767::smallint::bit(64)::smallint; int2 ------- 32767 (1 row) -select 2147483647::int::bit(64)::int; +select 2147483647::int::bit(64)::int; int4 ------------ 2147483647 (1 row) -select 9223372036854775807::bigint::bit(64)::bigint; +select 9223372036854775807::bigint::bit(64)::bigint; int8 --------------------- 9223372036854775807 (1 row) - -select '255'::uint1::time; +select '255'::uint1::time; time ---------- 00:02:55 (1 row) -select '65535'::uint2::time; +select '65535'::uint2::time; time ---------- 06:55:35 (1 row) -select '4294967295'::uint4::time; +select '4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -270,7 +266,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) -select 
'18446744073709551615'::uint8::time; +select '18446744073709551615'::uint8::time; WARNING: invalid input syntax for type time: "18446744073709551615" CONTEXT: referenced column: time time @@ -278,8 +274,7 @@ CONTEXT: referenced column: time 00:00:00 (1 row) - -select '4294967295'::uint1::time; +select '4294967295'::uint1::time; WARNING: tinyint unsigned out of range CONTEXT: referenced column: time time @@ -287,7 +282,7 @@ CONTEXT: referenced column: time 00:02:55 (1 row) -select '4294967295'::uint2::time; +select '4294967295'::uint2::time; WARNING: smallint unsigned out of range CONTEXT: referenced column: time time @@ -295,7 +290,7 @@ CONTEXT: referenced column: time 06:55:35 (1 row) -select '4294967295'::uint4::time; +select '4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -303,7 +298,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) -select '4294967295'::uint8::time; +select '4294967295'::uint8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -311,8 +306,7 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) - -select '4294967295'::int1::time; +select '4294967295'::int1::time; WARNING: value "4294967295" is out of range for type tinyint LINE 1: select '4294967295'::int1::time; ^ @@ -322,21 +316,21 @@ CONTEXT: referenced column: time 00:01:27 (1 row) -select '4294967295'::int2::time; +select '4294967295'::int2::time; WARNING: value "4294967295" is out of range for type smallint LINE 1: select '4294967295'::int2::time; ^ CONTEXT: referenced column: time ERROR: time out of range CONTEXT: referenced column: time -select '4294967295'::int4::time; +select '4294967295'::int4::time; WARNING: value "4294967295" is out of range for type integer LINE 1: select '4294967295'::int4::time; ^ CONTEXT: referenced column: time ERROR: Incorrect time value CONTEXT: referenced column: time -select '4294967295'::int8::time; +select 
'4294967295'::int8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time time @@ -344,286 +338,308 @@ CONTEXT: referenced column: time 429497:13:35 (1 row) - -select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-366666666666666666666666666666666666666', 10, 8); +select conv('-366666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-266666666666666666666666666666666666666', 10, 8); +select conv('-266666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105729',10,8); +select conv('-170141183460469231731687303715884105729',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105728',10,8); +select conv('-170141183460469231731687303715884105728',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-170141183460469231731687303715884105727',10,8); +select conv('-170141183460469231731687303715884105727',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551617',10,8); +select conv('-18446744073709551617',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551616',10,8); +select conv('-18446744073709551616',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('-18446744073709551615',10,8); +select conv('-18446744073709551615',10,8); conv ------ 1 (1 row) -select conv('-9223372036854775809',10,8); +select conv('-9223372036854775809',10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv('-9223372036854775808',10,8); +select 
conv('-9223372036854775808',10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv('-9223372036854775807',10,8); +select conv('-9223372036854775807',10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv('-123456',10,8); +select conv('-123456',10,8); conv ------------------------ 1777777777777777416700 (1 row) -select conv('-1',10,8); +select conv('-1',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('0',10,8); +select conv('0',10,8); conv ------ 0 (1 row) -select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('366666666666666666666666666666666666666', 10, 8); +select conv('366666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('266666666666666666666666666666666666666', 10, 8); +select conv('266666666666666666666666666666666666666', 10, 8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105729',10,8); +select conv('170141183460469231731687303715884105729',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105728',10,8); +select conv('170141183460469231731687303715884105728',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('170141183460469231731687303715884105727',10,8); +select conv('170141183460469231731687303715884105727',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('18446744073709551617',10,8); +select conv('18446744073709551617',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('18446744073709551616',10,8); +select conv('18446744073709551616',10,8); conv ------------------------ 1777777777777777777777 (1 
row) -select conv('18446744073709551615',10,8); +select conv('18446744073709551615',10,8); conv ------------------------ 1777777777777777777777 (1 row) -select conv('9223372036854775809',10,8); +select conv('9223372036854775809',10,8); conv ------------------------ 1000000000000000000001 (1 row) -select conv('9223372036854775808',10,8); +select conv('9223372036854775808',10,8); conv ------------------------ 1000000000000000000000 (1 row) -select conv('9223372036854775807',10,8); +select conv('9223372036854775807',10,8); conv ----------------------- 777777777777777777777 (1 row) -select conv('123456',10,8); +select conv('123456',10,8); conv -------- 361100 (1 row) -select conv('1',10,8); +select conv('1',10,8); conv ------ 1 (1 row) - -select ''::bit; +select conv('10', 8, 10); + conv +------ + 8 +(1 row) + +select conv('180', 8, 10); + conv +------ + 1 +(1 row) + +select conv('910', 8, 10); + conv +------ + 0 +(1 row) + +select conv('B1', 8, 10); + conv +------ + 0 +(1 row) + +select conv('B1', 16, 10); + conv +------ + 177 +(1 row) + +select ''::bit; bit ----- 0 (1 row) -select ''::bit(10); +select ''::bit(10); bit ------------ 0000000000 (1 row) -select ''::bit(64); +select ''::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) - -set dolphin.b_compatibility_mode to on; - -select ''::bit; +set dolphin.b_compatibility_mode to on; +select ''::bit; bit ----- 0 (1 row) -select ''::bit(10); +select ''::bit(10); bit ------------ 0000000000 (1 row) -select ''::bit(64); +select ''::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) - -select 8385959::char(30)::time; +select 8385959::char(30)::time; time ----------- 838:59:59 (1 row) -select -8385958.999999::char(30)::time(6); +select -8385958.999999::char(30)::time(6); ?column? 
------------ -838:59:59 (1 row) -select -8385959::varchar(30)::time; +select -8385959::varchar(30)::time; ?column? ------------ -838:59:59 (1 row) -select 8385958.999999::varchar(30)::time(6); +select 8385958.999999::varchar(30)::time(6); time ----------- 838:59:59 (1 row) - -select '838:59:59'::char(30)::time; +select '838:59:59'::char(30)::time; time ----------- 838:59:59 (1 row) -select '-838:59:58.999999'::char(30)::time(6); +select '-838:59:58.999999'::char(30)::time(6); time ------------ -838:59:59 (1 row) -select '-838:59:59'::varchar(30)::time; +select '-838:59:59'::varchar(30)::time; time ------------ -838:59:59 (1 row) -select '838:59:58.999999'::varchar(30)::time(6); +select '838:59:58.999999'::varchar(30)::time(6); time ----------- 838:59:59 (1 row) - -SELECT '20220101121212'::date; +SELECT '20220101121212'::date; date ------------ 2022-01-01 (1 row) -SELECT '20220101121212.5'::date; +SELECT '20220101121212.5'::date; date ------------ 2022-01-01 (1 row) - -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); WARNING: bigint unsigned out of range CONTEXT: referenced column: uint8 uint8 @@ -631,7 +647,7 @@ CONTEXT: referenced column: uint8 18446744073709551615 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 WARNING: bigint out of range @@ -641,13 +657,13 @@ CONTEXT: referenced column: int8 -1 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); uint8 ---------------------- 
18446744073709551615 (1 row) -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 int8 @@ -655,19 +671,19 @@ CONTEXT: referenced column: int8 -1 (1 row) -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); uint8 --------------------- 9223372036854775807 (1 row) -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); int8 --------------------- 9223372036854775807 (1 row) -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); WARNING: bigint unsigned out of range CONTEXT: referenced column: uint8 uint8 @@ -675,7 +691,7 @@ CONTEXT: referenced column: uint8 18446744073709551615 (1 row) -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); WARNING: bigint out of range CONTEXT: referenced column: int8 WARNING: bigint out of range @@ -685,85 +701,81 @@ CONTEXT: referenced column: int8 -1 (1 row) - -select hex('9999-12-31 23:59:59'::datetime::bit(64)); +select hex('9999-12-31 23:59:59'::datetime::bit(64)); hex ------------------ 00005af105d18777 (1 row) -select hex('99991231235959'::datetime::bit(64)); +select hex('99991231235959'::datetime::bit(64)); hex ------------------ 00005af105d18777 (1 row) - -select 'true'::bool::bit; +select 'true'::bool::bit; bit ----- 1 (1 row) -select 'true'::bool::bit(10); +select 'true'::bool::bit(10); bit ------------ 
0000000001 (1 row) -select 'true'::bool::bit(64); +select 'true'::bool::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000001 (1 row) -select 'true'::bool::float4; +select 'true'::bool::float4; float4 -------- 1 (1 row) -select 'true'::bool::float8; +select 'true'::bool::float8; float8 -------- 1 (1 row) - -select 'false'::bool::bit; +select 'false'::bool::bit; bit ----- 0 (1 row) -select 'false'::bool::bit(10); +select 'false'::bool::bit(10); bit ------------ 0000000000 (1 row) -select 'false'::bool::bit(64); +select 'false'::bool::bit(64); bit ------------------------------------------------------------------ 0000000000000000000000000000000000000000000000000000000000000000 (1 row) -select 'false'::bool::float4; +select 'false'::bool::float4; float4 -------- 0 (1 row) -select 'false'::bool::float8; +select 'false'::bool::float8; float8 -------- 0 (1 row) - -create table test_date(a date); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; +create table test_date(a date); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -771,10 +783,10 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(0); +insert into test_date values(0); ERROR: Out of range value for date CONTEXT: referenced column: a -select 1::date; +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -782,18 +794,18 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); ERROR: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = 
sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; date ------------ 0000-00-00 (1 row) -insert into test_date values(0); -select 1::date; +insert into test_date values(0); +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -801,11 +813,11 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); ERROR: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -813,10 +825,10 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(0); +insert into test_date values(0); WARNING: Out of range value for date CONTEXT: referenced column: a -select 1::date; +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -824,18 +836,18 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); WARNING: Out of range value for date CONTEXT: referenced column: a -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; date ------------ 0000-00-00 (1 row) -insert into test_date values(0); -select 1::date; +insert into test_date 
values(0); +select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date date @@ -843,11 +855,11 @@ CONTEXT: referenced column: date 0000-00-00 (1 row) -insert into test_date values(1); +insert into test_date values(1); WARNING: Out of range value for date CONTEXT: referenced column: a -reset dolphin.sql_mode; -select * from test_date; +reset dolphin.sql_mode; +select * from test_date; a ------------ 0000-00-00 @@ -857,7 +869,106 @@ select * from test_date; 0000-00-00 (5 rows) - -drop schema conv_cast_test cascade; +select b'11100000111000'; + ?column? +---------------- + 11100000111000 +(1 row) + +select conv(b'11100000111000', 10, 8); + conv +------- + 34070 +(1 row) + +select conv(b'11100000111000', 20, 8); + conv +------- + 34070 +(1 row) + +select conv(b'11100000111000'::int8, 20, 8); + conv +-------- + 571546 +(1 row) + +select x'4c'; + ?column? +---------- + 01001100 +(1 row) + +select conv(x'4c', 10, 8); + conv +------ + 114 +(1 row) + +select conv(x'4c', 30, 8); + conv +------ + 114 +(1 row) + +select conv(x'4c'::int8, 30, 8); + conv +------ + 330 +(1 row) + +set dolphin.sql_mode = treat_bxconst_as_binary; +select b'11100000111000'; + ?column? +---------- + \x3838 +(1 row) + +select conv(b'11100000111000', 10, 8); + conv +------ + 130 +(1 row) + +select conv(b'11100000111000', 20, 8); + conv +------ + 250 +(1 row) + +select conv(b'11100000111000'::int8, 20, 8); + conv +------ + 250 +(1 row) + +select x'4c'; + ?column? 
+---------- + \x4c +(1 row) + +select conv(x'4c', 10, 8); + conv +------ + 0 +(1 row) + +select conv(x'4c', 30, 8); + conv +------ + 25 +(1 row) + +select conv(x'4c'::int8, 30, 8); +WARNING: Truncated incorrect INTEGER value: 'L' +CONTEXT: referenced column: conv + conv +------ + 0 +(1 row) + +reset dolphin.sql_mode; +drop schema conv_cast_test cascade; NOTICE: drop cascades to table test_date -reset current_schema; +reset current_schema; diff --git a/contrib/dolphin/include/plugin_commands/mysqlmode.h b/contrib/dolphin/include/plugin_commands/mysqlmode.h index 259d6a932..39f897946 100644 --- a/contrib/dolphin/include/plugin_commands/mysqlmode.h +++ b/contrib/dolphin/include/plugin_commands/mysqlmode.h @@ -20,7 +20,8 @@ #define OPT_SQL_MODE_BLOCK_RETURN_MULTI_RESULTS (1 << 7) #define OPT_SQL_MODE_ATUO_RECOMPILE_FUNCTION (1 << 8) #define OPT_SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO (1 << 9) -#define OPT_SQL_MODE_MAX 10 +#define OPT_SQL_MODE_TREAT_BXCONST_AS_BINARY (1<<10) +#define OPT_SQL_MODE_MAX 11 #define SQL_MODE_STRICT() ((GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && !CMD_TAG_IS_SELECT()) #define SQL_MODE_STRICT_ON_SELECT() ((GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_STRICT) && CMD_TAG_IS_SELECT()) #define SQL_MODE_NOT_STRICT_ON_INSERT() \ @@ -37,6 +38,7 @@ #define SQL_MODE_ATUO_RECOMPILE_FUNCTION() (GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_ATUO_RECOMPILE_FUNCTION) #define SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO() (GetSessionContext()->sqlModeFlags & \ OPT_SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO) +#define SQL_MODE_TREAT_BXCONST_AS_BINARY() (GetSessionContext()->sqlModeFlags & OPT_SQL_MODE_TREAT_BXCONST_AS_BINARY) extern int32 PgAtoiInternal(char* s, int size, int c, bool sqlModeStrict, bool can_ignore, bool isUnsigned = false); extern void CheckSpaceAndDotInternal(char& digitAfterDot, const char** ptr, diff --git a/contrib/dolphin/include/plugin_utils/varlena.h b/contrib/dolphin/include/plugin_utils/varlena.h new file mode 100644 index 
000000000..43fde2d48 --- /dev/null +++ b/contrib/dolphin/include/plugin_utils/varlena.h @@ -0,0 +1,11 @@ +#ifndef VARLENA_H +#define VARLENA_H + +#include "plugin_postgres.h" +#include "fmgr.h" + +#ifdef DOLPHIN +extern Datum bit_blob(VarBit* input); +#endif + +#endif \ No newline at end of file diff --git a/contrib/dolphin/plugin_parser/parse_node.cpp b/contrib/dolphin/plugin_parser/parse_node.cpp index 2e3731c3c..cbd757c23 100644 --- a/contrib/dolphin/plugin_parser/parse_node.cpp +++ b/contrib/dolphin/plugin_parser/parse_node.cpp @@ -31,7 +31,10 @@ #include "utils/syscache.h" #ifdef DOLPHIN #include "plugin_utils/varbit.h" +#include "plugin_commands/mysqlmode.h" +#include "plugin_utils/varlena.h" extern "C" Datum bit_bin_in(PG_FUNCTION_ARGS); +extern "C" Datum bittobinary(PG_FUNCTION_ARGS); #else #include "utils/varbit.h" #endif @@ -540,12 +543,18 @@ Const* make_const(ParseState* pstate, Value* value, int location) #ifdef DOLPHIN val = DirectFunctionCall3( bit_bin_in, CStringGetDatum(strVal(value)), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (SQL_MODE_TREAT_BXCONST_AS_BINARY()) { + val = bit_blob(DatumGetVarBitP(val)); + typid = BINARYOID; + } else { + typid = BITOID; + } #else val = DirectFunctionCall3( bit_in, CStringGetDatum(strVal(value)), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + typid = BITOID; #endif cancel_parser_errposition_callback(&pcbstate); - typid = BITOID; typelen = -1; typebyval = false; break; diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index 3f72ec041..e0a6ec9f9 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -101,7 +101,8 @@ static const struct sql_mode_entry sql_mode_options[OPT_SQL_MODE_MAX] = { {"pad_char_to_full_length", OPT_SQL_MODE_PAD_CHAR_TO_FULL_LENGTH}, {"block_return_multi_results", OPT_SQL_MODE_BLOCK_RETURN_MULTI_RESULTS}, {"auto_recompile_function", OPT_SQL_MODE_ATUO_RECOMPILE_FUNCTION}, - {"error_for_division_by_zero", 
OPT_SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO} + {"error_for_division_by_zero", OPT_SQL_MODE_ERROR_FOR_DIVISION_BY_ZERO}, + {"treat_bxconst_as_binary", OPT_SQL_MODE_TREAT_BXCONST_AS_BINARY}, }; #define DOLPHIN_TYPES_NUM 12 diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index f08bb0341..6b552c663 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -21031,7 +21031,7 @@ static int str_to_int64(char *str, int len, int128 *result, int *from_base_s) } else { break; /*illegal character*/ } - if ((num > 9) && (from_base <= num)) { + if (from_base <= num) { break; /*param error*/ } sum_128 = sum_128 * from_base + num; diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 51e20b91a..99705d2ba 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -59,6 +59,7 @@ #include "plugin_utils/timestamp.h" #include "plugin_utils/date.h" #include "libpq/libpq-int.h" +#include "plugin_utils/varlena.h" #define BETWEEN_AND_ARGC 3 #define SUBSTR_WITH_LEN_OFFSET 2 diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 1dbbed42f..c907dd1fd 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -86,3 +86,14 @@ mediumblob CREATE OR REPLACE FUNCTION pg_catalog.longblob_rawout ( longblob ) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; + +-- Make the result of oct(bit) and conv(bit) identical to Mysql +DROP FUNCTION IF EXISTS pg_catalog.conv(bit, int4, int4) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.oct(bit); +CREATE OR REPLACE FUNCTION pg_catalog.oct(bit) RETURNS text AS +$$ +BEGIN + RETURN 0; +END; +$$ diff --git a/contrib/dolphin/sql/bxconst_test.sql b/contrib/dolphin/sql/bxconst_test.sql new file 
mode 100644 index 000000000..dfba4b06e --- /dev/null +++ b/contrib/dolphin/sql/bxconst_test.sql @@ -0,0 +1,49 @@ +create schema bxconst_test; +set current_schema to 'bxconst_test'; + +set dolphin.b_compatibility_mode to on; + +-- å°†bxconst当作bitå¤„ç† +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select b'11100000111000'; +select pg_typeof(b'11100000111000'); +select x'4c'; +select pg_typeof(x'4c'); + +insert into t_bit values(b'11100000111000'), (x'4c'); +insert into t_bin values(b'11100000111000'), (x'4c'); + +select * from t_bit; +select * from t_bin; + +drop table t_bit; +drop table t_bin; + +-- å°†bxconst当作binaryå¤„ç† +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select b'11100000111000'; +select pg_typeof(b'11100000111000'); +select x'4c'; +select pg_typeof(x'4c'); + +insert into t_bit values(b'11100000111000'), (x'4c'); +insert into t_bin values(b'11100000111000'), (x'4c'); + +select * from t_bit; +select * from t_bin; + +drop table t_bit; +drop table t_bin; + +reset dolphin.sql_mode; + +drop schema bxconst_test cascade; +reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/sql/conv_cast_test.sql b/contrib/dolphin/sql/conv_cast_test.sql index 22d9a00c3..1c468cf51 100755 --- a/contrib/dolphin/sql/conv_cast_test.sql +++ b/contrib/dolphin/sql/conv_cast_test.sql @@ -1,165 +1,193 @@ -create schema conv_cast_test; -set current_schema to 'conv_cast_test'; - -select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); -select conv(-366666666666666666666666666666666666666, 10, 8); -select 
conv(-266666666666666666666666666666666666666, 10, 8); -select conv(-170141183460469231731687303715884105729,10,8); -select conv(-170141183460469231731687303715884105728,10,8); -select conv(-170141183460469231731687303715884105727,10,8); -select conv(-18446744073709551617,10,8); -select conv(-18446744073709551616,10,8); -select conv(-18446744073709551615,10,8); -select conv(-9223372036854775809,10,8); -select conv(-9223372036854775808,10,8); -select conv(-9223372036854775807,10,8); -select conv(-123456,10,8); -select conv(-1,10,8); -select conv(0,10,8); -select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); -select conv(366666666666666666666666666666666666666, 10, 8); -select conv(266666666666666666666666666666666666666, 10, 8); -select conv(170141183460469231731687303715884105729,10,8); -select conv(170141183460469231731687303715884105728,10,8); -select conv(170141183460469231731687303715884105727,10,8); -select conv(18446744073709551617,10,8); -select conv(18446744073709551616,10,8); -select conv(18446744073709551615,10,8); -select conv(9223372036854775809,10,8); -select conv(9223372036854775808,10,8); -select conv(9223372036854775807,10,8); -select conv(123456,10,8); -select conv(1,10,8); - -select time'-2 34:25:59'::float4; -select time'-838:59:59'::float4; -select time'0'::float4; -select time'-0'::float4; -select time'-2 34:25:59'::float8; -select time'-838:59:59'::float8; -select time'0'::float8; -select time'-0'::float8; - -select 127::tinyint::bit(64)::tinyint; -select 32767::smallint::bit(64)::smallint; -select 2147483647::int::bit(64)::int; -select 9223372036854775807::bigint::bit(64)::bigint; - -select '255'::uint1::time; -select '65535'::uint2::time; -select '4294967295'::uint4::time; -select '18446744073709551615'::uint8::time; - -select '4294967295'::uint1::time; -select '4294967295'::uint2::time; -select '4294967295'::uint4::time; -select '4294967295'::uint8::time; - -select '4294967295'::int1::time; -select 
'4294967295'::int2::time; -select '4294967295'::int4::time; -select '4294967295'::int8::time; - -select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); -select conv('-366666666666666666666666666666666666666', 10, 8); -select conv('-266666666666666666666666666666666666666', 10, 8); -select conv('-170141183460469231731687303715884105729',10,8); -select conv('-170141183460469231731687303715884105728',10,8); -select conv('-170141183460469231731687303715884105727',10,8); -select conv('-18446744073709551617',10,8); -select conv('-18446744073709551616',10,8); -select conv('-18446744073709551615',10,8); -select conv('-9223372036854775809',10,8); -select conv('-9223372036854775808',10,8); -select conv('-9223372036854775807',10,8); -select conv('-123456',10,8); -select conv('-1',10,8); -select conv('0',10,8); -select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); -select conv('366666666666666666666666666666666666666', 10, 8); -select conv('266666666666666666666666666666666666666', 10, 8); -select conv('170141183460469231731687303715884105729',10,8); -select conv('170141183460469231731687303715884105728',10,8); -select conv('170141183460469231731687303715884105727',10,8); -select conv('18446744073709551617',10,8); -select conv('18446744073709551616',10,8); -select conv('18446744073709551615',10,8); -select conv('9223372036854775809',10,8); -select conv('9223372036854775808',10,8); -select conv('9223372036854775807',10,8); -select conv('123456',10,8); -select conv('1',10,8); - -select ''::bit; -select ''::bit(10); -select ''::bit(64); - -set dolphin.b_compatibility_mode to on; - -select ''::bit; -select ''::bit(10); -select ''::bit(64); - -select 8385959::char(30)::time; -select -8385958.999999::char(30)::time(6); -select -8385959::varchar(30)::time; -select 8385958.999999::varchar(30)::time(6); - -select '838:59:59'::char(30)::time; -select '-838:59:58.999999'::char(30)::time(6); -select 
'-838:59:59'::varchar(30)::time; -select '838:59:58.999999'::varchar(30)::time(6); - -SELECT '20220101121212'::date; -SELECT '20220101121212.5'::date; - -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); -select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); - -select hex('9999-12-31 23:59:59'::datetime::bit(64)); -select hex('99991231235959'::datetime::bit(64)); - -select 'true'::bool::bit; -select 'true'::bool::bit(10); -select 'true'::bool::bit(64); -select 'true'::bool::float4; -select 'true'::bool::float8; - -select 'false'::bool::bit; -select 'false'::bool::bit(10); -select 'false'::bool::bit(64); -select 'false'::bool::float4; -select 'false'::bool::float8; - -create table test_date(a date); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = 
sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; -select 0::date; -insert into test_date values(0); -select 1::date; -insert into test_date values(1); -reset dolphin.sql_mode; -select * from test_date; - -drop schema conv_cast_test cascade; -reset current_schema; +create schema conv_cast_test; +set current_schema to 'conv_cast_test'; + +select conv(-211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(-366666666666666666666666666666666666666, 10, 8); +select conv(-266666666666666666666666666666666666666, 10, 8); +select conv(-170141183460469231731687303715884105729,10,8); +select conv(-170141183460469231731687303715884105728,10,8); +select conv(-170141183460469231731687303715884105727,10,8); +select conv(-18446744073709551617,10,8); +select conv(-18446744073709551616,10,8); +select conv(-18446744073709551615,10,8); +select conv(-9223372036854775809,10,8); +select conv(-9223372036854775808,10,8); +select conv(-9223372036854775807,10,8); +select conv(-123456,10,8); +select conv(-1,10,8); +select conv(0,10,8); +select conv(211111111111111111111111111111111111111111111111111111111177777,10,8); +select conv(366666666666666666666666666666666666666, 10, 8); +select conv(266666666666666666666666666666666666666, 10, 8); +select conv(170141183460469231731687303715884105729,10,8); +select conv(170141183460469231731687303715884105728,10,8); +select conv(170141183460469231731687303715884105727,10,8); +select conv(18446744073709551617,10,8); +select conv(18446744073709551616,10,8); +select conv(18446744073709551615,10,8); +select conv(9223372036854775809,10,8); +select conv(9223372036854775808,10,8); +select conv(9223372036854775807,10,8); +select conv(123456,10,8); 
+select conv(1,10,8); + +select time'-2 34:25:59'::float4; +select time'-838:59:59'::float4; +select time'0'::float4; +select time'-0'::float4; +select time'-2 34:25:59'::float8; +select time'-838:59:59'::float8; +select time'0'::float8; +select time'-0'::float8; + +select 127::tinyint::bit(64)::tinyint; +select 32767::smallint::bit(64)::smallint; +select 2147483647::int::bit(64)::int; +select 9223372036854775807::bigint::bit(64)::bigint; + +select '255'::uint1::time; +select '65535'::uint2::time; +select '4294967295'::uint4::time; +select '18446744073709551615'::uint8::time; + +select '4294967295'::uint1::time; +select '4294967295'::uint2::time; +select '4294967295'::uint4::time; +select '4294967295'::uint8::time; + +select '4294967295'::int1::time; +select '4294967295'::int2::time; +select '4294967295'::int4::time; +select '4294967295'::int8::time; + +select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('-366666666666666666666666666666666666666', 10, 8); +select conv('-266666666666666666666666666666666666666', 10, 8); +select conv('-170141183460469231731687303715884105729',10,8); +select conv('-170141183460469231731687303715884105728',10,8); +select conv('-170141183460469231731687303715884105727',10,8); +select conv('-18446744073709551617',10,8); +select conv('-18446744073709551616',10,8); +select conv('-18446744073709551615',10,8); +select conv('-9223372036854775809',10,8); +select conv('-9223372036854775808',10,8); +select conv('-9223372036854775807',10,8); +select conv('-123456',10,8); +select conv('-1',10,8); +select conv('0',10,8); +select conv('211111111111111111111111111111111111111111111111111111111177777',10,8); +select conv('366666666666666666666666666666666666666', 10, 8); +select conv('266666666666666666666666666666666666666', 10, 8); +select conv('170141183460469231731687303715884105729',10,8); +select conv('170141183460469231731687303715884105728',10,8); +select 
conv('170141183460469231731687303715884105727',10,8); +select conv('18446744073709551617',10,8); +select conv('18446744073709551616',10,8); +select conv('18446744073709551615',10,8); +select conv('9223372036854775809',10,8); +select conv('9223372036854775808',10,8); +select conv('9223372036854775807',10,8); +select conv('123456',10,8); +select conv('1',10,8); + +select conv('10', 8, 10); +select conv('180', 8, 10); +select conv('910', 8, 10); +select conv('B1', 8, 10); +select conv('B1', 16, 10); + +select ''::bit; +select ''::bit(10); +select ''::bit(64); + +set dolphin.b_compatibility_mode to on; + +select ''::bit; +select ''::bit(10); +select ''::bit(64); + +select 8385959::char(30)::time; +select -8385958.999999::char(30)::time(6); +select -8385959::varchar(30)::time; +select 8385958.999999::varchar(30)::time(6); + +select '838:59:59'::char(30)::time; +select '-838:59:58.999999'::char(30)::time(6); +select '-838:59:59'::varchar(30)::time; +select '838:59:58.999999'::varchar(30)::time(6); + +SELECT '20220101121212'::date; +SELECT '20220101121212.5'::date; + +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'1111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'111111111111111111111111111111111111111111111111111111111111111' as signed); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as unsigned); +select cast(b'11111111111111111111111111111111111111111111111111111111111111111' as signed); + +select hex('9999-12-31 23:59:59'::datetime::bit(64)); +select hex('99991231235959'::datetime::bit(64)); + +select 
'true'::bool::bit; +select 'true'::bool::bit(10); +select 'true'::bool::bit(64); +select 'true'::bool::float4; +select 'true'::bool::float8; + +select 'false'::bool::bit; +select 'false'::bool::bit(10); +select 'false'::bool::bit(64); +select 'false'::bool::float4; +select 'false'::bool::float8; + +create table test_date(a date); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,pad_char_to_full_length,auto_recompile_function; +select 0::date; +insert into test_date values(0); +select 1::date; +insert into test_date values(1); +reset dolphin.sql_mode; +select * from test_date; + +select b'11100000111000'; +select conv(b'11100000111000', 10, 8); +select conv(b'11100000111000', 20, 8); +select conv(b'11100000111000'::int8, 20, 8); +select x'4c'; +select conv(x'4c', 10, 8); +select conv(x'4c', 30, 8); +select conv(x'4c'::int8, 30, 8); + +set dolphin.sql_mode = treat_bxconst_as_binary; + +select b'11100000111000'; +select conv(b'11100000111000', 10, 8); +select conv(b'11100000111000', 20, 8); +select conv(b'11100000111000'::int8, 20, 8); +select x'4c'; +select conv(x'4c', 10, 8); +select conv(x'4c', 30, 8); +select conv(x'4c'::int8, 30, 8); + +reset dolphin.sql_mode; + +drop schema conv_cast_test cascade; +reset current_schema; \ No 
newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 757bf33fe..a14048c14 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -155,3 +155,14 @@ mediumblob CREATE OR REPLACE FUNCTION pg_catalog.longblob_rawout ( longblob ) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_blob_rawout'; + +-- Make the result of oct(bit) and conv(bit) identical to Mysql +DROP FUNCTION IF EXISTS pg_catalog.conv(bit, int4, int4) CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.conv(bit, int4, int4) +RETURNS text AS $$ SELECT pg_catalog.conv($1::int8, 10, $3) $$ LANGUAGE SQL; + +DROP FUNCTION IF EXISTS pg_catalog.oct(bit); + +CREATE OR REPLACE FUNCTION pg_catalog.oct(t1 bit) +RETURNS text AS $$ SELECT pg_catalog.conv(t1, 10, 8) $$ LANGUAGE SQL; -- Gitee From 66b1686c59a8d46e1366ae642ca9748dd3c17273 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 6 Nov 2023 16:27:50 +0800 Subject: [PATCH 048/434] MySQL compatibility: 1. random_bytes(len)/rand(n) function 2. binary/ascii column attribute 3. 
0x+interget input --- contrib/dolphin/expected/db_b_parser3.out | 311 +++++++++++++++++- .../dolphin/expected/test_charset_collate.out | 228 +++++++++++++ .../dolphin/include/plugin_parser/kwlist.h | 3 + .../include/plugin_parser/parse_type.h | 3 + contrib/dolphin/plugin_parser/gram.y | 92 +++++- contrib/dolphin/plugin_parser/parse_type.cpp | 67 +++- contrib/dolphin/plugin_parser/scan.l | 21 ++ contrib/dolphin/plugin_utils/adt/varlena.cpp | 49 +++ .../rollback_script/dolphin--3.0--2.0.sql | 22 ++ contrib/dolphin/sql/db_b_parser3.sql | 146 +++++++- contrib/dolphin/sql/test_charset_collate.sql | 51 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 23 ++ 12 files changed, 995 insertions(+), 21 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index 9b3eac74f..c56472890 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -27,10 +27,313 @@ select upper('ABc'), upper('哈哈'), upper('123456'),upper('哈市&%%ï¿¥#'),upp ABC | 哈哈 | 123456 | 哈市&%%ï¿¥# | (1 row) ---测试点四:验è¯randå’Œrandom函数 --- select rand(), random();--䏿˜¾ç¤ºå°æ•°ç‚¹å‰çš„0 --- set behavior_compat_options := 'display_leading_zero'; --- select rand(), random();--æ˜¾ç¤ºå°æ•°ç‚¹å‰çš„0 +--测试点四:验è¯randå’Œrandom_bytes函数 +select rand(18446744073709551615) = rand(18446744073709551616); +WARNING: Truncated incorrect DECIMAL value + ?column? +---------- + t +(1 row) + +select rand(-9223372036854775808) = rand(-9223372036854775809); +WARNING: Truncated incorrect DECIMAL value + ?column? +---------- + t +(1 row) + +select rand(0) = rand(4294967296); + ?column? +---------- + t +(1 row) + +select rand(1) = rand(4294967297); + ?column? +---------- + t +(1 row) + +select rand(0) = rand(null); + ?column? 
+---------- + t +(1 row) + +select random_bytes(null); + random_bytes +-------------- + +(1 row) + +select random_bytes(0); +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: random_bytes +select random_bytes(1); + random_bytes +-------------- +--?.* +(1 row) + +select random_bytes(1024); + random_bytes +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +--?.* +(1 row) + +select random_bytes(1025); +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: random_bytes +set dolphin.b_compatibility_mode to on; +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); +select +rand(`int1`)=rand(cast(`int1` as signed)), +rand(`uint1`)=rand(cast(`uint1` as signed)), 
+rand(`int2`)=rand(cast(`int2` as signed)), +rand(`uint2`)=rand(cast(`uint2` as signed)), +rand(`int4`)=rand(cast(`int4` as signed)), +rand(`uint4`)=rand(cast(`uint4` as signed)), +rand(`int8`)=rand(cast(`int8` as signed)), +rand(`uint8`)=rand(cast(`uint8` as signed)), +rand(`float4`)=rand(cast(`float4` as signed)), +rand(`float8`)=rand(cast(`float8` as signed)), +rand(`numeric`)=rand(cast(`numeric` as signed)), +rand(`bit1`)=rand(cast(`bit1` as signed)), +rand(`bit64`)=rand(cast(`bit64` as signed)), +rand(`boolean`)=rand(cast(`boolean` as signed)), +rand(`date`)=rand(cast(`date` as signed)), +rand(`time`)=rand(cast(`time` as signed)), +rand(`time(4)`)=rand(cast(`time(4)` as signed)), +rand(`datetime`)=rand(cast(`datetime` as signed)), +rand(`datetime(4)`)=rand(cast(`datetime(4)` as signed)), +rand(`timestamp`)=rand(cast(`timestamp` as signed)), +rand(`timestamp(4)`)=rand(cast(`timestamp(4)` as signed)), +rand(`year`)=rand(cast(`year` as signed)), +rand(`char`)=rand(cast(`char` as signed)), +rand(`varchar`)=rand(cast(`varchar` as signed)), +rand(`binary`)=rand(cast(`binary` as signed)), +rand(`varbinary`)=rand(cast(`varbinary` as signed)), +rand(`tinyblob`)=rand(cast(`tinyblob` as signed)), +rand(`blob`)=rand(cast(`blob` as signed)), +rand(`mediumblob`)=rand(cast(`mediumblob` as signed)), +rand(`longblob`)=rand(cast(`longblob` as signed)), +rand(`text`)=rand(cast(`text` as signed)), +rand(`enum_t`)=rand(cast(`enum_t` as signed)), +rand(`set_t`)=rand(cast(`set_t` as signed)), +rand(`json`)=rand(cast(`json` as signed)) +from test_type_table; +WARNING: invalid input syntax for type int unsigned: "1.23a " +WARNING: invalid input syntax for type int16: "1.23a " +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int unsigned: "1.23a" +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid 
input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' +WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' + ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? 
+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+---------- + t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t | t +(1 row) + +select +length(random_bytes(`int1`)::binary), +length(random_bytes(`uint1`)::binary), +length(random_bytes(`int2`)::binary), +length(random_bytes(`uint2`)::binary), +length(random_bytes(`int4`)::binary), +length(random_bytes(`uint4`)::binary), +length(random_bytes(`int8`)::binary), +length(random_bytes(`uint8`)::binary), +length(random_bytes(`float4`)::binary), +length(random_bytes(`float8`)::binary), +length(random_bytes(`numeric`)::binary), +length(random_bytes(`bit1`)::binary), +length(random_bytes(`bit64`)::binary), +length(random_bytes(`boolean`)::binary), +length(random_bytes(`char`)::binary), +length(random_bytes(`varchar`)::binary), +length(random_bytes(`binary`)::binary), +length(random_bytes(`varbinary`)::binary), +length(random_bytes(`tinyblob`)::binary), +length(random_bytes(`blob`)::binary), +length(random_bytes(`mediumblob`)::binary), +length(random_bytes(`longblob`)::binary), +length(random_bytes(`text`)::binary), +length(random_bytes(`enum_t`)::binary), +length(random_bytes(`set_t`)::binary) +from test_type_table; +WARNING: invalid input syntax for type integer: "1.23a " +CONTEXT: referenced column: length +WARNING: invalid input syntax for type integer: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type 
double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: length +WARNING: invalid input syntax for type integer: "1.23a" +CONTEXT: referenced column: length + length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+-------- + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 7 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5 +(1 row) + +--error, cause value out of range +select length(random_bytes(`date`)::binary) from test_type_table; +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`time`)::binary) from test_type_table; +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`time(4)`)::binary) from test_type_table; +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`datetime`)::binary) from test_type_table; +WARNING: integer out of range +CONTEXT: referenced column: length +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`datetime(4)`)::binary) from test_type_table; +WARNING: integer out of range +CONTEXT: referenced column: length +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length 
+select length(random_bytes(`timestamp`)::binary) from test_type_table; +WARNING: integer out of range +CONTEXT: referenced column: length +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`timestamp(4)`)::binary) from test_type_table; +WARNING: integer out of range +CONTEXT: referenced column: length +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`year`)::binary)from test_type_table; +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +select length(random_bytes(`json`)::binary)from test_type_table; +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: length +ERROR: length value is out of range in 'random_bytes' +CONTEXT: referenced column: length +drop table test_type_table; +reset dolphin.b_compatibility_mode; +--hex number +select 0x123 = x'123'; + ?column? +---------- + t +(1 row) + +select 0x123; + ?column? +-------------- + 000100100011 +(1 row) + +select 0xfe = x'FE'; + ?column? +---------- + t +(1 row) + +select 0xFE = x'fe'; + ?column? 
+---------- + t +(1 row) + +select 0xG123; --equal to select 0 xG123; + xG123 +------- + 0 +(1 row) + +select 0x123G; --equal to select 0x123 G; + G +-------------- + 000100100011 +(1 row) + +select 0x; --equal to select 0 x; + x +--- + 0 +(1 row) + --测试点五:验è¯truncate函数 select truncate(111.28);--返回111 ERROR: function truncate(numeric) does not exist diff --git a/contrib/dolphin/expected/test_charset_collate.out b/contrib/dolphin/expected/test_charset_collate.out index 25da489a9..cb273e81d 100644 --- a/contrib/dolphin/expected/test_charset_collate.out +++ b/contrib/dolphin/expected/test_charset_collate.out @@ -236,6 +236,234 @@ select pg_get_tabledef('test_binary2'); WITH (orientation=row, compression=no); (1 row) +--binary/ascii attribute +create schema bin_schema_test character set = utf8 collate utf8_unicode_ci; +use bin_schema_test; +set b_format_behavior_compat_options='enable_multi_charset'; -- use to enable different table charset with schema +create table t_bin(a text binary); +\d t_bin + Table "bin_schema_test.t_bin" + Column | Type | Modifiers +--------+------+------------------------------------- + a | text | character set UTF8 collate utf8_bin + +create table t_bin2(a text character set gbk binary); +\d t_bin2 + Table "bin_schema_test.t_bin2" + Column | Type | Modifiers +--------+------+----------------------------------- + a | text | character set GBK collate gbk_bin + +create table t_bin3(a text binary) character set gb18030; +\d t_bin3 + Table "bin_schema_test.t_bin3" + Column | Type | Modifiers +--------+------+------------------------------------------- + a | text | character set GB18030 collate gb18030_bin + +-- binary has high proirity than collate clause +create table t_bin4(a text binary collate gb18030_chinese_ci); +\d t_bin4 + Table "bin_schema_test.t_bin4" + Column | Type | Modifiers +--------+------+------------------------------------------- + a | text | character set GB18030 collate gb18030_bin + +create table t_bin5(a text binary 
character set gbk) character set gb18030; +\d t_bin5 + Table "bin_schema_test.t_bin5" + Column | Type | Modifiers +--------+------+----------------------------------- + a | text | character set GBK collate gbk_bin + +-- ascii +create table t_bin6(a text ascii collate 'af_ZA.iso88591', b text charset binary binary); +show create table t_bin6; + Table | Create Table +--------+------------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "LATIN1" COLLATE "af_ZA.iso88591",+ + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary" + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +--alter table +alter table t_bin6 add column c text binary; +show create table t_bin6; + Table | Create Table +--------+------------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "LATIN1" COLLATE "af_ZA.iso88591",+ + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary", + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 add column d text character set gbk binary; +show create table t_bin6; + Table | Create Table +--------+------------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "LATIN1" COLLATE "af_ZA.iso88591",+ + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary", + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 add column e text binary character set gbk; +show create table t_bin6; + Table 
| Create Table +--------+------------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "LATIN1" COLLATE "af_ZA.iso88591",+ + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary", + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 modify column a text binary; +show create table t_bin6; + Table | Create Table +--------+-------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary",+ + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 modify column a text character set gbk binary; +show create table t_bin6; + Table | Create Table +--------+-------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "GBK" COLLATE gbk_bin, + + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary",+ + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 modify column a text binary character set gb18030; +show create table t_bin6; + Table | Create Table +--------+--------------------------------------------------------- 
+ t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "GB18030" COLLATE gb18030_bin,+ + | b blob CHARACTER SET "SQL_ASCII" COLLATE "binary", + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 change column b b_new text binary; +show create table t_bin6; + Table | Create Table +--------+--------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "GB18030" COLLATE gb18030_bin,+ + | b_new text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 change column b_new b_new2 text charset gb18030 binary; +show create table t_bin6; + Table | Create Table +--------+-------------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + + | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "GB18030" COLLATE gb18030_bin, + + | b_new2 text CHARACTER SET "GB18030" COLLATE gb18030_bin,+ + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +alter table t_bin6 change column b_new2 b_new3 text binary charset gbk; +show create table t_bin6; + Table | Create Table +--------+--------------------------------------------------------- + t_bin6 | SET search_path = bin_schema_test; + 
+ | CREATE TABLE t_bin6 ( + + | a text CHARACTER SET "GB18030" COLLATE gb18030_bin,+ + | b_new3 text CHARACTER SET "GBK" COLLATE gbk_bin, + + | c text CHARACTER SET "UTF8" COLLATE utf8_bin, + + | d text CHARACTER SET "GBK" COLLATE gbk_bin, + + | e text CHARACTER SET "GBK" COLLATE gbk_bin + + | ) + + | CHARACTER SET = "UTF8" COLLATE = "utf8_unicode_ci" + + | WITH (orientation=row, compression=no); +(1 row) + +-- ascii, grammar is ok, but execute report error +create table t_bin7(a text binary ascii); +ERROR: default collation for encoding "LATIN1" does not exist +create table t_bin8(a text ascii binary); +ERROR: default collation for encoding "LATIN1" does not exist +create table t_bin9(a text ascii); +ERROR: default collation for encoding "LATIN1" does not exist +-- wrong grammar +create table t_bin10(a text collate gb18030_chinese_ci binary); +ERROR: syntax error at or near "binary" +LINE 1: ...ate table t_bin10(a text collate gb18030_chinese_ci binary); + ^ +create table t_bin11(a text collate gb18030_chinese_ci ascii); +ERROR: syntax error at or near "ascii" +LINE 1: ...eate table t_bin11(a text collate gb18030_chinese_ci ascii); + ^ +create table t_bin12(a text character set gbk ascii); +ERROR: syntax error at or near "ascii" +LINE 1: create table t_bin12(a text character set gbk ascii); + ^ +create table t_bin13(a text ascii character set gbk); +ERROR: syntax error at or near "character" +LINE 1: create table t_bin13(a text ascii character set gbk); + ^ +drop schema bin_schema_test cascade; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to table t_bin +drop cascades to table t_bin2 +drop cascades to table t_bin3 +drop cascades to table t_bin4 +drop cascades to table t_bin5 +drop cascades to table t_bin6 drop schema test_charset cascade; NOTICE: drop cascades to 6 other objects DETAIL: drop cascades to table test_charset.test_collate0 diff --git a/contrib/dolphin/include/plugin_parser/kwlist.h b/contrib/dolphin/include/plugin_parser/kwlist.h 
index 9041ed2ad..51b598ae1 100644 --- a/contrib/dolphin/include/plugin_parser/kwlist.h +++ b/contrib/dolphin/include/plugin_parser/kwlist.h @@ -54,6 +54,9 @@ PG_KEYWORD("archive", ARCHIVE, UNRESERVED_KEYWORD) PG_KEYWORD("array", ARRAY, RESERVED_KEYWORD) PG_KEYWORD("as", AS, RESERVED_KEYWORD) PG_KEYWORD("asc", ASC, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("ascii", ASCII, UNRESERVED_KEYWORD) +#endif PG_KEYWORD("assertion", ASSERTION, UNRESERVED_KEYWORD) PG_KEYWORD("assignment", ASSIGNMENT, UNRESERVED_KEYWORD) #ifdef DOLPHIN diff --git a/contrib/dolphin/include/plugin_parser/parse_type.h b/contrib/dolphin/include/plugin_parser/parse_type.h index 6f208c86b..64c3a023d 100644 --- a/contrib/dolphin/include/plugin_parser/parse_type.h +++ b/contrib/dolphin/include/plugin_parser/parse_type.h @@ -18,6 +18,9 @@ #include "plugin_parser/parse_node.h" #include "plugin_postgres.h" +#ifdef DOLPHIN +#define COL_BINARY_ATTR "_bin" +#endif typedef HeapTuple Type; extern Type LookupTypeNameSupportUndef(ParseState *pstate, const TypeName *typeName, diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 482ec26c5..d69e97ab2 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -336,6 +336,11 @@ typedef struct DolphinString bool is_sconst; } DolphinString; +typedef struct TypeAttr { + int charset; + bool binary; +} TypeAttr; + /* ConstraintAttributeSpec yields an integer bitmask of these flags: */ #define CAS_NOT_DEFERRABLE 0x01 #define CAS_DEFERRABLE 0x02 @@ -590,6 +595,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); struct DolphinString *dolphinString; struct DolphinIdent *dolphinIdent; struct CondInfo* condinfo; + struct TypeAttr* typeattr; } %type CreateOption CreateIfNotExistsOption CreateAsOption %type CreateOptionList CreateIfNotExistsOptionList CreateAsOptionList @@ -940,11 +946,12 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type selected_timezone %type 
character_set -%type charset opt_charset convert_charset default_charset +%type charset convert_charset default_charset %type collate opt_collate default_collate set_names_collate %type CharsetCollate charset_collate optCharsetCollate +%type opt_charset -%type opt_varying opt_timezone opt_no_inherit +%type opt_varying opt_timezone opt_no_inherit opt_bin_mode %type Iconst SignedIconst opt_partitions_num opt_subpartitions_num %type Sconst comment_text notify_payload DolphinColColId @@ -1156,7 +1163,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); /* ordinary key words in alphabetical order */ /* PGXC - added DISTRIBUTE, DIRECT, COORDINATOR, CLEAN, NODE, BARRIER, SLICE, DATANODE */ %token ABORT_P ABSOLUTE_P ACCESS ACCOUNT ACTION ADD_P ADMIN AFTER - AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC + AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC ASCII ASSERTION ASSIGNMENT ASYMMETRIC AT ATTRIBUTE AUDIT AUTHID AUTHORIZATION AUTOEXTEND AUTOEXTEND_SIZE AUTOMAPPED AUTO_INCREMENT AVG_ROW_LENGTH AGAINST BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_P BINARY_DOUBLE BINARY_INTEGER BIT BLANKS @@ -4760,11 +4767,14 @@ modify_column_cmd: n->def = (Node *) def; /* We only use these three fields of the ColumnDef node */ def->typname = $2; - def->typname->charset = $3; + def->typname->charset = $3->charset; def->collClause = NULL; def->raw_default = NULL; def->update_default = NULL; def->clientLogicColumnRef=NULL; + if ($3->binary) { + def->columnOptions = list_make1(makeString("binary")); + } $$ = (Node *)n; } else { #ifdef ENABLE_MULTIPLE_NODES @@ -4785,8 +4795,11 @@ modify_column_cmd: ColumnDef *def = makeNode(ColumnDef); def->colname = $1; def->typname = $2; - def->typname->charset = $3; + def->typname->charset = $3->charset; def->columnOptions = $5; + if ($3->binary) { + def->columnOptions = lappend(def->columnOptions, 
makeString("binary")); + } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -5696,8 +5709,11 @@ alter_table_cmd: ColumnDef *def = makeNode(ColumnDef); def->colname = $3; def->typname = $4; - def->typname->charset = $5; + def->typname->charset = $5->charset; def->columnOptions = $7; + if ($5->binary) { + def->columnOptions = lappend(def->columnOptions, makeString("binary")); + } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -5737,8 +5753,11 @@ alter_table_cmd: ColumnDef *def = makeNode(ColumnDef); def->colname = $4; def->typname = $5; - def->typname->charset = $6; + def->typname->charset = $6->charset; def->columnOptions = $8; + if ($6->binary) { + def->columnOptions = lappend(def->columnOptions, makeString("binary")); + } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -9895,7 +9914,7 @@ columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCm ColumnDef *n = makeNode(ColumnDef); n->colname = $1; n->typname = $2; - n->typname->charset = $3; + n->typname->charset = $3->charset; n->kvtype = $4; n->inhcount = 0; n->is_local = true; @@ -9916,6 +9935,9 @@ columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCm yyscanner); } n->columnOptions = $8; + if ($3->binary) { + n->columnOptions = lappend(n->columnOptions, makeString("binary")); + } $$ = (Node *)n; } ; @@ -9925,7 +9947,7 @@ columnDef: DolphinColColId Typename opt_charset KVType ColCmprsMode create_gener ColumnDef *n = makeNode(ColumnDef); n->colname = $1; n->typname = $2; - n->typname->charset = $3; + n->typname->charset = $3->charset; n->kvtype = $4; n->inhcount = 0; n->is_local = true; @@ -9946,6 +9968,9 @@ columnDef: DolphinColColId Typename opt_charset KVType ColCmprsMode create_gener yyscanner); } n->columnOptions = $8; + if ($3->binary) { + n->columnOptions = lappend(n->columnOptions, makeString("binary")); + } $$ = (Node *)n; } ; @@ -31846,14 +31871,56 @@ convert_charset: } ; 
+opt_bin_mode: + BINARY + { + $$ = true; + } + | /*EMPTY*/ { $$ = false; } + ; + opt_charset: - charset + charset opt_bin_mode { - $$ = $1; + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = $1; + n->binary = $2; + $$ = n; + } + | BINARY + { + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = PG_INVALID_ENCODING; + n->binary = true; + $$ = n; + } + | BINARY charset + { + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = $2; + n->binary = true; + $$ = n; + } + | ASCII opt_bin_mode + { + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = pg_valid_server_encoding("latin1"); + n->binary = $2; + $$ = n; + } + | BINARY ASCII + { + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = pg_valid_server_encoding("latin1"); + n->binary = true; + $$ = n; } | /*EMPTY*/ { - $$ = PG_INVALID_ENCODING; + TypeAttr *n = (TypeAttr*)palloc0(sizeof(TypeAttr)); + n->charset = PG_INVALID_ENCODING; + n->binary = false; + $$ = n; } ; @@ -37492,6 +37559,7 @@ unreserved_keyword_without_key: | APP | APPEND | ARCHIVE + | ASCII | ASSERTION | ASSIGNMENT | AT diff --git a/contrib/dolphin/plugin_parser/parse_type.cpp b/contrib/dolphin/plugin_parser/parse_type.cpp index d78849fce..926d8cecf 100644 --- a/contrib/dolphin/plugin_parser/parse_type.cpp +++ b/contrib/dolphin/plugin_parser/parse_type.cpp @@ -791,6 +791,64 @@ Oid LookupCollation(ParseState* pstate, List* collnames, int location) return colloid; } +#ifdef DOLPHIN +Oid transform_bin_collation(ColumnDef* coldef, Oid cur_col_oid) +{ + Oid result = InvalidOid; + bool has_binary = false; + List *columnOptions = coldef->columnOptions; + ListCell *ColumnOption = NULL; + foreach (ColumnOption, columnOptions) { + void *pointer = lfirst(ColumnOption); + if (IsA(pointer, String)) { + Value *v = (Value*)pointer; + if (strcmp(v->val.str, "binary") == 0) { + has_binary = true; + break; + } + } + } + if (!has_binary) { + return cur_col_oid; + } + + int column_charset; + /* + * 
get charset of this column: + * 1. if assign charset by coldef, use if + * 2. get charset by collation + * 3. get database's encoding + */ + if (PG_VALID_ENCODING(coldef->typname->charset)) { + column_charset = coldef->typname->charset; + } else if ((column_charset = get_charset_by_collation(cur_col_oid)) != PG_INVALID_ENCODING) { + /* noting to do, already set in if condition */ + } else { + column_charset = GetDatabaseEncoding(); + } + + /* if the charset is ascii, no need to change(a text charset binary binary) */ + if (column_charset != PG_SQL_ASCII) { + const char *encode_name = pg_encoding_to_char(column_charset); + if (encode_name[0] != 0) { + Size coll_name_len = strlen(encode_name) + strlen(COL_BINARY_ATTR) + 1; + char *coll_name = (char*)palloc(coll_name_len); + int ret = sprintf_s(coll_name, coll_name_len, "%s%s", encode_name, COL_BINARY_ATTR); + securec_check_ss(ret, "", ""); + List *coll_name_list = list_make2(makeString("pg_catalog"), makeString(pg_strtolower(coll_name))); + result = get_collation_oid(coll_name_list, true); + if (!OidIsValid(result)) { + ereport(WARNING, (errmsg("invalid collation name: %s, use default collation", coll_name))); + } + list_free(coll_name_list); + pfree(coll_name); + } + } + /* return origin col oid if get correspond binary collation failed */ + return OidIsValid(result) ? 
result : cur_col_oid; +} +#endif + Oid get_column_def_collation_b_format(ColumnDef* coldef, Oid typeOid, Oid typcollation, bool is_bin_type, Oid rel_coll_oid) { @@ -808,7 +866,11 @@ Oid get_column_def_collation_b_format(ColumnDef* coldef, Oid typeOid, Oid typcol return InvalidOid; } else if (OidIsValid(coldef->collOid)) { /* Precooked collation spec, use that */ +#ifdef DOLPHIN + return transform_bin_collation(coldef, coldef->collOid); +#else return coldef->collOid; +#endif } char* schemaname = NULL; @@ -834,6 +896,9 @@ Oid get_column_def_collation_b_format(ColumnDef* coldef, Oid typeOid, Oid typcol result = get_default_collation_by_charset(GetDatabaseEncoding()); } } +#ifdef DOLPHIN + result = transform_bin_collation(coldef, result); +#endif return result; } @@ -2027,4 +2092,4 @@ char* makeEnumTypeName(const char* relname, const char *colname, const char* sch return arr; } -#endif \ No newline at end of file +#endif diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index 603b3dbc6..c58d6b773 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -106,6 +106,9 @@ static char *litbufdup(core_yyscan_t yyscanner); static char *litbuf_udeescape(unsigned char escape, core_yyscan_t yyscanner); static unsigned char unescape_single_char(unsigned char c, core_yyscan_t yyscanner); static int process_integer_literal(const char *token, YYSTYPE *lval); +#ifdef DOLPHIN +static int process_hex_number(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); +#endif static bool is_utf16_surrogate_first(pg_wchar c); static bool is_utf16_surrogate_second(pg_wchar c); static pg_wchar surrogate_pair_to_codepoint(pg_wchar first, pg_wchar second); @@ -252,6 +255,8 @@ xbinside [^']* /* Hexadecimal number */ xhstart [xX]{quote} xhinside [^']* +/* Hexadecimal number, start with 0x */ +xhnumber 0x[0-9A-Fa-f]+ /* National character */ xnstart [nN]{quote} @@ -1201,6 +1206,12 @@ other . 
yyextra->is_hint_str = false; return PARAM; } +{xhnumber} { + SET_YYLLOC(); + yyextra->is_hint_str = false; + /* skip first '0', remain 'x' and other hex number */ + return process_hex_number(yytext + 1, yyleng - 1, yyscanner, yylval); + } {integer} { SET_YYLLOC(); yyextra->is_hint_str = false; @@ -1741,6 +1752,16 @@ litbufdup(core_yyscan_t yyscanner) return newm; } +#ifdef DOLPHIN +static int process_hex_number(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval) +{ + startlit(); + addlit(token, len, yyscanner); + lval->str = litbufdup(yyscanner); + return XCONST; +} +#endif + static int process_integer_literal(const char *token, YYSTYPE *lval) { diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 51e20b91a..e4a5ca100 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -22,6 +22,11 @@ #include #include +#ifdef DOLPHIN +#include +#include +#endif + #include "access/hash.h" #include "access/tuptoaster.h" #include "catalog/pg_collation.h" @@ -10866,4 +10871,48 @@ Datum uint8_xor_text(PG_FUNCTION_ARGS) PG_RETURN_UINT64(arg1 ^ arg2_int); } + +PG_FUNCTION_INFO_V1_PUBLIC(random_bytes); +extern "C" DLL_PUBLIC Datum random_bytes(PG_FUNCTION_ARGS); +#define MAX_RANDOM_BYTES_LEN 1024 +Datum random_bytes(PG_FUNCTION_ARGS) +{ + int len = PG_GETARG_INT32(0); + if (len <= 0 || len > MAX_RANDOM_BYTES_LEN) { + ereport(ERROR, (errmsg("length value is out of range in 'random_bytes'"))); + } + + bytea *res = (bytea*)palloc(VARHDRSZ + len); + SET_VARSIZE(res, VARHDRSZ + len); + int ret = RAND_bytes((unsigned char*)VARDATA_ANY(res), len); + if (ret != 1) { + ERR_clear_error(); + ereport(ERROR, (errmsg("RAND_bytes can't generate random bytes"))); + } + + PG_RETURN_BYTEA_P(res); +} + +PG_FUNCTION_INFO_V1_PUBLIC(rand_seed); +extern "C" DLL_PUBLIC Datum rand_seed(PG_FUNCTION_ARGS); +Datum rand_seed(PG_FUNCTION_ARGS) +{ + int128 n = PG_ARGISNULL(0) ? 
0 : PG_GETARG_INT128(0); + int elevel = (!SQL_MODE_STRICT() || fcinfo->can_ignore) ? WARNING : ERROR; + + if (unlikely(n > PG_UINT64_MAX)) { + ereport(elevel, (errmsg("Truncated incorrect DECIMAL value"))); + n = PG_UINT64_MAX; + } else if (unlikely(n < PG_INT64_MIN)) { + ereport(elevel, (errmsg("Truncated incorrect DECIMAL value"))); + n = PG_INT64_MIN; + } + + gs_srandom((unsigned int)n); + float8 result; + /* result [0.0 - 1.0) */ + result = (double)gs_random() / ((double)MAX_RANDOM_VALUE + 1); + + PG_RETURN_FLOAT8(result); +} #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 1dbbed42f..c61f14a97 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -75,6 +75,28 @@ drop CAST IF EXISTS (uint8 AS bit); CREATE CAST (uint4 AS bit) WITH FUNCTION bitfromuint4(uint4, int4); CREATE CAST (uint8 AS bit) WITH FUNCTION bitfromuint8(uint8, int4); +DROP FUNCTION IF EXISTS pg_catalog.rand(int16); +DROP FUNCTION IF EXISTS pg_catalog.rand(uint4); +DROP FUNCTION IF EXISTS pg_catalog.rand(timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.rand(date); +DROP FUNCTION IF EXISTS pg_catalog.rand(year); +DROP FUNCTION IF EXISTS pg_catalog.rand(binary); +DROP FUNCTION IF EXISTS pg_catalog.rand(blob); +DROP FUNCTION IF EXISTS pg_catalog.rand(anyenum); +DROP FUNCTION IF EXISTS pg_catalog.rand(anyset); +DROP FUNCTION IF EXISTS pg_catalog.rand(json); + +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(int4); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(bit); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(timestamp with time zone); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(date); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(year); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(binary); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(blob); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(anyenum); +DROP 
FUNCTION IF EXISTS pg_catalog.random_bytes(anyset); +DROP FUNCTION IF EXISTS pg_catalog.random_bytes(json); + CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( tinyblob ) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; diff --git a/contrib/dolphin/sql/db_b_parser3.sql b/contrib/dolphin/sql/db_b_parser3.sql index 441380621..110201e9f 100644 --- a/contrib/dolphin/sql/db_b_parser3.sql +++ b/contrib/dolphin/sql/db_b_parser3.sql @@ -10,10 +10,148 @@ select lower('ABc'), lower('哈哈'), lower('123456'),lower('哈市&%%ï¿¥#'),low --测试点三:验è¯ucase函数 select ucase('ABc'), ucase('哈哈'), ucase('123456'),ucase('哈市&%%ï¿¥#'),ucase(null); select upper('ABc'), upper('哈哈'), upper('123456'),upper('哈市&%%ï¿¥#'),upper(null); ---测试点四:验è¯randå’Œrandom函数 --- select rand(), random();--䏿˜¾ç¤ºå°æ•°ç‚¹å‰çš„0 --- set behavior_compat_options := 'display_leading_zero'; --- select rand(), random();--æ˜¾ç¤ºå°æ•°ç‚¹å‰çš„0 +--测试点四:验è¯randå’Œrandom_bytes函数 +select rand(18446744073709551615) = rand(18446744073709551616); +select rand(-9223372036854775808) = rand(-9223372036854775809); +select rand(0) = rand(4294967296); +select rand(1) = rand(4294967297); +select rand(0) = rand(null); +select random_bytes(null); +select random_bytes(0); +select random_bytes(1); +select random_bytes(1024); +select random_bytes(1025); + +set dolphin.b_compatibility_mode to on; +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` 
binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); + +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); +select +rand(`int1`)=rand(cast(`int1` as signed)), +rand(`uint1`)=rand(cast(`uint1` as signed)), +rand(`int2`)=rand(cast(`int2` as signed)), +rand(`uint2`)=rand(cast(`uint2` as signed)), +rand(`int4`)=rand(cast(`int4` as signed)), +rand(`uint4`)=rand(cast(`uint4` as signed)), +rand(`int8`)=rand(cast(`int8` as signed)), +rand(`uint8`)=rand(cast(`uint8` as signed)), +rand(`float4`)=rand(cast(`float4` as signed)), +rand(`float8`)=rand(cast(`float8` as signed)), +rand(`numeric`)=rand(cast(`numeric` as signed)), +rand(`bit1`)=rand(cast(`bit1` as signed)), +rand(`bit64`)=rand(cast(`bit64` as signed)), +rand(`boolean`)=rand(cast(`boolean` as signed)), +rand(`date`)=rand(cast(`date` as signed)), +rand(`time`)=rand(cast(`time` as signed)), +rand(`time(4)`)=rand(cast(`time(4)` as signed)), +rand(`datetime`)=rand(cast(`datetime` as signed)), +rand(`datetime(4)`)=rand(cast(`datetime(4)` as signed)), +rand(`timestamp`)=rand(cast(`timestamp` as signed)), +rand(`timestamp(4)`)=rand(cast(`timestamp(4)` as signed)), +rand(`year`)=rand(cast(`year` as signed)), +rand(`char`)=rand(cast(`char` as signed)), +rand(`varchar`)=rand(cast(`varchar` as signed)), +rand(`binary`)=rand(cast(`binary` as signed)), +rand(`varbinary`)=rand(cast(`varbinary` as signed)), +rand(`tinyblob`)=rand(cast(`tinyblob` as signed)), +rand(`blob`)=rand(cast(`blob` as signed)), +rand(`mediumblob`)=rand(cast(`mediumblob` as signed)), 
+rand(`longblob`)=rand(cast(`longblob` as signed)), +rand(`text`)=rand(cast(`text` as signed)), +rand(`enum_t`)=rand(cast(`enum_t` as signed)), +rand(`set_t`)=rand(cast(`set_t` as signed)), +rand(`json`)=rand(cast(`json` as signed)) +from test_type_table; + +select +length(random_bytes(`int1`)::binary), +length(random_bytes(`uint1`)::binary), +length(random_bytes(`int2`)::binary), +length(random_bytes(`uint2`)::binary), +length(random_bytes(`int4`)::binary), +length(random_bytes(`uint4`)::binary), +length(random_bytes(`int8`)::binary), +length(random_bytes(`uint8`)::binary), +length(random_bytes(`float4`)::binary), +length(random_bytes(`float8`)::binary), +length(random_bytes(`numeric`)::binary), +length(random_bytes(`bit1`)::binary), +length(random_bytes(`bit64`)::binary), +length(random_bytes(`boolean`)::binary), +length(random_bytes(`char`)::binary), +length(random_bytes(`varchar`)::binary), +length(random_bytes(`binary`)::binary), +length(random_bytes(`varbinary`)::binary), +length(random_bytes(`tinyblob`)::binary), +length(random_bytes(`blob`)::binary), +length(random_bytes(`mediumblob`)::binary), +length(random_bytes(`longblob`)::binary), +length(random_bytes(`text`)::binary), +length(random_bytes(`enum_t`)::binary), +length(random_bytes(`set_t`)::binary) +from test_type_table; + +--error, cause value out of range +select length(random_bytes(`date`)::binary) from test_type_table; +select length(random_bytes(`time`)::binary) from test_type_table; +select length(random_bytes(`time(4)`)::binary) from test_type_table; +select length(random_bytes(`datetime`)::binary) from test_type_table; +select length(random_bytes(`datetime(4)`)::binary) from test_type_table; +select length(random_bytes(`timestamp`)::binary) from test_type_table; +select length(random_bytes(`timestamp(4)`)::binary) from test_type_table; +select length(random_bytes(`year`)::binary)from test_type_table; +select length(random_bytes(`json`)::binary)from test_type_table; + +drop table 
test_type_table; + +reset dolphin.b_compatibility_mode; + +--hex number +select 0x123 = x'123'; +select 0x123; +select 0xfe = x'FE'; +select 0xFE = x'fe'; + +select 0xG123; --equal to select 0 xG123; +select 0x123G; --equal to select 0x123 G; +select 0x; --equal to select 0 x; + --测试点五:验è¯truncate函数 select truncate(111.28);--返回111 select truncate(111.28,0);--返回111 diff --git a/contrib/dolphin/sql/test_charset_collate.sql b/contrib/dolphin/sql/test_charset_collate.sql index 8e76538d6..a4c0ad528 100644 --- a/contrib/dolphin/sql/test_charset_collate.sql +++ b/contrib/dolphin/sql/test_charset_collate.sql @@ -114,5 +114,56 @@ alter table test_binary2 add column c7 char(10); alter table test_binary2 add column c8 tinytext; select pg_get_tabledef('test_binary2'); +--binary/ascii attribute +create schema bin_schema_test character set = utf8 collate utf8_unicode_ci; +use bin_schema_test; +set b_format_behavior_compat_options='enable_multi_charset'; -- use to enable different table charset with schema +create table t_bin(a text binary); +\d t_bin +create table t_bin2(a text character set gbk binary); +\d t_bin2 +create table t_bin3(a text binary) character set gb18030; +\d t_bin3 +-- binary has high proirity than collate clause +create table t_bin4(a text binary collate gb18030_chinese_ci); +\d t_bin4 +create table t_bin5(a text binary character set gbk) character set gb18030; +\d t_bin5 +-- ascii +create table t_bin6(a text ascii collate 'af_ZA.iso88591', b text charset binary binary); +show create table t_bin6; + +--alter table +alter table t_bin6 add column c text binary; +show create table t_bin6; +alter table t_bin6 add column d text character set gbk binary; +show create table t_bin6; +alter table t_bin6 add column e text binary character set gbk; +show create table t_bin6; +alter table t_bin6 modify column a text binary; +show create table t_bin6; +alter table t_bin6 modify column a text character set gbk binary; +show create table t_bin6; +alter table t_bin6 modify 
column a text binary character set gb18030; +show create table t_bin6; +alter table t_bin6 change column b b_new text binary; +show create table t_bin6; +alter table t_bin6 change column b_new b_new2 text charset gb18030 binary; +show create table t_bin6; +alter table t_bin6 change column b_new2 b_new3 text binary charset gbk; +show create table t_bin6; + +-- ascii, grammar is ok, but execute report error +create table t_bin7(a text binary ascii); +create table t_bin8(a text ascii binary); +create table t_bin9(a text ascii); + +-- wrong grammar +create table t_bin10(a text collate gb18030_chinese_ci binary); +create table t_bin11(a text collate gb18030_chinese_ci ascii); +create table t_bin12(a text character set gbk ascii); +create table t_bin13(a text ascii character set gbk); +drop schema bin_schema_test cascade; + drop schema test_charset cascade; reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 757bf33fe..894628491 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -143,6 +143,29 @@ drop CAST IF EXISTS (uint8 AS bit); CREATE CAST (uint4 AS bit) WITH FUNCTION bitfromuint4(uint4, int4) AS ASSIGNMENT; CREATE CAST (uint8 AS bit) WITH FUNCTION bitfromuint8(uint8, int4) AS ASSIGNMENT; +-- non-strict, accept null input +CREATE OR REPLACE FUNCTION pg_catalog.rand(int16) returns double precision LANGUAGE C volatile as '$libdir/dolphin', 'rand_seed'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(uint4) returns double precision LANGUAGE SQL volatile as 'select rand($1::int16)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(timestamp with time zone) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(date) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION 
pg_catalog.rand(year) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(binary) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(blob) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(anyenum) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(anyset) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; +CREATE OR REPLACE FUNCTION pg_catalog.rand(json) returns double precision LANGUAGE SQL volatile as 'select rand($1::int8)'; + +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(int4) returns blob LANGUAGE C volatile STRICT as '$libdir/dolphin', 'random_bytes'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(bit) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(timestamp with time zone) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(date) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(year) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(binary) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(blob) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(anyenum) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(anyset) returns blob LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; +CREATE OR REPLACE FUNCTION pg_catalog.random_bytes(json) returns blob 
LANGUAGE SQL volatile as 'select random_bytes($1::int4)'; + -- The reason for using replace is because we don't want to change the OID CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_rawout ( tinyblob -- Gitee From 449bb4f0974feafcb4f10e7036b795cc63e4b1f7 Mon Sep 17 00:00:00 2001 From: zhoujingbnu Date: Fri, 10 Nov 2023 08:27:48 +0800 Subject: [PATCH 049/434] fix issue I8DP6S core when calling varlenatoset --- contrib/dolphin/expected/test_datatype.out | 33 ++++++++++++++++++++++ contrib/dolphin/plugin_utils/adt/set.cpp | 6 +++- contrib/dolphin/sql/test_datatype.sql | 22 +++++++++++++++ 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/test_datatype.out b/contrib/dolphin/expected/test_datatype.out index e029a8a72..05db8ed0c 100644 --- a/contrib/dolphin/expected/test_datatype.out +++ b/contrib/dolphin/expected/test_datatype.out @@ -329,5 +329,38 @@ SELECT * FROM t0 WHERE t0.c1 is false order by 1,2; (3 rows) drop table t0; +--test for set +drop table if exists set_tab; +NOTICE: table "set_tab" does not exist, skipping +create table set_tab ( + c1 set('1','2','3','4','5'), + c2 set('a', 'b', 'c', 'd', 'e') +); +NOTICE: CREATE TABLE will create implicit set "set_tab_c1_set" for column "set_tab.c1" +NOTICE: CREATE TABLE will create implicit set "set_tab_c2_set" for column "set_tab.c2" +insert into set_tab values('1,2','a,b'); +insert into set_tab values('3,4','c,d'); +create or replace function gettypeid(tname text) returns INT4 as +$$ +begin + return oid from pg_type where typname = :tname; +end; +$$ language plpgsql; +select varlenatoset(c1,gettypeid('set_tab_c1_set')) from set_tab order by 1; + varlenatoset +-------------- + 1,2 + 3,4 +(2 rows) + +select varlenatoset(c2,gettypeid('set_tab_c2_set')) from set_tab order by 1; + varlenatoset +-------------- + a,b + c,d +(2 rows) + +drop function gettypeid; +drop table set_tab; drop schema b_datatype_test cascade; reset current_schema; diff --git 
a/contrib/dolphin/plugin_utils/adt/set.cpp b/contrib/dolphin/plugin_utils/adt/set.cpp index bc4adbdfa..13e99eb7b 100644 --- a/contrib/dolphin/plugin_utils/adt/set.cpp +++ b/contrib/dolphin/plugin_utils/adt/set.cpp @@ -1400,7 +1400,11 @@ PG_FUNCTION_INFO_V1_PUBLIC(varlenatoset); extern "C" DLL_PUBLIC Datum varlenatoset(PG_FUNCTION_ARGS); Datum varlenatoset(PG_FUNCTION_ARGS) { - char *setlabels = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + char* setlabels = NULL; + Oid typeOutput = InvalidOid; + bool typIsVarlena = false; + getTypeOutputInfo(fcinfo->argTypes[0], &typeOutput, &typIsVarlena); + setlabels = DatumGetCString(OidOutputFunctionCall(typeOutput, PG_GETARG_DATUM(0))); Datum result = (Datum)get_set_in_result(PG_GETARG_OID(1), setlabels, PG_GET_COLLATION()); pfree_ext(setlabels); PG_RETURN_VARBIT_P(result); diff --git a/contrib/dolphin/sql/test_datatype.sql b/contrib/dolphin/sql/test_datatype.sql index 3a031b6db..8b73eb38f 100644 --- a/contrib/dolphin/sql/test_datatype.sql +++ b/contrib/dolphin/sql/test_datatype.sql @@ -108,5 +108,27 @@ SELECT * FROM t0 WHERE t0.c1 is true order by 1,2; SELECT * FROM t0 WHERE t0.c1 is false order by 1,2; drop table t0; +--test for set +drop table if exists set_tab; +create table set_tab ( + c1 set('1','2','3','4','5'), + c2 set('a', 'b', 'c', 'd', 'e') +); +insert into set_tab values('1,2','a,b'); +insert into set_tab values('3,4','c,d'); + +create or replace function gettypeid(tname text) returns INT4 as +$$ +begin + return oid from pg_type where typname = :tname; +end; +$$ language plpgsql; + +select varlenatoset(c1,gettypeid('set_tab_c1_set')) from set_tab order by 1; +select varlenatoset(c2,gettypeid('set_tab_c2_set')) from set_tab order by 1; + +drop function gettypeid; +drop table set_tab; + drop schema b_datatype_test cascade; reset current_schema; \ No newline at end of file -- Gitee From 3c40fc589d14bda7b46e6005a0888281ea33f4e3 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Sat, 11 Nov 2023 
16:12:23 +0800 Subject: [PATCH 050/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91spq=E5=A4=9A=E6=9C=BA=E5=B9=B6=E8=A1=8Cbugfix?= =?UTF-8?q?=20=E4=BF=AE=E5=A4=8D=E6=9F=A5=E8=AF=A2=E7=9B=AE=E6=A0=87?= =?UTF-8?q?=E5=88=97=E5=8C=85=E5=90=AB=E5=87=BD=E6=95=B0group=5Fconcat?= =?UTF-8?q?=EF=BC=8C=E6=95=B0=E6=8D=AE=E5=BA=93coredump?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp | 4 +--- contrib/spq_plugin/src/spqplugin.cpp | 6 ++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp index 9a63b7cad..265e9f814 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp @@ -533,9 +533,7 @@ CTranslatorDXLToScalar::TranslateDXLScalarAggrefToScalar( break; case EdxlaggstageFinal: aggref->aggsplittype = AGGSTAGE_FINAL; - if (aggref->aggfnoid != STRINGAGGFUNCOID) { - aggref->agghas_collectfn = true; - } + aggref->agghas_collectfn = true; SPQOS_ASSERT(aggref->aggstage == 0); aggref->aggstage = aggref->aggstage + 1; break; diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index c7eb5d6f3..4ea174031 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -61,6 +61,12 @@ static bool check_disable_spq_planner_walker(Node *node, void *context) if (node == NULL) return false; +#ifdef PGXC + if (IsA(node, Aggref) && !((Aggref*)node)->agghas_collectfn) { + return true; + } +#endif + if (!IsA(node, Query)) { return expression_tree_walker(node, (bool (*)())check_disable_spq_planner_walker, context); } -- Gitee From 5c15e453abd93da605b828fd7324e21b8fbd3800 Mon Sep 17 00:00:00 2001 From: 
zhoujingbnu Date: Sun, 12 Nov 2023 12:48:12 +0800 Subject: [PATCH 051/434] =?UTF-8?q?fix=20issue=20I8FCYL=20timestampdiff?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E9=83=A8=E5=88=86=E8=BF=94=E5=9B=9E=E7=BB=93?= =?UTF-8?q?=E6=9E=9C=E4=B8=8Emysql=E4=BE=A7=E4=B8=8D=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/db_b_date_time_functions3.out | 27 +++++++++++++++++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 2 ++ .../dolphin/sql/db_b_date_time_functions3.sql | 13 +++++++++ 3 files changed, 42 insertions(+) diff --git a/contrib/dolphin/expected/db_b_date_time_functions3.out b/contrib/dolphin/expected/db_b_date_time_functions3.out index 62f33e4ac..fa570dfbc 100644 --- a/contrib/dolphin/expected/db_b_date_time_functions3.out +++ b/contrib/dolphin/expected/db_b_date_time_functions3.out @@ -124,6 +124,17 @@ insert into test values('timestampdiff(microsecond, 101, 99991231235959.999999)' insert into test values('timestampdiff(week, null, ''9999-12-31'')', timestampdiff(week, null, '9999-12-31')); insert into test values('timestampdiff(week, ''9999-12-31'', null)', timestampdiff(week, '9999-12-31', null)); insert into test values('timestampdiff(week, null, null)', timestampdiff(week, null, null)); +--严格模å¼ä¸‹ï¼Œé’ˆå¯¹unitçš„éžæ³•报error +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +ERROR: timestamp with time zone units "abc" not recognized +CONTEXT: referenced column: result +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); +ERROR: timestamp units "decades" not supported +CONTEXT: referenced column: result +--严格模å¼ä¸‹ï¼Œé’ˆå¯¹valueçš„éžæ³•报warning +insert into test values('timestampdiff(day, ''2020-11-11'', ''abc'')', timestampdiff(day, '2020-11-11', 'abc')); +ERROR: Incorrect datetime value: "abc" +CONTEXT: referenced column: result -- 
éžä¸¥æ ¼æ¨¡å¼ï¼Œå‚æ•°ä¸åˆæ³•,报warning,返回NULL或者对应值 set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test values('timestampdiff(microsecond, ''abc'', ''2021-11-12'')', timestampdiff(microsecond, 'abc', '2021-11-12')); @@ -135,6 +146,13 @@ CONTEXT: referenced column: result insert into test values('timestampdiff(microsecond, ''10000-1-1'', ''2021-11-12'')', timestampdiff(microsecond, '10000-1-1', '2021-11-12')); WARNING: Incorrect datetime value: "10000-1-1" CONTEXT: referenced column: result +--éžä¸¥æ ¼æ¨¡å¼ä¸‹ï¼Œunitä¸åˆæ³•,ä»ç„¶æŠ¥error +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +ERROR: timestamp with time zone units "abc" not recognized +CONTEXT: referenced column: result +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); +ERROR: timestamp units "decades" not supported +CONTEXT: referenced column: result -- 严格模å¼ï¼Œå‚æ•°ä¸åˆæ³•,抛出错误 set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test values('timestampdiff(microsecond, ''abc'', ''2021-11-12'')', timestampdiff(microsecond, 'abc', '2021-11-12')); @@ -146,6 +164,15 @@ CONTEXT: referenced column: result insert into test values('timestampdiff(microsecond, ''10000-1-1'', ''2021-11-12'')', timestampdiff(microsecond, '10000-1-1', '2021-11-12')); ERROR: Incorrect datetime value: "10000-1-1" CONTEXT: referenced column: result +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +ERROR: timestamp with time zone units "abc" not recognized +CONTEXT: referenced column: result +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); +ERROR: timestamp units "decades" not supported +CONTEXT: 
referenced column: result +insert into test values('timestampdiff(day, ''2020-11-11'', ''abc'')', timestampdiff(day, '2020-11-11', 'abc')); +ERROR: Incorrect datetime value: "abc" +CONTEXT: referenced column: result set dolphin.b_compatibility_mode = false; -- test convert_tz -- ä¸¥æ ¼æ¨¡å¼æˆ–者éžä¸¥æ ¼æ¨¡å¼éƒ½æœ‰å€¼ diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 5d8f59458..4f237d80f 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -8683,6 +8683,8 @@ static bool timestampdiff_datetime_internal(int64 *result, text *units, Timesta } else { code = geterrcode(); msg = pstrdup(Geterrmsg()); + if(code != ERRCODE_DATETIME_VALUE_OUT_OF_RANGE) + PG_RE_THROW(); FlushErrorState(); } } diff --git a/contrib/dolphin/sql/db_b_date_time_functions3.sql b/contrib/dolphin/sql/db_b_date_time_functions3.sql index ceb82e408..f34ed9bd5 100644 --- a/contrib/dolphin/sql/db_b_date_time_functions3.sql +++ b/contrib/dolphin/sql/db_b_date_time_functions3.sql @@ -111,18 +111,31 @@ insert into test values('timestampdiff(microsecond, 101, 99991231235959.999999)' insert into test values('timestampdiff(week, null, ''9999-12-31'')', timestampdiff(week, null, '9999-12-31')); insert into test values('timestampdiff(week, ''9999-12-31'', null)', timestampdiff(week, '9999-12-31', null)); insert into test values('timestampdiff(week, null, null)', timestampdiff(week, null, null)); +--严格模å¼ä¸‹ï¼Œé’ˆå¯¹unitçš„éžæ³•报error +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); +--严格模å¼ä¸‹ï¼Œé’ˆå¯¹valueçš„éžæ³•报warning +insert into test values('timestampdiff(day, ''2020-11-11'', ''abc'')', timestampdiff(day, '2020-11-11', 'abc')); -- éžä¸¥æ 
¼æ¨¡å¼ï¼Œå‚æ•°ä¸åˆæ³•,报warning,返回NULL或者对应值 set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test values('timestampdiff(microsecond, ''abc'', ''2021-11-12'')', timestampdiff(microsecond, 'abc', '2021-11-12')); insert into test values('timestampdiff(microsecond, ''2021-11-12'', ''10000-1-1'')', timestampdiff(microsecond, '2021-11-12', '10000-1-1')); insert into test values('timestampdiff(microsecond, ''10000-1-1'', ''2021-11-12'')', timestampdiff(microsecond, '10000-1-1', '2021-11-12')); +--éžä¸¥æ ¼æ¨¡å¼ä¸‹ï¼Œunitä¸åˆæ³•,ä»ç„¶æŠ¥error +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); -- 严格模å¼ï¼Œå‚æ•°ä¸åˆæ³•,抛出错误 set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test values('timestampdiff(microsecond, ''abc'', ''2021-11-12'')', timestampdiff(microsecond, 'abc', '2021-11-12')); insert into test values('timestampdiff(microsecond, ''2021-11-12'', ''10000-1-1'')', timestampdiff(microsecond, '2021-11-12', '10000-1-1')); insert into test values('timestampdiff(microsecond, ''10000-1-1'', ''2021-11-12'')', timestampdiff(microsecond, '10000-1-1', '2021-11-12')); + +insert into test values('timestampdiff(abc, ''2020-11-11'', ''2020-11-12'')', timestampdiff(abc, '2020-11-11', '2020-11-12')); +insert into test values('timestampdiff(decades, ''2020-11-11'', ''2020-11-12'')', timestampdiff(decades, '2020-11-11', '2020-11-12')); +insert into test values('timestampdiff(day, ''2020-11-11'', ''abc'')', timestampdiff(day, '2020-11-11', 'abc')); + set dolphin.b_compatibility_mode = false; -- test convert_tz -- Gitee From c9cb13368eb489b0d0a05f9896d673ce4b9ee2c9 Mon Sep 17 00:00:00 2001 From: zhoujingbnu Date: Sun, 12 Nov 2023 13:40:27 +0800 Subject: [PATCH 052/434] 
add blank to pass the code check --- contrib/dolphin/plugin_utils/adt/timestamp.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 4f237d80f..75d1d6d87 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -8683,7 +8683,7 @@ static bool timestampdiff_datetime_internal(int64 *result, text *units, Timesta } else { code = geterrcode(); msg = pstrdup(Geterrmsg()); - if(code != ERRCODE_DATETIME_VALUE_OUT_OF_RANGE) + if (code != ERRCODE_DATETIME_VALUE_OUT_OF_RANGE) PG_RE_THROW(); FlushErrorState(); } -- Gitee From f6fb8bcce646130ab2ad7ba463e6e69bf7b4f5d9 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 13 Nov 2023 15:38:37 +0800 Subject: [PATCH 053/434] Sync server code. 7920260008dc92617d6310058737d14d12c3863d --- contrib/dolphin/include/builtin_funcs.ini | 4 ++ contrib/dolphin/include/plugin_catalog/heap.h | 5 ++ .../dolphin/include/plugin_nodes/parsenodes.h | 5 ++ contrib/dolphin/include/plugin_utils/fmgr.h | 1 - contrib/dolphin/plugin_catalog/heap.cpp | 65 +++++++++++++++++++ .../plugin_optimizer/commands/copy.cpp | 1 - .../commands/functioncmds.cpp | 2 +- contrib/dolphin/plugin_parser/gram.y | 12 ++-- .../dolphin/plugin_parser/parse_utilcmd.cpp | 7 +- contrib/dolphin/plugin_pl/plpgsql/src/gram.y | 49 +++++++++----- .../dolphin/plugin_pl/plpgsql/src/pl_comp.cpp | 4 +- .../plugin_pl/plpgsql/src/pl_handler.cpp | 57 ++++++---------- .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 2 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 12 +++- contrib/dolphin/plugin_utils/adt/selfuncs.cpp | 5 ++ contrib/dolphin/tablecmds.cpp | 10 ++- 16 files changed, 171 insertions(+), 70 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index 422b8dd16..6d6c97754 100644 --- a/contrib/dolphin/include/builtin_funcs.ini +++ 
b/contrib/dolphin/include/builtin_funcs.ini @@ -3679,6 +3679,10 @@ "gs_get_global_barriers_status", 1, AddBuiltinFunc(_0(9034), _1("gs_get_global_barriers_status"), _2(0), _3(true), _4(false), _5(gs_get_global_barriers_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "slot_name", "global_barrier_id", "global_achive_barrier_id"), _24(NULL), _25("gs_get_global_barriers_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_xlog_keepers", 1, + AddBuiltinFunc(_0(9040), _1("gs_xlog_keepers"), _2(0), _3(true), _4(false), _5(gs_xlog_keepers), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "keeptype", "keepsegment", "describe"), _24(NULL), _25("gs_xlog_keepers"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) + ), AddFuncGroup( "gs_get_next_xid_csn", 1, AddBuiltinFunc(_0(6224), _1("gs_get_next_xid_csn"), _2(1), _3(true), _4(true), _5(gs_get_next_xid_csn), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(3, 25, 28, 28), _22(3, 'o', 'o', 'o'), _23(3, "node_name", "next_xid", "next_csn"), _24(NULL), _25("gs_get_next_xid_csn"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/include/plugin_catalog/heap.h 
b/contrib/dolphin/include/plugin_catalog/heap.h index b3b4391ff..b78941970 100644 --- a/contrib/dolphin/include/plugin_catalog/heap.h +++ b/contrib/dolphin/include/plugin_catalog/heap.h @@ -27,6 +27,10 @@ #define PSORT_RESERVE_COLUMN "tid" #define CHCHK_PSORT_RESERVE_COLUMN(attname) (strcmp(PSORT_RESERVE_COLUMN, (attname)) == 0) +#ifdef USE_SPQ +extern HeapTuple heaptuple_from_pg_attribute(Relation pg_attribute_rel, Form_pg_attribute new_attribute); +#endif + typedef struct RawColumnDefault { AttrNumber attnum; /* attribute to attach default to */ Node *raw_default; /* default value (untransformed parse tree) */ @@ -262,4 +266,5 @@ extern int GetIndexKeyAttsByTuple(Relation relation, HeapTuple indexTuple); extern bool GetIndexVisibleStateByTuple(HeapTuple indexTuple); extern void AddOrDropUidsAttr(Oid relOid, bool oldRelHasUids, bool newRelHasUids); +extern char* heap_serialize_row_attr(Oid rel_oid, bool* depend_undefined); #endif /* HEAP_H */ diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 1973c6e7d..e2e6e1c12 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -34,6 +34,11 @@ #include "tcop/dest.h" #include "nodes/parsenodes_common.h" +#ifdef USE_SPQ +#define CURSOR_OPT_SPQ_OK 0x0200 /* SPQ Execution */ +#define CURSOR_OPT_SPQ_FORCE 0x0400 /* Force to generate a SPQ plan */ +#endif + /* * Relids * Set of relation identifiers (indexes into the rangetable). 
diff --git a/contrib/dolphin/include/plugin_utils/fmgr.h b/contrib/dolphin/include/plugin_utils/fmgr.h index 5a806120a..578131cef 100644 --- a/contrib/dolphin/include/plugin_utils/fmgr.h +++ b/contrib/dolphin/include/plugin_utils/fmgr.h @@ -29,7 +29,6 @@ #include "access/tupdesc.h" #include "nodes/primnodes.h" - #ifndef FRONTEND_PARSER typedef ScalarVector* (*VectorFunction)(FunctionCallInfo fcinfo); diff --git a/contrib/dolphin/plugin_catalog/heap.cpp b/contrib/dolphin/plugin_catalog/heap.cpp index 9fd2d7385..92c634a6e 100644 --- a/contrib/dolphin/plugin_catalog/heap.cpp +++ b/contrib/dolphin/plugin_catalog/heap.cpp @@ -412,7 +412,28 @@ static FormData_pg_attribute a10 = {0, true, 0}; +#ifdef USE_SPQ +static FormData_pg_attribute a11 = {0, + {"_root_ctid"}, + TIDOID, + 0, + sizeof(ItemPointerData), + RootSelfItemPointerAttributeNumber, + 0, + -1, + -1, + false, + 'p', + 's', + true, + false, + false, + true, + 0}; +static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11}; +#else static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10}; +#endif #else static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7}; #endif @@ -1130,6 +1151,10 @@ static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relki /* skip OID where appropriate */ if (!tupdesc->tdhasoid && SysAtt[i]->attnum == ObjectIdAttributeNumber) continue; +#ifdef USE_SPQ + if (SysAtt[i]->attnum == RootSelfItemPointerAttributeNumber) + continue; +#endif if (!hasbucket && SysAtt[i]->attnum == BucketIdAttributeNumber) continue; if (!hasuids && SysAtt[i]->attnum == UidAttributeNumber) @@ -8340,3 +8365,43 @@ char* heap_serialize_row_attr(Oid rel_oid, bool* depend_undefined) FreeStringInfo(&concat_name); return ret; } + +#ifdef USE_SPQ +HeapTuple heaptuple_from_pg_attribute(Relation pg_attribute_rel, + Form_pg_attribute new_attribute) +{ + Datum values[Natts_pg_attribute] = { 0 }; + bool 
nulls[Natts_pg_attribute] = { false }; + + values[Anum_pg_attribute_attrelid - 1] = ObjectIdGetDatum(new_attribute->attrelid); + values[Anum_pg_attribute_attname - 1] = NameGetDatum(&new_attribute->attname); + values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(new_attribute->atttypid); + values[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(new_attribute->attstattarget); + values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); + values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(new_attribute->attnum); + values[Anum_pg_attribute_attndims - 1] = Int32GetDatum(new_attribute->attndims); + values[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(new_attribute->attcacheoff); + values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(new_attribute->atttypmod); + values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); + values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); + values[Anum_pg_attribute_attalign - 1] = CharGetDatum(new_attribute->attalign); + values[Anum_pg_attribute_attnotnull - 1] = BoolGetDatum(new_attribute->attnotnull); + values[Anum_pg_attribute_atthasdef - 1] = BoolGetDatum(new_attribute->atthasdef); + values[Anum_pg_attribute_attisdropped - 1] = BoolGetDatum(new_attribute->attisdropped); + values[Anum_pg_attribute_attislocal - 1] = BoolGetDatum(new_attribute->attislocal); + values[Anum_pg_attribute_attcmprmode - 1] = Int8GetDatum(new_attribute->attcmprmode); + values[Anum_pg_attribute_attinhcount - 1] = Int32GetDatum(new_attribute->attinhcount); + values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(new_attribute->attcollation); + values[Anum_pg_attribute_attkvtype - 1] = Int8GetDatum(new_attribute->attkvtype); + + /* start out with empty permissions and empty options */ + nulls[Anum_pg_attribute_attacl - 1] = true; + nulls[Anum_pg_attribute_attoptions - 1] = true; + nulls[Anum_pg_attribute_attfdwoptions - 1] = true; + + /* at default, new fileld 
attinitdefval of pg_attribute is null. */ + nulls[Anum_pg_attribute_attinitdefval - 1] = true; + + return heap_form_tuple(RelationGetDescr(pg_attribute_rel), values, nulls); +} +#endif diff --git a/contrib/dolphin/plugin_optimizer/commands/copy.cpp b/contrib/dolphin/plugin_optimizer/commands/copy.cpp index cbcc28a5e..71ba492cd 100644 --- a/contrib/dolphin/plugin_optimizer/commands/copy.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/copy.cpp @@ -5862,7 +5862,6 @@ bool IsTypeAcceptEmptyStr(Oid typeOid) if (type_is_set(typeOid)) { return true; } - switch (typeOid) { case VARCHAROID: case NVARCHAR2OID: diff --git a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp index ea1ee443a..8f953aab4 100755 --- a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp @@ -2429,7 +2429,7 @@ static void RecompileFunctionWithArgs(CompileStmt* stmt) if (PROC_IS_PRO(get_func_prokind(func_oid)) && stmt->compileItem == COMPILE_FUNCTION) { ReportRecompileFuncWarning(stmt); } - if (!IsNeedRecompile(func_oid)) { + if (IsNeedRecompile(func_oid)) { RecompileSingleFunction(func_oid, stmt->compileItem == COMPILE_PROCEDURE); return; } diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 2ae995fc6..b4a9b3ad5 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -21419,17 +21419,17 @@ param_name: type_function_name %prec lower_than_zerofill ; func_return: - func_type { - if (enable_plpgsql_gsdependency_guc()) { - pg_yyget_extra(yyscanner)->core_yy_extra.return_pos_end = yylloc; + func_type + { + if (enable_plpgsql_gsdependency_guc()) { + pg_yyget_extra(yyscanner)->core_yy_extra.return_pos_end = yylloc; + } /* We can catch over-specified results here if we want to, * but for now better to silently swallow typmod, etc. 
* - thomas 2000-03-22 */ + $$ = $1; } - $$ = $1; - } - ; /* diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index e1a07986e..7846ae5c9 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -5742,7 +5742,7 @@ static void CheckColumnTableOfType(Type ctype) errmsg("type %u cannot get tupledesc", HeapTupleGetOid(ctype)))); } for (int i = 0; i < tupleDesc->natts; i++) { - if (tupleDesc->attrs[i].attisdropped) { + if (tupleDesc->attrs[i].attisdropped || strcmp(NameStr(tupleDesc->attrs[i].attname), "pljson_list_data") == 0) { continue; } HeapTuple typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(tupleDesc->attrs[i].atttypid)); @@ -6577,6 +6577,11 @@ static char* CreatestmtGetOrientation(CreateStmt *stmt) foreach (lc, stmt->options) { DefElem* def = (DefElem*)lfirst(lc); if (pg_strcasecmp(def->defname, "orientation") == 0) { +#ifdef ENABLE_FINANCE_MODE + if (defGetString(def) == ORIENTATION_COLUMN) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ORIENTATION==COLUMN is not supported on finance mode"))); +#endif return defGetString(def); } } diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y index 0366a66ee..3cd88f8c2 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y +++ b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y @@ -1493,6 +1493,8 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_varrayType($2->name, $2->lineno, $9, true); if (IS_PACKAGE) { plpgsql_build_package_array_type($2->name, $9->typoid, TYPCATEGORY_ARRAY, $9->dependExtend); + } else if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_type_in_body_dependency($9); } pfree_ext($2->name); pfree($2); @@ -1528,6 +1530,12 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_varrayType($2->name, $2->lineno, newp, 
true); if (IS_PACKAGE) { plpgsql_build_package_array_type($2->name, newp->typoid, TYPCATEGORY_ARRAY); + } else if (enable_plpgsql_gsdependency()) { + PLpgSQL_rec_type* rec_var = (PLpgSQL_rec_type*)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$9]; + int i; + for (i = 0; i < rec_var->attrnum; i++) { + gsplsql_build_gs_type_in_body_dependency(rec_var->types[i]); + } } pfree_ext($2->name); pfree($2); @@ -1647,6 +1655,8 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_tableType($2->name, $2->lineno, $6, true); if (IS_PACKAGE) { plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF, $6->dependExtend); + } else if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_type_in_body_dependency($6); } pfree_ext($2->name); pfree($2); @@ -1709,6 +1719,12 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no plpgsql_build_tableType($2->name, $2->lineno, newp, true); if (IS_PACKAGE) { plpgsql_build_package_array_type($2->name, newp->typoid, TYPCATEGORY_TABLEOF); + } else if (enable_plpgsql_gsdependency()) { + PLpgSQL_rec_type* rec_var = (PLpgSQL_rec_type*)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; + int i; + for (i = 0; i < rec_var->attrnum; i++) { + gsplsql_build_gs_type_in_body_dependency(rec_var->types[i]); + } } pfree_ext($2->name); pfree($2); @@ -1821,6 +1837,8 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } else { plpgsql_build_package_array_type($2->name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER, $6->dependExtend); } + } else if (enable_plpgsql_gsdependency()) { + gsplsql_build_gs_type_in_body_dependency($6); } pfree_ext($2->name); pfree($2); @@ -1904,6 +1922,12 @@ decl_statement : decl_varname_list decl_const decl_datatype decl_collate decl_no } else { plpgsql_build_package_array_type($2->name, newp->typoid, TYPCATEGORY_TABLEOF_INTEGER); } + } else if (enable_plpgsql_gsdependency()) { + int i; + 
PLpgSQL_rec_type* rec_var = (PLpgSQL_rec_type*)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; + for (i = 0; i < rec_var->attrnum; i++) { + gsplsql_build_gs_type_in_body_dependency(rec_var->types[i]); + } } pfree_ext($2->name); pfree($2); @@ -3539,7 +3563,7 @@ stmt_while : opt_block_label K_WHILE expr_until_while_loop loop_body { if($3.endtoken != K_LOOP) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), errmsg("'while-do' is only supported in database which dbcompatibility='B'."), parser_errposition(@2))); + (errcode(ERRCODE_SYNTAX_ERROR), errmsg("while-loop syntax is mixed with while-do syntax"), parser_errposition(@2))); } PLpgSQL_stmt_while *newp; @@ -12803,24 +12827,12 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid Oid oldtypeoid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(casttypename), ObjectIdGetDatum(pkgNamespaceOid)); bool oldtypeoidIsValid = OidIsValid(oldtypeoid); - if (enable_plpgsql_gsdependency() && u_sess->plsql_cxt.need_create_depend) { - char* schemaName = get_namespace_name(pkgNamespaceOid); - char* packageName = GetPackageName(pkgOid); - bool dependUndef = gsplsql_check_type_depend_undefined(schemaName, packageName, typname); - pfree_ext(schemaName); - pfree_ext(packageName); - if (dependUndef) { - ObjectAddress address; - address.classId = TypeRelationId; - address.objectId = oldtypeoid; - address.objectSubId = 0; - performDeletion(&address, DROP_CASCADE, PERFORM_DELETION_INTERNAL); - oldtypeoidIsValid = false; - } - } if (OidIsValid(oldtypeoid)) { /* alread build one, just return */ - if(IsPackageDependType(oldtypeoid, u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid)) { + if(IsPackageDependType(oldtypeoid, pkgOid)) { + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE) { + (void)gsplsql_flush_undef_ref_type_dependency(oldtypeoid); + } return; } else { ereport(errstate, @@ -12890,6 +12902,9 @@ static void plpgsql_build_package_array_type(const char* typname,Oid 
elemtypoid myself.objectSubId = 0; recordDependencyOn(&referenced, &myself, DEPENDENCY_AUTO); CommandCounterIncrement(); + if (CompileWhich() == PLPGSQL_COMPILE_PACKAGE && typtyp != TYPTYPE_TABLEOF) { + (void)gsplsql_build_ref_type_dependency(referenced.objectId); + } pfree_ext(casttypename); } diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp index 5da492123..b4c9b4108 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp @@ -1304,7 +1304,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, FlushErrorState(); } PG_END_TRY(); - }else { + } else { bool save_isPerform = u_sess->parser_cxt.isPerform; u_sess->parser_cxt.isPerform = false; parse_rc = plpgsql_yyparse(); @@ -5247,7 +5247,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP } expr->func = NULL; list_free_deep(parsetreeList); - return NULL; + PG_TRY_RETURN(NULL); } } queryList = pg_analyze_and_rewrite_params(parsetree, expr->query, diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp index 4509c2c3c..14ad26601 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp @@ -734,6 +734,20 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) has_switch = true; } + bool save_need_create_depend = u_sess->plsql_cxt.need_create_depend; + u_sess->plsql_cxt.need_create_depend = false; + + _PG_init(); + /* + * Connect to SPI manager + */ + SPI_STACK_LOG("connect", NULL, NULL); + rc = SPI_connect_ext(DestSPI, NULL, NULL, nonatomic ? 
SPI_OPT_NONATOMIC : 0, func_oid); + if (rc != SPI_OK_CONNECT) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("SPI_connect failed: %s when execute PLSQL function.", SPI_result_code_string(rc)))); + } + Oid package_oid = get_package_id(func_oid); if (OidIsValid(package_oid)) { if (u_sess->plsql_cxt.curr_compile_context == NULL || @@ -751,19 +765,7 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) } } } - int fun_arg = fcinfo->nargs; - - _PG_init(); - /* - * Connect to SPI manager - */ - SPI_STACK_LOG("connect", NULL, NULL); - rc = SPI_connect_ext(DestSPI, NULL, NULL, nonatomic ? SPI_OPT_NONATOMIC : 0, func_oid); - if (rc != SPI_OK_CONNECT) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("SPI_connect failed: %s when execute PLSQL function.", SPI_result_code_string(rc)))); - } #ifdef ENABLE_MULTIPLE_NODES bool outer_is_stream = false; bool outer_is_stream_support = false; @@ -782,7 +784,6 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) #endif int connect = SPI_connectid(); Oid firstLevelPkgOid = InvalidOid; - bool save_need_create_depend = u_sess->plsql_cxt.need_create_depend; bool save_curr_status = GetCurrCompilePgObjStatus(); PG_TRY(); { @@ -796,31 +797,15 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) if (func == NULL) { u_sess->plsql_cxt.compile_has_warning_info = false; SetCurrCompilePgObjStatus(true); - if (enable_plpgsql_gsdependency_guc()) { - if (gsplsql_is_undefined_func(func_oid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - (errmsg("\"%s\" header is undefined, you can try to recreate", get_func_name(func_oid))))); - } - if (GetPgObjectValid(func_oid, OBJECT_TYPE_PROC)) { - u_sess->plsql_cxt.need_create_depend = false; - } else { - u_sess->plsql_cxt.need_create_depend = true; - } - } func = plpgsql_compile(fcinfo, false); - if (func == NULL) { - ereport(ERROR, (errcode(ERRCODE_NO_FUNCTION_PROVIDED), errmodule(MOD_PLSQL), - errmsg("compile function error."), - 
errdetail("It may be because the compilation encountered an error and the exception was caught."), - errcause("compile procedure error."), - erraction("compile function result is null, it has error"))); - } - if (enable_plpgsql_gsdependency_guc()) { - if (!OidIsValid(func->pkg_oid)) { - SetPgObjectValid(func_oid, OBJECT_TYPE_PROC, true); + if (enable_plpgsql_gsdependency_guc() && func != NULL) { + SetPgObjectValid(func_oid, OBJECT_TYPE_PROC, GetCurrCompilePgObjStatus()); + if (!GetCurrCompilePgObjStatus()) { + ereport(WARNING, (errmodule(MOD_PLSQL), + errmsg("Function %s recompile with compilation errors, please use ALTER COMPILE to recompile.", + get_func_name(func_oid)))); } } - u_sess->plsql_cxt.need_create_depend = save_need_create_depend; } if (func->fn_readonly) { stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_IMMUTABLE); @@ -1035,11 +1020,11 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) /* destory all the SPI connect created in this PL function. */ SPI_disconnect(connect); - u_sess->plsql_cxt.need_create_depend = save_need_create_depend; /* re-throw the original error messages */ ReThrowError(edata); } PG_END_TRY(); + u_sess->plsql_cxt.need_create_depend = save_need_create_depend; /* clean stp save pointer if the outermost function is end. */ if (u_sess->SPI_cxt._connected == 0) { t_thrd.utils_cxt.STPSavedResourceOwner = NULL; diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index fc5d369f8..932d4c6bf 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -8723,7 +8723,7 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) */ for (i = 0; i < g_instance.attr.attr_storage.NBuffers; i++) { - uint32 buf_state; + uint64 buf_state; bufHdr = GetBufferDescriptor(i); /* Lock each buffer header before inspecting. 
*/ diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index d087fa0b4..4f2abd977 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -993,7 +993,11 @@ void GetPartitionExprKeySrc(StringInfo buf, Datum* datum, char* relname, Oid tab if (partkeyexpr->type == T_OpExpr) (*iPartboundary)[0] = ((OpExpr*)partkeyexpr)->opresulttype; else if (partkeyexpr->type == T_FuncExpr) +#ifdef DOLPHIN (*iPartboundary)[0] = INT8OID; +#else + (*iPartboundary)[0] = ((FuncExpr*)partkeyexpr)->funcresulttype; +#endif else ereport(ERROR, (errcode(ERRCODE_NODE_ID_MISSMATCH), @@ -3271,9 +3275,11 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) } if (tgfbody != NULL) { - char* tgordername = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgordername, tgrel->rd_att, &isnull)); - char* tgorder = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgorder, tgrel->rd_att, &isnull)); - if (tgorder != NULL) + bool isordernull = false; + bool isordernamenull = false; + char* tgordername = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgordername, tgrel->rd_att, &isordernamenull)); + char* tgorder = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgorder, tgrel->rd_att, &isordernull)); + if (!isordernull && !isordernamenull) appendStringInfo(&buf, "%s %s ", tgorder, tgordername); appendStringInfo(&buf, "%s;", tgfbody); diff --git a/contrib/dolphin/plugin_utils/adt/selfuncs.cpp b/contrib/dolphin/plugin_utils/adt/selfuncs.cpp index b38946e18..30730ffe4 100644 --- a/contrib/dolphin/plugin_utils/adt/selfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/selfuncs.cpp @@ -5096,6 +5096,11 @@ double get_variable_numdistinct(VariableStatData* vardata, bool* isdefault, bool case UidAttributeNumber: stadistinct = 1.0; /* only 1 value */ break; +#endif +#ifdef USE_SPQ + case RootSelfItemPointerAttributeNumber: + stadistinct = 0.0; /* means "unknown" */ + break; 
#endif default: stadistinct = 0.0; /* means "unknown" */ diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index c3ac6790b..79e6f8cc2 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -1169,6 +1169,14 @@ static bool isOrientationSet(List* options, bool* isCUFormat, bool isDfsTbl) errdetail("Valid string are \"column\", \"row\"."))); } #endif /* ENABLE_MULTIPLE_NODES */ +#ifdef ENABLE_FINANCE_MODE + if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPTION), + errmsg("Invalid string for \"ORIENTATION\" option"), + errdetail("ORIENTATION=COLUNMN is incorrect, not work on finance mode."))); + } +#endif } if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0 && isCUFormat != NULL) { *isCUFormat = true; @@ -9018,7 +9026,7 @@ static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation re Form_pg_attribute attTup = (Form_pg_attribute)GETSTRUCT(tuple); int attnum = attTup->attnum; if (attnum <= 0 || attTup->attisdropped || !type_is_collatable(attTup->atttypid) || - get_charset_by_collation(attTup->attcollation) == cc->charset) + attTup->attcollation == targetcollid) continue; transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); -- Gitee From 0ce13745f795d128e326f2f3f057a5f761bc26ba Mon Sep 17 00:00:00 2001 From: chenbd Date: Tue, 14 Nov 2023 12:03:53 +0800 Subject: [PATCH 054/434] fix force index primary --- .../test_create_index_if_not_exists.out | 556 +++++++++++++++++- contrib/dolphin/plugin_parser/gram.y | 11 +- .../dolphin/plugin_parser/parse_relation.cpp | 10 + .../sql/test_create_index_if_not_exists.sql | 291 ++++++++- 4 files changed, 863 insertions(+), 5 deletions(-) diff --git a/contrib/dolphin/expected/test_create_index_if_not_exists.out b/contrib/dolphin/expected/test_create_index_if_not_exists.out index 0533c07aa..65ba336e2 100644 --- 
a/contrib/dolphin/expected/test_create_index_if_not_exists.out +++ b/contrib/dolphin/expected/test_create_index_if_not_exists.out @@ -8,4 +8,558 @@ create index if not exists test_index on test(a); NOTICE: relation "test_index" already exists, skipping create index if not exists test_index1 on test(a); drop table test; -drop schema create_index_if_not_exists; +-- normal index +create table db_1097149_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097149_tb values(1,1,1,'a'); +insert into db_1097149_tb values(1,2,2,'a'); +insert into db_1097149_tb values(2,2,2,'a'); +insert into db_1097149_tb values(2,2,3,'b'); +insert into db_1097149_tb values(2,3,3,'b'); +insert into db_1097149_tb values(3,3,4,'b'); +insert into db_1097149_tb values(3,3,4,'a'); +insert into db_1097149_tb values(3,4,5,'c'); +insert into db_1097149_tb values(4,4,5,'c'); +insert into db_1097149_tb values(4,null,1,'c'); +create index index_1097149_1 on db_1097149_tb (col1); +create index index_1097149_2 on db_1097149_tb (col2); +create index index_1097149_3 on db_1097149_tb (col3); +create index index_1097149_4 on db_1097149_tb (col4); +analyze db_1097149_tb; +select * from db_1097149_tb force key (index_1097149_2) where col2= 3; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | b + 3 | 3 | 4 | a +(3 rows) + +select * from db_1097149_tb force key (index_1097149_4) where col2= 3 and col4 = 'a'; + col1 | col2 | col3 | col4 +------+------+------+------ + 3 | 3 | 4 | a +(1 row) + +select * from db_1097149_tb FORCE key (index_1097149_1) where col2= 3; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | b + 3 | 3 | 4 | a +(3 rows) + +explain (costs off,verbose true )select * from db_1097149_tb force key (index_1097149_2) where col2= 3; + QUERY PLAN +------------------------------------------------------------------------------ + [Bypass] + Index Scan using index_1097149_2 on create_index_if_not_exists.db_1097149_tb 
+ Output: col1, col2, col3, col4 + Index Cond: (db_1097149_tb.col2 = 3) +(4 rows) + +explain (costs off,verbose true )select * from db_1097149_tb force key (index_1097149_4) where col2= 3 and col4 = 'a'; + QUERY PLAN +------------------------------------------------------------------------------ + Index Scan using index_1097149_4 on create_index_if_not_exists.db_1097149_tb + Output: col1, col2, col3, col4 + Index Cond: ((db_1097149_tb.col4)::text = 'a'::text) + Filter: (db_1097149_tb.col2 = 3) +(4 rows) + +explain (costs off,verbose true ) select * from db_1097149_tb FORCE key (index_1097149_1) where col2= 3; + QUERY PLAN +------------------------------------------------------ + Seq Scan on create_index_if_not_exists.db_1097149_tb + Output: col1, col2, col3, col4 + Filter: (db_1097149_tb.col2 = 3) +(3 rows) + +--mix use force and use index error. +create table db_1097156_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097156_tb values(1,1,1,'a'); +insert into db_1097156_tb values(1,2,2,'a'); +insert into db_1097156_tb values(2,2,2,'a'); +insert into db_1097156_tb values(2,2,3,'b'); +insert into db_1097156_tb values(2,3,3,'b'); +insert into db_1097156_tb values(3,3,4,'b'); +insert into db_1097156_tb values(3,3,4,'a'); +insert into db_1097156_tb values(3,4,5,'c'); +insert into db_1097156_tb values(4,4,5,'c'); +insert into db_1097156_tb values(4,null,1,'c'); +create index index_1097156_1 on db_1097156_tb (col1); +create index index_1097156_2 on db_1097156_tb (col2); +create index index_1097156_3 on db_1097156_tb (col3); +create index index_1097156_4 on db_1097156_tb (col4); +analyze db_1097156_tb; +select * from db_1097156_tb use index (index_1097156_1) force index (index_1097156_2) where col2= 3; +ERROR: mixed use force index and use index +select * from db_1097156_tb force index (index_1097156_2) use index (index_1097156_1) where col2= 3; +ERROR: mixed use force index and use index +select * from db_1097156_tb use index (index_1097156_2) force 
index (index_1097156_2) where col2= 3; +ERROR: mixed use force index and use index +select * from db_1097156_tb use index (index_1097156_1) force index (index_1097156_2) use index (index_1097156_3) use index (index_1097156_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; +ERROR: mixed use force index and use index +-- use ,choose low cost plan in index or seqscan +create table db_1097155_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097155_tb values(1,1,1,'a'); +insert into db_1097155_tb values(1,2,2,'a'); +insert into db_1097155_tb values(2,2,2,'a'); +insert into db_1097155_tb values(2,2,3,'b'); +insert into db_1097155_tb values(2,3,3,'b'); +insert into db_1097155_tb values(3,3,4,'b'); +insert into db_1097155_tb values(3,3,4,'a'); +insert into db_1097155_tb values(3,4,5,'c'); +insert into db_1097155_tb values(4,4,5,'c'); +insert into db_1097155_tb values(4,null,1,'c'); +create index index_1097155_1 on db_1097155_tb (col1); +create index index_1097155_2 on db_1097155_tb (col2); +create index index_1097155_3 on db_1097155_tb (col3); +create index index_1097155_4 on db_1097155_tb (col4); +analyze db_1097155_tb; +select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_1) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_2) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) 
where col2= 3 and col1 = 2 and col3 = 3 and col4='b' order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b +(1 row) + +select * from db_1097155_tb use index (index_1097155_1, index_1097155_2) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_2, index_1097155_1) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_2, index_1097155_2) where col2= 3 order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b + 3 | 3 | 4 | a + 3 | 3 | 4 | b +(3 rows) + +select * from db_1097155_tb use index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b' order by 1,2,3,4; + col1 | col2 | col3 | col4 +------+------+------+------ + 2 | 3 | 3 | b +(1 row) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_1) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + QUERY PLAN 
+------------------------------------------------------------------------------------- + Seq Scan on db_1097155_tb + Filter: ((col2 = 3) AND (col3 = 3) AND (col1 = 2) AND ((col4)::text = 'b'::text)) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1, index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2, index_1097155_1) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2, index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col2 = 3) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + QUERY PLAN +------------------------------------------------------------------------------------- + Seq Scan on db_1097155_tb + Filter: ((col2 = 3) AND (col3 = 3) AND (col1 = 2) AND ((col4)::text = 'b'::text)) +(2 rows) + +-- force ,choose index only, no seqscan +explain (costs off )select * from db_1097155_tb force index (index_1097155_1) use index (index_1097155_2) where col2= 3; +ERROR: mixed use force index and use index +explain (costs off )select * from db_1097155_tb force index (index_1097155_2) use index (index_1097155_1) where col2= 3; +ERROR: mixed use force index and use index +explain (costs off )select * from db_1097155_tb force index (index_1097155_2) use index (index_1097155_2) where col2= 3; +ERROR: mixed use force index and use index +explain (costs off )select * from db_1097155_tb force index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and 
col4='b'; +ERROR: mixed use force index and use index +explain (costs off )select * from db_1097155_tb force index (index_1097155_1, index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------------------------------- + [Bypass] + Index Scan using index_1097155_2 on db_1097155_tb + Index Cond: (col2 = 3) +(3 rows) + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2, index_1097155_1) where col2= 3; + QUERY PLAN +--------------------------------------------------- + [Bypass] + Index Scan using index_1097155_2 on db_1097155_tb + Index Cond: (col2 = 3) +(3 rows) + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2, index_1097155_2) where col2= 3; + QUERY PLAN +--------------------------------------------------- + [Bypass] + Index Scan using index_1097155_2 on db_1097155_tb + Index Cond: (col2 = 3) +(3 rows) + +explain (costs off )select * from db_1097155_tb force index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + QUERY PLAN +---------------------------------------------------------------------- + Index Scan using index_1097155_3 on db_1097155_tb + Index Cond: (col3 = 3) + Filter: ((col2 = 3) AND (col1 = 2) AND ((col4)::text = 'b'::text)) +(3 rows) + +-- test use index can choose best index and seqscan +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) where col1 > 1; + QUERY PLAN +--------------------------- + Seq Scan on db_1097155_tb + Filter: (col1 > 1) +(2 rows) + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) where col1 < 1; + QUERY 
PLAN +--------------------------------------------------- + [Bypass] + Index Scan using index_1097155_1 on db_1097155_tb + Index Cond: (col1 < 1) +(3 rows) + +-- index not exists +create table db_1097157_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097157_tb values(1,1,1,'a'); +insert into db_1097157_tb values(1,2,2,'a'); +insert into db_1097157_tb values(2,2,2,'a'); +insert into db_1097157_tb values(2,2,3,'b'); +insert into db_1097157_tb values(2,3,3,'b'); +insert into db_1097157_tb values(3,3,4,'b'); +insert into db_1097157_tb values(3,3,4,'a'); +insert into db_1097157_tb values(3,4,5,'c'); +insert into db_1097157_tb values(4,4,5,'c'); +insert into db_1097157_tb values(4,null,1,'c'); +create index index_1097157_1 on db_1097157_tb (col1); +create index index_1097157_2 on db_1097157_tb (col2); +create index index_1097157_3 on db_1097157_tb (col3); +create index index_1097157_4 on db_1097157_tb (col4); +analyze db_1097157_tb; +create table db_1097157_tb_1 as select * from db_1097157_tb; +create index index_1097157_5 on db_1097157_tb_1 (col1); +select * from db_1097157_tb use index (index_1097157_5) where col2= 3; +ERROR: index not exists in relation db_1097157_tb +select * from db_1097157_tb force index (index_1097157_5) where col2= 3; +ERROR: index not exists in relation db_1097157_tb +select * from db_1097157_tb use index (index_1097157_6) where col2= 3; +ERROR: index not exists in relation db_1097157_tb +select * from db_1097157_tb force index (index_1097157_6) where col2= 3; +ERROR: index not exists in relation db_1097157_tb +-- index_hint in group by +create table db_ID1097254_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_ID1097254_tb values(1,1,1,'a'); +insert into db_ID1097254_tb values(1,2,2,'a'); +insert into db_ID1097254_tb values(2,2,2,'a'); +insert into db_ID1097254_tb values(2,2,3,'b'); +insert into db_ID1097254_tb values(2,3,3,'b'); +insert into db_ID1097254_tb values(3,3,4,'b'); +insert into db_ID1097254_tb 
values(3,3,4,'a'); +insert into db_ID1097254_tb values(3,4,5,'c'); +insert into db_ID1097254_tb values(4,4,5,'c'); +insert into db_ID1097254_tb values(4,null,1,'c'); +create index "Index_ID1097254%%_1" on db_ID1097254_tb (col1); +create index INDEX_ID1097254_2 on db_ID1097254_tb (col2); +analyze db_ID1097254_tb; +select max(t1.col2)+1,t2.col2 from db_ID1097254_tb t1 force index ("Index_ID1097254%%_1") join db_ID1097254_tb t2 force index (INDEX_ID1097254_2) on t1.col3=t2.col3 where t1.col1>= 3 and t2.col2 <=4 group by 2,t1.col2 having t1.col2 in (select max(col2) from db_ID1097254_tb force index ("Index_ID1097254%%_1") where col1>= 3) order by 1,2 ; + ?column? | col2 +----------+------ + 5 | 4 +(1 row) + +explain (costs off )select max(t1.col2)+1,t2.col2 from db_ID1097254_tb t1 force index ("Index_ID1097254%%_1") join db_ID1097254_tb t2 force index (INDEX_ID1097254_2) on t1.col3=t2.col3 where t1.col1>= 3 and t2.col2 <=4 group by 2,t1.col2 having t1.col2 in (select max(col2) from db_ID1097254_tb force index ("Index_ID1097254%%_1") where col1>= 3) order by 1,2 ; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: ((max(t1.col2) + 1)), t2.col2 + -> HashAggregate + Group By Key: t2.col2, t1.col2 + Filter: (hashed SubPlan 1) + -> Hash Join + Hash Cond: (t2.col3 = t1.col3) + -> Index Scan using index_id1097254_2 on db_id1097254_tb t2 + Index Cond: (col2 <= 4) + -> Hash + -> Seq Scan on db_id1097254_tb t1 + Filter: (col1 >= 3) + SubPlan 1 + -> Aggregate + -> Index Scan using "Index_ID1097254%%_1" on db_id1097254_tb + Index Cond: (col1 >= 3) +(16 rows) + +--test first index exists ,second not exists +create table tb_ih_1 (a int, b int); +insert into tb_ih_1 values(1,2),(3,4),(5,6); +create index INDEX_tb1 on tb_ih_1 (a); +create index INDEX_tb2 on tb_ih_1 (a); +analyze tb_ih_1; +--can report +explain (costs off) select a from tb_ih_1 force key (INDEX_tb1,idex_t2); +ERROR: index not exists in relation tb_ih_1 
+--can plan +explain (costs off) select a from tb_ih_1 force key (INDEX_tb1,INDEX_tb2); + QUERY PLAN +-------------------------------------------- + [Bypass] + Index Only Scan using index_tb2 on tb_ih_1 +(2 rows) + +--partition +drop table if exists startend_pt; +NOTICE: table "startend_pt" does not exist, skipping +CREATE TABLE startend_pt (c1 INT, c2 INT) +PARTITION BY RANGE (c2) ( + PARTITION p1 START(1) END(1000), + PARTITION p2 END(2000) +); +create index idx_startend on startend_pt(c2) local (partition idxp1,partition idxp2,partition idxp3); +insert into startend_pt values(2,2),(3,3),(4,4),(5,5); +insert into startend_pt values(202,202),(203,203),(204,204),(1999,1999); +analyze startend_pt; +explain(costs off) select c2 from startend_pt partition (p2) force index (idx_startend) where c2 > 1998; + QUERY PLAN +--------------------------------------------------------------------- + Partition Iterator + Iterations: 1 + -> Partitioned Index Only Scan using idx_startend on startend_pt + Index Cond: (c2 > 1998) + Selected Partitions: 3 +(5 rows) + +explain(costs off) select c2 from startend_pt partition for (200) force index (idx_startend) where c2 > 1998; + QUERY PLAN +--------------------------------------------------------------------- + Partition Iterator + Iterations: 1 + -> Partitioned Index Only Scan using idx_startend on startend_pt + Index Cond: (c2 > 1998) + Selected Partitions: 2 +(5 rows) + +explain(costs off) select c2 from startend_pt partition (p2) where c2 > 1998; + QUERY PLAN +------------------------------------------- + Partition Iterator + Iterations: 1 + -> Partitioned Seq Scan on startend_pt + Filter: (c2 > 1998) + Selected Partitions: 3 +(5 rows) + +drop table if exists list_list; +NOTICE: table "list_list" does not exist, skipping +CREATE TABLE list_list +( + month_code int NOT NULL , + dept_code int NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + 
PARTITION p_201901 values(100) + ( + SUBPARTITION pa1 values less than (100), + SUBPARTITION pa2 values less than (200) + ) +); +create index idx_list on list_list(dept_code) local ; +insert into list_list values('100', '2', '1', 1); +insert into list_list values('100', '1', '1', 1); +analyze list_list; +explain (costs off) select user_no from list_list subpartition (pa1) use index (idx_list) where user_no = 1; + QUERY PLAN +----------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 1 + -> Partitioned Seq Scan on list_list + Filter: ((user_no)::bigint = 1) + Selected Partitions: 1 + Selected Subpartitions: 1:1 +(6 rows) + +explain (costs off)select user_no from list_list subpartition for (100,4) use index (idx_list) where user_no = 1; + QUERY PLAN +----------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 1 + -> Partitioned Seq Scan on list_list + Filter: ((user_no)::bigint = 1) + Selected Partitions: 1 + Selected Subpartitions: 1:1 +(6 rows) + +-- not support in a database +create table db_ID1097168_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_ID1097168_tb values(1,1,1,'a'); +insert into db_ID1097168_tb values(1,2,2,'a'); +insert into db_ID1097168_tb values(2,2,2,'a'); +insert into db_ID1097168_tb values(2,2,3,'b'); +insert into db_ID1097168_tb values(2,3,3,'b'); +insert into db_ID1097168_tb values(3,3,4,'b'); +insert into db_ID1097168_tb values(3,3,4,'a'); +insert into db_ID1097168_tb values(3,4,5,'c'); +insert into db_ID1097168_tb values(4,4,5,'c'); +insert into db_ID1097168_tb values(4,null,1,'c'); +create index "Index_ID1097168%%_1" on db_ID1097168_tb (col1); +create index INDEX_ID1097168_2 on db_ID1097168_tb (col2); +create index index_ID1097168_3 on db_ID1097168_tb (col3); +analyze db_ID1097168_tb; +select max(col2)+1 from db_ID1097168_tb force index ("Index_ID1097168%%_1") where col2>= 3 ; + ?column? 
+---------- + 5 +(1 row) + +--primary test +drop table if exists t1; +NOTICE: table "t1" does not exist, skipping +create table t1 (a int primary key); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" +insert into t1 values(1); +insert into t1 values(2); +insert into t1 values(4); +insert into t1 values(5); +select a from t1 force index(primary) where a = 2 order by a; + a +--- + 2 +(1 row) + +create index inx_t1 on t1(a); +select a from t1 force index(primary) where a = 2 order by a; + a +--- + 2 +(1 row) + +select a from t1 force index("inx_t1") where a = 2 order by a; + a +--- + 2 +(1 row) + +explain (costs off) select a from t1 force index(primary) where a > 2 order by a; + QUERY PLAN +------------------------------------- + [Bypass] + Index Only Scan using t1_pkey on t1 + Index Cond: (a > 2) +(3 rows) + +explain (costs off) select a from t1 force index("inx_t1") where a > 2 order by a; + QUERY PLAN +------------------------------------ + [Bypass] + Index Only Scan using inx_t1 on t1 + Index Cond: (a > 2) +(3 rows) + +explain (costs off) select a from t1 use index(primary) where a > 2 order by a; + QUERY PLAN +------------------------- + Sort + Sort Key: a + -> Seq Scan on t1 + Filter: (a > 2) +(4 rows) + +explain (costs off) select a from t1 use index("inx_t1") where a > 2 order by a; + QUERY PLAN +------------------------- + Sort + Sort Key: a + -> Seq Scan on t1 + Filter: (a > 2) +(4 rows) + +reset current_schema; +drop schema create_index_if_not_exists cascade; +NOTICE: drop cascades to 11 other objects +DETAIL: drop cascades to table create_index_if_not_exists.db_1097149_tb +drop cascades to table create_index_if_not_exists.db_1097156_tb +drop cascades to table create_index_if_not_exists.db_1097155_tb +drop cascades to table create_index_if_not_exists.db_1097157_tb +drop cascades to table create_index_if_not_exists.db_1097157_tb_1 +drop cascades to table create_index_if_not_exists.db_id1097254_tb +drop cascades to 
table create_index_if_not_exists.tb_ih_1 +drop cascades to table create_index_if_not_exists.startend_pt +drop cascades to table create_index_if_not_exists.list_list +drop cascades to table create_index_if_not_exists.db_id1097168_tb +drop cascades to table create_index_if_not_exists.t1 diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 14e0edc70..b2fc9fb41 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -743,7 +743,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); name namedata_string fdwName cursor_name file_name index_name cluster_index_specification dolphin_index_name pgxcnode_name pgxcgroup_name resource_pool_name workload_group_name - application_name password_string hint_string + application_name password_string hint_string dolphin_force_index_name %type func_name func_name_opt_arg dolphin_func_name_opt_arg pkg_name handler_name qual_Op qual_all_Op subquery_Op dolphin_func_name opt_class opt_inline_handler opt_validator validator_clause opt_collation collate_option @@ -18986,11 +18986,11 @@ opt_index_name: /* b compatibility index hint part */ key_usage_list: - index_name + dolphin_force_index_name { $$ = list_make1(makeString($1)); } - | key_usage_list ',' index_name + | key_usage_list ',' dolphin_force_index_name { $$ = lappend($1,makeString($3)); } @@ -36590,6 +36590,11 @@ attr_name: ColLabel { $$ = $1; }; index_name: ColId { $$ = $1; }; +dolphin_force_index_name: + index_name { $$ = $1; } + | PRIMARY { $$ = downcase_str(pstrdup($1), false);} + ; + dolphin_index_name: DolphinColId { $$ = downcase_str($1->str, $1->is_quoted); }; file_name: SCONST { $$ = $1; }; diff --git a/contrib/dolphin/plugin_parser/parse_relation.cpp b/contrib/dolphin/plugin_parser/parse_relation.cpp index 75ee3ecf1..7a96bf06d 100644 --- a/contrib/dolphin/plugin_parser/parse_relation.cpp +++ b/contrib/dolphin/plugin_parser/parse_relation.cpp @@ -3077,7 +3077,17 @@ static 
IndexHintType preCheckIndexHints(ParseState* pstate, List* indexhints, Re } /*check index is in table*/ foreach (lc_index, idef->indexnames) { +#ifdef DOLPHIN + char* indexName = strVal(lfirst(lc_index)); + /* if index name is a primary, we use this relation's pk */ + if (strcmp(indexName, "primary") == 0) { + indexOid = RelationGetPrimaryKeyIndex(relation); + } else { + indexOid = get_relname_relid(indexName, relationNsOid); + } +#else indexOid = get_relname_relid(strVal(lfirst(lc_index)), relationNsOid); +#endif exist_indexs = false; if (OidIsValid(indexOid)) { if (list_member_oid(indexList, indexOid)) { diff --git a/contrib/dolphin/sql/test_create_index_if_not_exists.sql b/contrib/dolphin/sql/test_create_index_if_not_exists.sql index cabb6d835..1a7faf1ec 100644 --- a/contrib/dolphin/sql/test_create_index_if_not_exists.sql +++ b/contrib/dolphin/sql/test_create_index_if_not_exists.sql @@ -8,4 +8,293 @@ create index if not exists test_index on test(a); create index if not exists test_index1 on test(a); drop table test; -drop schema create_index_if_not_exists; + +-- normal index +create table db_1097149_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097149_tb values(1,1,1,'a'); +insert into db_1097149_tb values(1,2,2,'a'); +insert into db_1097149_tb values(2,2,2,'a'); +insert into db_1097149_tb values(2,2,3,'b'); +insert into db_1097149_tb values(2,3,3,'b'); +insert into db_1097149_tb values(3,3,4,'b'); +insert into db_1097149_tb values(3,3,4,'a'); +insert into db_1097149_tb values(3,4,5,'c'); +insert into db_1097149_tb values(4,4,5,'c'); +insert into db_1097149_tb values(4,null,1,'c'); + +create index index_1097149_1 on db_1097149_tb (col1); +create index index_1097149_2 on db_1097149_tb (col2); +create index index_1097149_3 on db_1097149_tb (col3); +create index index_1097149_4 on db_1097149_tb (col4); +analyze db_1097149_tb; + +select * from db_1097149_tb force key (index_1097149_2) where col2= 3; + +select * from db_1097149_tb force key 
(index_1097149_4) where col2= 3 and col4 = 'a'; + +select * from db_1097149_tb FORCE key (index_1097149_1) where col2= 3; + +explain (costs off,verbose true )select * from db_1097149_tb force key (index_1097149_2) where col2= 3; + +explain (costs off,verbose true )select * from db_1097149_tb force key (index_1097149_4) where col2= 3 and col4 = 'a'; + +explain (costs off,verbose true ) select * from db_1097149_tb FORCE key (index_1097149_1) where col2= 3; + + +--mix use force and use index error. + +create table db_1097156_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097156_tb values(1,1,1,'a'); +insert into db_1097156_tb values(1,2,2,'a'); +insert into db_1097156_tb values(2,2,2,'a'); +insert into db_1097156_tb values(2,2,3,'b'); +insert into db_1097156_tb values(2,3,3,'b'); +insert into db_1097156_tb values(3,3,4,'b'); +insert into db_1097156_tb values(3,3,4,'a'); +insert into db_1097156_tb values(3,4,5,'c'); +insert into db_1097156_tb values(4,4,5,'c'); +insert into db_1097156_tb values(4,null,1,'c'); + +create index index_1097156_1 on db_1097156_tb (col1); +create index index_1097156_2 on db_1097156_tb (col2); +create index index_1097156_3 on db_1097156_tb (col3); +create index index_1097156_4 on db_1097156_tb (col4); +analyze db_1097156_tb; + +select * from db_1097156_tb use index (index_1097156_1) force index (index_1097156_2) where col2= 3; + +select * from db_1097156_tb force index (index_1097156_2) use index (index_1097156_1) where col2= 3; + +select * from db_1097156_tb use index (index_1097156_2) force index (index_1097156_2) where col2= 3; + +select * from db_1097156_tb use index (index_1097156_1) force index (index_1097156_2) use index (index_1097156_3) use index (index_1097156_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + +-- use ,choose low cost plan in index or seqscan + +create table db_1097155_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097155_tb values(1,1,1,'a'); +insert into 
db_1097155_tb values(1,2,2,'a'); +insert into db_1097155_tb values(2,2,2,'a'); +insert into db_1097155_tb values(2,2,3,'b'); +insert into db_1097155_tb values(2,3,3,'b'); +insert into db_1097155_tb values(3,3,4,'b'); +insert into db_1097155_tb values(3,3,4,'a'); +insert into db_1097155_tb values(3,4,5,'c'); +insert into db_1097155_tb values(4,4,5,'c'); +insert into db_1097155_tb values(4,null,1,'c'); + +create index index_1097155_1 on db_1097155_tb (col1); +create index index_1097155_2 on db_1097155_tb (col2); +create index index_1097155_3 on db_1097155_tb (col3); +create index index_1097155_4 on db_1097155_tb (col4); +analyze db_1097155_tb; + +select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_1) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_2) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b' order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_1, index_1097155_2) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_2, index_1097155_1) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_2, index_1097155_2) where col2= 3 order by 1,2,3,4; + +select * from db_1097155_tb use index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b' order by 1,2,3,4; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2) use index (index_1097155_1) where col2= 3; + +explain (costs off )select 
* from db_1097155_tb use index (index_1097155_2) use index (index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1, index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2, index_1097155_1) where col2= 3; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_2, index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + +-- force ,choose index only, no seqscan +explain (costs off )select * from db_1097155_tb force index (index_1097155_1) use index (index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2) use index (index_1097155_1) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2) use index (index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_1) use index (index_1097155_2)use index (index_1097155_3) use index (index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_1, index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2, index_1097155_1) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_2, index_1097155_2) where col2= 3; + +explain (costs off )select * from db_1097155_tb force index (index_1097155_1, index_1097155_2, index_1097155_3, index_1097155_4) where col2= 3 and col1 = 2 and col3 = 3 and col4='b'; + +-- test use 
index can choose best index and seqscan +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; +insert into db_1097155_tb select * from db_1097155_tb; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) where col1 > 1; + +explain (costs off )select * from db_1097155_tb use index (index_1097155_1) where col1 < 1; + +-- index not exists +create table db_1097157_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_1097157_tb values(1,1,1,'a'); +insert into db_1097157_tb values(1,2,2,'a'); +insert into db_1097157_tb values(2,2,2,'a'); +insert into db_1097157_tb values(2,2,3,'b'); +insert into db_1097157_tb values(2,3,3,'b'); +insert into db_1097157_tb values(3,3,4,'b'); +insert into db_1097157_tb values(3,3,4,'a'); +insert into db_1097157_tb values(3,4,5,'c'); +insert into db_1097157_tb values(4,4,5,'c'); +insert into db_1097157_tb values(4,null,1,'c'); + +create index index_1097157_1 on db_1097157_tb (col1); +create index index_1097157_2 on db_1097157_tb (col2); +create index index_1097157_3 on db_1097157_tb (col3); +create index index_1097157_4 on db_1097157_tb (col4); +analyze db_1097157_tb; + +create table db_1097157_tb_1 as select * from db_1097157_tb; + +create index index_1097157_5 on db_1097157_tb_1 (col1); + +select * from db_1097157_tb use index (index_1097157_5) where col2= 3; + +select * from db_1097157_tb force index (index_1097157_5) where col2= 3; + +select * from db_1097157_tb use index (index_1097157_6) where col2= 3; + +select * from db_1097157_tb force index (index_1097157_6) where col2= 3; + +-- index_hint in group by + +create table db_ID1097254_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_ID1097254_tb values(1,1,1,'a'); +insert into db_ID1097254_tb 
values(1,2,2,'a'); +insert into db_ID1097254_tb values(2,2,2,'a'); +insert into db_ID1097254_tb values(2,2,3,'b'); +insert into db_ID1097254_tb values(2,3,3,'b'); +insert into db_ID1097254_tb values(3,3,4,'b'); +insert into db_ID1097254_tb values(3,3,4,'a'); +insert into db_ID1097254_tb values(3,4,5,'c'); +insert into db_ID1097254_tb values(4,4,5,'c'); +insert into db_ID1097254_tb values(4,null,1,'c'); + +create index "Index_ID1097254%%_1" on db_ID1097254_tb (col1); +create index INDEX_ID1097254_2 on db_ID1097254_tb (col2); + +analyze db_ID1097254_tb; + + +select max(t1.col2)+1,t2.col2 from db_ID1097254_tb t1 force index ("Index_ID1097254%%_1") join db_ID1097254_tb t2 force index (INDEX_ID1097254_2) on t1.col3=t2.col3 where t1.col1>= 3 and t2.col2 <=4 group by 2,t1.col2 having t1.col2 in (select max(col2) from db_ID1097254_tb force index ("Index_ID1097254%%_1") where col1>= 3) order by 1,2 ; + +explain (costs off )select max(t1.col2)+1,t2.col2 from db_ID1097254_tb t1 force index ("Index_ID1097254%%_1") join db_ID1097254_tb t2 force index (INDEX_ID1097254_2) on t1.col3=t2.col3 where t1.col1>= 3 and t2.col2 <=4 group by 2,t1.col2 having t1.col2 in (select max(col2) from db_ID1097254_tb force index ("Index_ID1097254%%_1") where col1>= 3) order by 1,2 ; + +--test first index exists ,second not exists + +create table tb_ih_1 (a int, b int); +insert into tb_ih_1 values(1,2),(3,4),(5,6); +create index INDEX_tb1 on tb_ih_1 (a); +create index INDEX_tb2 on tb_ih_1 (a); +analyze tb_ih_1; +--can report +explain (costs off) select a from tb_ih_1 force key (INDEX_tb1,idex_t2); +--can plan +explain (costs off) select a from tb_ih_1 force key (INDEX_tb1,INDEX_tb2); + +--partition +drop table if exists startend_pt; +CREATE TABLE startend_pt (c1 INT, c2 INT) +PARTITION BY RANGE (c2) ( + PARTITION p1 START(1) END(1000), + PARTITION p2 END(2000) +); + +create index idx_startend on startend_pt(c2) local (partition idxp1,partition idxp2,partition idxp3); +insert into startend_pt 
values(2,2),(3,3),(4,4),(5,5); +insert into startend_pt values(202,202),(203,203),(204,204),(1999,1999); +analyze startend_pt; + +explain(costs off) select c2 from startend_pt partition (p2) force index (idx_startend) where c2 > 1998; +explain(costs off) select c2 from startend_pt partition for (200) force index (idx_startend) where c2 > 1998; +explain(costs off) select c2 from startend_pt partition (p2) where c2 > 1998; + +drop table if exists list_list; +CREATE TABLE list_list +( + month_code int NOT NULL , + dept_code int NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 values(100) + ( + SUBPARTITION pa1 values less than (100), + SUBPARTITION pa2 values less than (200) + ) +); +create index idx_list on list_list(dept_code) local ; +insert into list_list values('100', '2', '1', 1); +insert into list_list values('100', '1', '1', 1); +analyze list_list; +explain (costs off) select user_no from list_list subpartition (pa1) use index (idx_list) where user_no = 1; +explain (costs off)select user_no from list_list subpartition for (100,4) use index (idx_list) where user_no = 1; + + +-- not support in a database + +create table db_ID1097168_tb (col1 int ,col2 int,col3 int,col4 varchar(10)); +insert into db_ID1097168_tb values(1,1,1,'a'); +insert into db_ID1097168_tb values(1,2,2,'a'); +insert into db_ID1097168_tb values(2,2,2,'a'); +insert into db_ID1097168_tb values(2,2,3,'b'); +insert into db_ID1097168_tb values(2,3,3,'b'); +insert into db_ID1097168_tb values(3,3,4,'b'); +insert into db_ID1097168_tb values(3,3,4,'a'); +insert into db_ID1097168_tb values(3,4,5,'c'); +insert into db_ID1097168_tb values(4,4,5,'c'); +insert into db_ID1097168_tb values(4,null,1,'c'); + +create index "Index_ID1097168%%_1" on db_ID1097168_tb (col1); +create index INDEX_ID1097168_2 on db_ID1097168_tb (col2); +create index index_ID1097168_3 on db_ID1097168_tb (col3); + +analyze 
db_ID1097168_tb; + + +select max(col2)+1 from db_ID1097168_tb force index ("Index_ID1097168%%_1") where col2>= 3 ; +--primary test +drop table if exists t1; +create table t1 (a int primary key); +insert into t1 values(1); +insert into t1 values(2); +insert into t1 values(4); +insert into t1 values(5); +select a from t1 force index(primary) where a = 2 order by a; +create index inx_t1 on t1(a); +select a from t1 force index(primary) where a = 2 order by a; +select a from t1 force index("inx_t1") where a = 2 order by a; +explain (costs off) select a from t1 force index(primary) where a > 2 order by a; +explain (costs off) select a from t1 force index("inx_t1") where a > 2 order by a; +explain (costs off) select a from t1 use index(primary) where a > 2 order by a; +explain (costs off) select a from t1 use index("inx_t1") where a > 2 order by a; + +reset current_schema; +drop schema create_index_if_not_exists cascade; -- Gitee From c9922a3c51052d441718252d3e86d7a6b9c8fdfd Mon Sep 17 00:00:00 2001 From: he-shaoyu Date: Tue, 14 Nov 2023 11:47:49 +0800 Subject: [PATCH 055/434] =?UTF-8?q?=E8=A1=A5=E5=85=85dayofmonth=E8=AF=AD?= =?UTF-8?q?=E4=B9=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/db_b_parser2.out | 44 +++++++++++++++++++ contrib/dolphin/expected/test_dayofweek.out | 6 +-- .../rollback_script/dolphin--3.0--2.0.sql | 3 ++ contrib/dolphin/sql/db_b_parser2.sql | 7 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 6 +++ 5 files changed, 62 insertions(+), 4 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index 04cb83748..29c67960e 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -351,6 +351,50 @@ SELECT weekofyear(20221009); 40 (1 row) +SELECT dayofmonth(''); +WARNING: Incorrect datetime value: "" +CONTEXT: referenced column: dayofmonth + dayofmonth +------------ + +(1 row) + 
+SELECT day(''); +WARNING: Incorrect datetime value: "" +CONTEXT: referenced column: day + day +----- + +(1 row) + +SELECT dayofmonth(true); +WARNING: Incorrect datetime value: "1" +CONTEXT: referenced column: dayofmonth + dayofmonth +------------ + +(1 row) + +SELECT day(true); +WARNING: Incorrect datetime value: "1" +CONTEXT: referenced column: day + day +----- + +(1 row) + +SELECT dayofmonth(false); + dayofmonth +------------ + 0 +(1 row) + +SELECT day(false); + day +----- + 0 +(1 row) + select year(''); WARNING: invalid input syntax for type timestamp: "" CONTEXT: referenced column: year diff --git a/contrib/dolphin/expected/test_dayofweek.out b/contrib/dolphin/expected/test_dayofweek.out index f1b6088a8..b69d23075 100644 --- a/contrib/dolphin/expected/test_dayofweek.out +++ b/contrib/dolphin/expected/test_dayofweek.out @@ -204,9 +204,7 @@ select * from t_datetime; set dolphin.b_compatibility_mode = true; select dayofmonth('20:38:40'); -WARNING: invalid input syntax for type timestamp: "20:38:40" -LINE 1: select dayofmonth('20:38:40'); - ^ +WARNING: Incorrect datetime value: "20:38:40" CONTEXT: referenced column: dayofmonth dayofmonth ------------ @@ -220,7 +218,7 @@ select dayofmonth(time'20:38:40'); (1 row) select dayofmonth(203840); -WARNING: timestamp out of range +WARNING: Incorrect datetime value: "203840" CONTEXT: referenced column: dayofmonth dayofmonth ------------ diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index c907dd1fd..189776159 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -97,3 +97,6 @@ BEGIN RETURN 0; END; $$ + +DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(text); +DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(numeric); \ No newline at end of file diff --git a/contrib/dolphin/sql/db_b_parser2.sql b/contrib/dolphin/sql/db_b_parser2.sql index ba6ab89b1..45556c03f 100644 --- 
a/contrib/dolphin/sql/db_b_parser2.sql +++ b/contrib/dolphin/sql/db_b_parser2.sql @@ -75,6 +75,13 @@ SELECT quarter(20221009); SELECT weekday(20221009); SELECT weekofyear(20221009); +SELECT dayofmonth(''); +SELECT day(''); +SELECT dayofmonth(true); +SELECT day(true); +SELECT dayofmonth(false); +SELECT day(false); + select year(''); select year('2022'); select year('2022-06'); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index a14048c14..cd21f6c8d 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -166,3 +166,9 @@ DROP FUNCTION IF EXISTS pg_catalog.oct(bit); CREATE OR REPLACE FUNCTION pg_catalog.oct(t1 bit) RETURNS text AS $$ SELECT pg_catalog.conv(t1, 10, 8) $$ LANGUAGE SQL; + +-- Supplement function dayofmonth to make the function dayofmonth() the same as function day() +DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(text); +DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(numeric); +CREATE OR REPLACE FUNCTION pg_catalog.dayofmonth(text) RETURNS int4 AS $$ SELECT pg_catalog.day($1); $$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION pg_catalog.dayofmonth(numeric) RETURNS int4 AS $$ SELECT pg_catalog.day($1); $$ LANGUAGE SQL; -- Gitee From 98bee906d21a8d6240ee77ebb3a54693c2969292 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Wed, 15 Nov 2023 14:49:50 +0800 Subject: [PATCH 056/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dhook=E7=82=B9?= =?UTF-8?q?=E9=87=8D=E5=AE=9A=E4=B9=89=E5=AF=BC=E8=87=B4=E5=85=BC=E5=AE=B9?= =?UTF-8?q?B=E5=BA=93=E6=9F=A5=E8=AF=A2=E4=B8=8B=E4=B8=8D=E8=B5=B0?= =?UTF-8?q?=E5=A4=9A=E6=9C=BA=E5=B9=B6=E8=A1=8C=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_optimizer/plan/planner.cpp | 6 ------ contrib/whale/plugin_optimizer/plan/planner.cpp | 6 ------ 2 files changed, 12 deletions(-) diff --git 
a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index 9e14ceaba..7e4b4aa56 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -94,12 +94,6 @@ #include "catalog/gs_collation.h" #include "replication/libpqsw.h" -/* Hook for plugins to get control in planner() */ -THR_LOCAL ndp_pushdown_hook_type ndp_pushdown_hook = NULL; -#ifdef USE_SPQ -THR_LOCAL spq_planner_hook_type spq_planner_hook = NULL; -#endif - #ifndef MIN #define MIN(A, B) ((B) < (A) ? (B) : (A)) #endif diff --git a/contrib/whale/plugin_optimizer/plan/planner.cpp b/contrib/whale/plugin_optimizer/plan/planner.cpp index 09e188a86..1b6d7f9f1 100644 --- a/contrib/whale/plugin_optimizer/plan/planner.cpp +++ b/contrib/whale/plugin_optimizer/plan/planner.cpp @@ -92,12 +92,6 @@ #include "catalog/gs_collation.h" #include "replication/libpqsw.h" -/* Hook for plugins to get control in planner() */ -THR_LOCAL ndp_pushdown_hook_type ndp_pushdown_hook = NULL; -#ifdef USE_SPQ -THR_LOCAL spq_planner_hook_type spq_planner_hook = NULL; -#endif - #ifndef MIN #define MIN(A, B) ((B) < (A) ? (B) : (A)) #endif -- Gitee From 6d4f393b7458055198d15a876e471ef78aed8d2f Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 15 Nov 2023 14:47:17 +0800 Subject: [PATCH 057/434] Fix oidhash bug. --- contrib/dolphin/pg_builtin_proc.cpp | 30 ++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/contrib/dolphin/pg_builtin_proc.cpp b/contrib/dolphin/pg_builtin_proc.cpp index 266704981..9891a3757 100644 --- a/contrib/dolphin/pg_builtin_proc.cpp +++ b/contrib/dolphin/pg_builtin_proc.cpp @@ -34,6 +34,15 @@ #include "plugin_utils/fmgroids.h" #include "plugin_utils/pg_builtin_proc.h" +#ifdef DOLPHIN +/* + * we init b_oidhash and b_nameHash by tmp value, to avoid other backend access b_oidhash/b_nameHash + * before we have init all entry in it. 
+ */ +struct HTAB* g_tmp_b_nameHash = NULL; +struct HTAB* g_tmp_b_oidHash = NULL; +#endif + static_assert(sizeof(true) == sizeof(char), "illegal bool size"); static_assert(sizeof(false) == sizeof(char), "illegal bool size"); @@ -91,17 +100,17 @@ static void InitHashTable(int size) info.entrysize = sizeof(HashEntryNameToFuncGroup); info.hash = string_hash; info.hcxt = g_instance.builtin_proc_context; - b_nameHash = hash_create("builtin proc name Lookup Table", size, &info, + g_tmp_b_nameHash = hash_create("builtin proc name Lookup Table", size, &info, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); info.keysize = sizeof(Oid); info.entrysize = sizeof(HashEntryOidToBuiltinFunc); info.hash = oid_hash; info.hcxt = g_instance.builtin_proc_context; - b_oidHash = hash_create("builtin proc Oid Lookup Table", size, &info, + g_tmp_b_oidHash = hash_create("builtin proc Oid Lookup Table", size, &info, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); } -static const FuncGroup* NameHashTableAccess(HASHACTION action, const char* name, const FuncGroup* group) +static const FuncGroup* NameHashTableAccess(HASHACTION action, const char* name, const FuncGroup* group, HTAB* hashp) { char temp_name[MAX_PROC_NAME_LEN] = {0}; int rc = strncpy_s((char*)temp_name, MAX_PROC_NAME_LEN, name, strlen(name)); @@ -111,7 +120,7 @@ static const FuncGroup* NameHashTableAccess(HASHACTION action, const char* name, Assert(name != NULL); - result = (HashEntryNameToFuncGroup *)hash_search(b_nameHash, &temp_name, action, &found); + result = (HashEntryNameToFuncGroup *)hash_search(hashp, &temp_name, action, &found); if (action == HASH_ENTER) { Assert(!found); result->group = group; @@ -127,13 +136,13 @@ static const FuncGroup* NameHashTableAccess(HASHACTION action, const char* name, } -static const Builtin_func* OidHashTableAccess(HASHACTION action, Oid oid, const Builtin_func* func) +static const Builtin_func* OidHashTableAccess(HASHACTION action, Oid oid, const Builtin_func* func, HTAB* hashp) { 
HashEntryOidToBuiltinFunc *result = NULL; bool found = false; Assert(oid > 0); - result = (HashEntryOidToBuiltinFunc *)hash_search(b_oidHash, &oid, action, &found); + result = (HashEntryOidToBuiltinFunc *)hash_search(hashp, &oid, action, &found); if (action == HASH_ENTER) { Assert(!found); result->func = func; @@ -165,11 +174,11 @@ void initBSQLBuiltinFuncs() for (int i = 0; i < b_nfuncgroups; i++) { const FuncGroup* fg = &b_func_groups[i]; CheckNameLength(fg->funcName); - NameHashTableAccess(HASH_ENTER, fg->funcName, fg); + NameHashTableAccess(HASH_ENTER, fg->funcName, fg, g_tmp_b_nameHash); for (int j = 0; j < fg->fnums; j++) { CheckNameLength(fg->funcs[j].funcName); - OidHashTableAccess(HASH_ENTER, fg->funcs[j].foid, &fg->funcs[j]); + OidHashTableAccess(HASH_ENTER, fg->funcs[j].foid, &fg->funcs[j], g_tmp_b_oidHash); g_sorted_funcs[nfunc++] = &fg->funcs[j]; } } @@ -180,4 +189,7 @@ void initBSQLBuiltinFuncs() "the number of functions in is mismatch with the declaration"))); } qsort(g_sorted_funcs, nBuiltinFuncs, sizeof(g_sorted_funcs[0]), cmp_func_by_oid); -} \ No newline at end of file + /* all have beed inited, assign value to real b_nameHash/b_oidHash */ + b_nameHash = g_tmp_b_nameHash; + b_oidHash = g_tmp_b_oidHash; +} -- Gitee From 9a80ef15d75f2a9458a7295ea3c2fab768805b87 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Wed, 15 Nov 2023 17:17:12 +0800 Subject: [PATCH 058/434] =?UTF-8?q?=E9=80=82=E9=85=8D=E6=9C=80=E6=96=B0mas?= =?UTF-8?q?ter1115=EF=BC=8C=E4=BF=AE=E6=94=B9=E6=9D=83=E9=99=90=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/CMakeLists.txt | 1 + contrib/timescaledb/src/CMakeLists.txt | 1 + contrib/timescaledb/src/import/planner.cpp | 2 +- contrib/timescaledb/src/loader/CMakeLists.txt | 1 + contrib/timescaledb/src/net/CMakeLists.txt | 1 + contrib/timescaledb/src/plan_add_hashagg.cpp | 16 +++++----- 
contrib/timescaledb/src/plan_partialize.cpp | 6 ++-- .../timescaledb/src/telemetry/CMakeLists.txt | 1 + contrib/timescaledb/src/tsdb.cpp | 2 +- contrib/timescaledb/src/tsdb_head.h | 29 ++----------------- contrib/timescaledb/test/src/CMakeLists.txt | 1 + .../timescaledb/test/src/bgw/CMakeLists.txt | 1 + .../test/src/loader/CMakeLists.txt | 1 + .../timescaledb/test/src/net/CMakeLists.txt | 1 + .../test/src/telemetry/CMakeLists.txt | 1 + contrib/timescaledb/tsl/CMakeLists.txt | 1 + contrib/timescaledb/tsl/src/CMakeLists.txt | 1 + .../tsl/src/bgw_policy/CMakeLists.txt | 1 + .../tsl/src/compression/CMakeLists.txt | 1 + .../tsl/src/continuous_aggs/CMakeLists.txt | 1 + .../tsl/src/continuous_aggs/create.cpp | 2 +- .../timescaledb/tsl/src/nodes/CMakeLists.txt | 1 + .../tsl/test/isolation/CMakeLists.txt | 1 + .../timescaledb/tsl/test/src/CMakeLists.txt | 1 + 24 files changed, 34 insertions(+), 41 deletions(-) diff --git a/contrib/timescaledb/CMakeLists.txt b/contrib/timescaledb/CMakeLists.txt index 5bfbe524a..b893837b4 100644 --- a/contrib/timescaledb/CMakeLists.txt +++ b/contrib/timescaledb/CMakeLists.txt @@ -385,5 +385,6 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) diff --git a/contrib/timescaledb/src/CMakeLists.txt b/contrib/timescaledb/src/CMakeLists.txt index 04dd8a914..832a2a530 100644 --- a/contrib/timescaledb/src/CMakeLists.txt +++ b/contrib/timescaledb/src/CMakeLists.txt @@ -141,5 +141,6 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) 
include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) diff --git a/contrib/timescaledb/src/import/planner.cpp b/contrib/timescaledb/src/import/planner.cpp index 7b833faf2..a22df10a7 100644 --- a/contrib/timescaledb/src/import/planner.cpp +++ b/contrib/timescaledb/src/import/planner.cpp @@ -267,7 +267,7 @@ ts_make_partial_grouping_target(struct PlannerInfo *root, PathTarget *grouping_t memcpy(newaggref, aggref, sizeof(struct Aggref)); /* For now, assume serialization is required */ - mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL); + mark_partial_aggref(newaggref, AGGSTAGE_PARTIAL); lfirst(lc) = newaggref; } diff --git a/contrib/timescaledb/src/loader/CMakeLists.txt b/contrib/timescaledb/src/loader/CMakeLists.txt index 2ab11d7c1..6c2145398 100644 --- a/contrib/timescaledb/src/loader/CMakeLists.txt +++ b/contrib/timescaledb/src/loader/CMakeLists.txt @@ -42,4 +42,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) diff --git a/contrib/timescaledb/src/net/CMakeLists.txt b/contrib/timescaledb/src/net/CMakeLists.txt index 93e9fe6e3..41f38d4cf 100644 --- a/contrib/timescaledb/src/net/CMakeLists.txt +++ b/contrib/timescaledb/src/net/CMakeLists.txt @@ -16,4 +16,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/src/plan_add_hashagg.cpp 
b/contrib/timescaledb/src/plan_add_hashagg.cpp index c77005c71..1dd570830 100644 --- a/contrib/timescaledb/src/plan_add_hashagg.cpp +++ b/contrib/timescaledb/src/plan_add_hashagg.cpp @@ -52,15 +52,15 @@ plan_add_parallel_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo * /* partial phase */ get_agg_clause_costs(root, (Node *) partial_grouping_target->exprs, - AGGSPLIT_INITIAL_SERIAL, + AGGSTAGE_PARTIAL, &agg_partial_costs); /* final phase */ get_agg_clause_costs(root, (Node *) target->exprs, - AGGSPLIT_FINAL_DESERIAL, + AGGSTAGE_FINAL, &agg_final_costs); - get_agg_clause_costs(root, parse->havingQual, AGGSPLIT_FINAL_DESERIAL, &agg_final_costs); + get_agg_clause_costs(root, parse->havingQual, AGGSTAGE_FINAL, &agg_final_costs); } hashagg_table_size = ts_estimate_hashagg_tablesize(cheapest_partial_path, @@ -80,7 +80,7 @@ plan_add_parallel_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo * cheapest_partial_path, partial_grouping_target, AGG_HASHED, - AGGSPLIT_INITIAL_SERIAL, + AGGSTAGE_PARTIAL, parse->groupClause, NIL, &agg_partial_costs, @@ -105,7 +105,7 @@ plan_add_parallel_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo * partial_path, target, AGG_HASHED, - AGGSPLIT_FINAL_DESERIAL, + AGGSTAGE_FINAL, parse->groupClause, (List *) parse->havingQual, &agg_final_costs, @@ -131,8 +131,8 @@ ts_plan_add_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *output return; MemSet(&agg_costs, 0, sizeof(AggClauseCosts)); - get_agg_clause_costs(root, (Node *) root->processed_tlist, AGGSPLIT_SIMPLE, &agg_costs); - get_agg_clause_costs(root, parse->havingQual, AGGSPLIT_SIMPLE, &agg_costs); + get_agg_clause_costs(root, (Node *) root->processed_tlist, AGGSTAGE_NORMAL, &agg_costs); + get_agg_clause_costs(root, parse->havingQual, AGGSTAGE_NORMAL, &agg_costs); can_hash = (parse->groupClause != NIL && agg_costs.numOrderedAggs == 0 && grouping_is_hashable(parse->groupClause)); @@ -181,7 +181,7 @@ ts_plan_add_hashagg(PlannerInfo *root, 
RelOptInfo *input_rel, RelOptInfo *output cheapest_path, target, AGG_HASHED, - AGGSPLIT_SIMPLE, + AGGSTAGE_NORMAL, parse->groupClause, (List *) parse->havingQual, &agg_costs, diff --git a/contrib/timescaledb/src/plan_partialize.cpp b/contrib/timescaledb/src/plan_partialize.cpp index a63e5ffdf..4d2d3b717 100644 --- a/contrib/timescaledb/src/plan_partialize.cpp +++ b/contrib/timescaledb/src/plan_partialize.cpp @@ -73,7 +73,7 @@ check_for_partialize_function_call(Node *node, PartializeWalkerState *state) aggref->aggsplit =6 ; if (aggref->aggtranstype == INTERNALOID && - DO_AGGSPLIT_SERIALIZE(AGGSPLIT_INITIAL_SERIAL)) + DO_AGGSPLIT_SERIALIZE(AGGSTAGE_PARTIAL)) aggref->aggtype = BYTEAOID; else aggref->aggtype = aggref->aggtranstype; @@ -85,7 +85,7 @@ check_for_partialize_function_call(Node *node, PartializeWalkerState *state) * We check for non-partial aggs to ensure that if any of the aggregates * in a statement are partialized, all of them have to be. */ - else if (aggref->aggsplit != AGGSPLIT_INITIAL_SERIAL) + else if (aggref->aggsplit != AGGSTAGE_PARTIAL) state->found_non_partial_agg = true; } else if (IsA(node, FuncExpr) && ((FuncExpr *) node)->funcid == state->fnoid) @@ -133,7 +133,7 @@ partialize_agg_paths(RelOptInfo *rel) Path *path =(Path *) lfirst(lc); if (IsA(path, AggPath)) - castNode(AggPath, path)->aggsplit = AGGSPLIT_INITIAL_SERIAL; + castNode(AggPath, path)->aggsplit = AGGSTAGE_PARTIAL; } } diff --git a/contrib/timescaledb/src/telemetry/CMakeLists.txt b/contrib/timescaledb/src/telemetry/CMakeLists.txt index 08c08da71..c39055500 100644 --- a/contrib/timescaledb/src/telemetry/CMakeLists.txt +++ b/contrib/timescaledb/src/telemetry/CMakeLists.txt @@ -10,4 +10,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) 
include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/src/tsdb.cpp b/contrib/timescaledb/src/tsdb.cpp index 1561b9607..915925071 100644 --- a/contrib/timescaledb/src/tsdb.cpp +++ b/contrib/timescaledb/src/tsdb.cpp @@ -2256,7 +2256,7 @@ mark_partial_aggref(Aggref *agg, AggSplit aggsplit) /* aggtranstype should be computed by this point */ Assert(OidIsValid(agg->aggtranstype)); /* ... but aggsplit should still be as the parser left it */ - Assert(agg->aggsplit == AGGSPLIT_SIMPLE); + Assert(agg->aggsplit == AGGSTAGE_NORMAL); /* Mark the Aggref with the intended partial-aggregation mode */ agg->aggsplit = aggsplit; diff --git a/contrib/timescaledb/src/tsdb_head.h b/contrib/timescaledb/src/tsdb_head.h index 24990a01a..255f2cab9 100644 --- a/contrib/timescaledb/src/tsdb_head.h +++ b/contrib/timescaledb/src/tsdb_head.h @@ -912,11 +912,7 @@ typedef struct ConstraintAwareAppendPath ExtensiblePath cpath; } ConstraintAwareAppendPath; -typedef struct Result -{ - Plan plan; - Node *resconstantqual; -} Result; + typedef enum VolatileFunctionStatus { @@ -931,15 +927,7 @@ typedef enum VolatileFunctionStatus #define AGGSPLITOP_SKIPFINAL 0x02 #define AGGSPLITOP_SERIALIZE 0x04 #define AGGSPLITOP_DESERIALIZE 0x08 -typedef enum AggSplit -{ - /* Basic, non-split aggregation: */ - AGGSPLIT_SIMPLE = 0, - /* Initial phase of partial aggregation, with serialization: */ - AGGSPLIT_INITIAL_SERIAL = AGGSPLITOP_SKIPFINAL | AGGSPLITOP_SERIALIZE, - /* Final phase of partial aggregation, with deserialization: */ - AGGSPLIT_FINAL_DESERIAL = AGGSPLITOP_COMBINE | AGGSPLITOP_DESERIALIZE -} AggSplit; + typedef struct AggPath { @@ -1158,20 +1146,7 @@ typedef enum WCOKind WCO_RLS_CONFLICT_CHECK /* RLS ON CONFLICT DO UPDATE USING policy */ } WCOKind; -typedef struct RangeTblFunction -{ - NodeTag type; - Node *funcexpr; /* expression tree for func call */ - int funccolcount; /* number of columns it 
contributes to RTE */ - /* These fields record the contents of a column definition list, if any: */ - List *funccolnames; /* column names (list of String) */ - List *funccoltypes; /* OID list of column type OIDs */ - List *funccoltypmods; /* integer list of column typmods */ - List *funccolcollations; /* OID list of column collation OIDs */ - /* This is set during planning for use by the executor: */ - Bitmapset *funcparams; /* PARAM_EXEC Param IDs affecting this func */ -} RangeTblFunction; typedef struct InferenceElem diff --git a/contrib/timescaledb/test/src/CMakeLists.txt b/contrib/timescaledb/test/src/CMakeLists.txt index 4fdab18a1..b09a5eee3 100644 --- a/contrib/timescaledb/test/src/CMakeLists.txt +++ b/contrib/timescaledb/test/src/CMakeLists.txt @@ -33,4 +33,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/bgw/CMakeLists.txt b/contrib/timescaledb/test/src/bgw/CMakeLists.txt index 82d0c743d..00d81167d 100644 --- a/contrib/timescaledb/test/src/bgw/CMakeLists.txt +++ b/contrib/timescaledb/test/src/bgw/CMakeLists.txt @@ -14,4 +14,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/loader/CMakeLists.txt b/contrib/timescaledb/test/src/loader/CMakeLists.txt index 66cf71745..8bcae9aef 100644 --- 
a/contrib/timescaledb/test/src/loader/CMakeLists.txt +++ b/contrib/timescaledb/test/src/loader/CMakeLists.txt @@ -39,4 +39,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/net/CMakeLists.txt b/contrib/timescaledb/test/src/net/CMakeLists.txt index d36b4cd45..04eca7d30 100644 --- a/contrib/timescaledb/test/src/net/CMakeLists.txt +++ b/contrib/timescaledb/test/src/net/CMakeLists.txt @@ -11,4 +11,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/test/src/telemetry/CMakeLists.txt b/contrib/timescaledb/test/src/telemetry/CMakeLists.txt index 596f9de70..a949f346b 100644 --- a/contrib/timescaledb/test/src/telemetry/CMakeLists.txt +++ b/contrib/timescaledb/test/src/telemetry/CMakeLists.txt @@ -10,4 +10,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/CMakeLists.txt b/contrib/timescaledb/tsl/CMakeLists.txt index c413f45fd..7a7a3e59d 100644 --- 
a/contrib/timescaledb/tsl/CMakeLists.txt +++ b/contrib/timescaledb/tsl/CMakeLists.txt @@ -5,4 +5,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/CMakeLists.txt b/contrib/timescaledb/tsl/src/CMakeLists.txt index d55a2e842..c51ab3c73 100644 --- a/contrib/timescaledb/tsl/src/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/CMakeLists.txt @@ -155,4 +155,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) diff --git a/contrib/timescaledb/tsl/src/bgw_policy/CMakeLists.txt b/contrib/timescaledb/tsl/src/bgw_policy/CMakeLists.txt index 05e64ebfb..464af86e3 100644 --- a/contrib/timescaledb/tsl/src/bgw_policy/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/bgw_policy/CMakeLists.txt @@ -11,4 +11,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/compression/CMakeLists.txt b/contrib/timescaledb/tsl/src/compression/CMakeLists.txt index a2cab972a..a9fe67b7c 100644 --- a/contrib/timescaledb/tsl/src/compression/CMakeLists.txt +++ 
b/contrib/timescaledb/tsl/src/compression/CMakeLists.txt @@ -15,4 +15,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/continuous_aggs/CMakeLists.txt b/contrib/timescaledb/tsl/src/continuous_aggs/CMakeLists.txt index b9e7ad935..c92eedcba 100644 --- a/contrib/timescaledb/tsl/src/continuous_aggs/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/continuous_aggs/CMakeLists.txt @@ -12,4 +12,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp index a0206bb46..8bbb03a29 100644 --- a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp +++ b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp @@ -966,7 +966,7 @@ get_finalize_aggref(Aggref *inp, Var *partial_state_var) aggref->aggstar = false; aggref->aggvariadic = false; aggref->aggkind = AGGKIND_NORMAL; - aggref->aggsplit = AGGSPLIT_SIMPLE; + aggref->aggsplit = AGGSTAGE_NORMAL; aggref->location = -1; /*unknown */ /* construct the arguments */ agggregate_signature = DatumGetCString(DirectFunctionCall1(regprocedureout, inp->aggfnoid)); diff --git a/contrib/timescaledb/tsl/src/nodes/CMakeLists.txt b/contrib/timescaledb/tsl/src/nodes/CMakeLists.txt index df0852825..f352c8546 100644 --- 
a/contrib/timescaledb/tsl/src/nodes/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/nodes/CMakeLists.txt @@ -6,4 +6,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt b/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt index 052585ccb..12516e55b 100644 --- a/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt +++ b/contrib/timescaledb/tsl/test/isolation/CMakeLists.txt @@ -6,4 +6,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file diff --git a/contrib/timescaledb/tsl/test/src/CMakeLists.txt b/contrib/timescaledb/tsl/test/src/CMakeLists.txt index e221d0adf..2ff37def7 100644 --- a/contrib/timescaledb/tsl/test/src/CMakeLists.txt +++ b/contrib/timescaledb/tsl/test/src/CMakeLists.txt @@ -20,4 +20,5 @@ set(PROJECT_OPENGS_DIR ${PROJECT_TRUNK_DIR} CACHE INTERNAL "") set(PROJECT_SRC_DIR ${PROJECT_OPENGS_DIR}/src CACHE INTERNAL "") set(Third_party_library $ENV{BINARYLIBS}/kernel/dependency/libobs/comm/include) add_definitions(-DPGXC) +add_definitions(-DUSE_SPQ) include_directories(${PROJECT_SRC_DIR}/include ${PROJECT_INCLUDE_DIR} ${Third_party_library}) \ No newline at end of file -- Gitee From 048e34eae5f835318cf17f316eae4d17f17e8f49 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 15 Nov 2023 17:41:23 +0800 Subject: [PATCH 059/434] 
=?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91fix?= =?UTF-8?q?up=20issue=20I8EI9U:openguass=E7=9A=84time=5Fto=5Fsec=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E9=83=A8=E5=88=86=E8=BF=94=E5=9B=9E=E4=B8=8Emysql?= =?UTF-8?q?=E4=BE=A7=E4=B8=8D=E4=B8=80=E8=87=B4.=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=B8=BB=E8=A6=81?= =?UTF-8?q?=E6=98=AF=E5=AE=9E=E7=8E=B0=E4=B8=80=E4=B8=AAint64=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E5=92=8Cnumeric=E5=8F=82=E6=95=B0=E7=9A=84time=5Fto?= =?UTF-8?q?=5Fsec=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91?= =?UTF-8?q?:=20=E5=85=B6=E5=AE=9E=E5=9C=A8mysql=E4=B8=AD=EF=BC=8Ctime=5Fto?= =?UTF-8?q?=5Fsec(20220101010101)=E8=B5=B0=E7=9A=84=E6=98=AFget=5Fdate=5Ff?= =?UTF-8?q?rom=5Fint=E6=B5=81=E7=A8=8B=EF=BC=8C=E4=BD=86=E6=98=AFtime=5Fto?= =?UTF-8?q?=5Fsec('20220101010101')=E8=B5=B0=E7=9A=84=E6=98=AFget=5Ftime?= =?UTF-8?q?=5Ffrom=5Fstring=E6=B5=81=E7=A8=8B=EF=BC=8C=E4=B8=A4=E4=B8=AA?= =?UTF-8?q?=E6=B5=81=E7=A8=8B=E5=AF=B9=E5=BC=82=E5=B8=B8=E5=80=BC=E7=9A=84?= =?UTF-8?q?=E6=A0=A1=E9=AA=8C=E4=B8=8D=E4=B8=80=E6=A0=B7=EF=BC=8Cget=5Fdat?= =?UTF-8?q?e=5Ffrom=5Fint=E4=BC=9A=E6=A0=A1=E9=AA=8C=E5=B9=B4=E6=9C=88?= =?UTF-8?q?=E6=97=A5=E7=9A=84=E8=8C=83=E5=9B=B4=EF=BC=8C=E4=BD=86=E6=98=AF?= =?UTF-8?q?get=5Ftime=5Ffrom=5Fstring=E4=B8=8D=E4=BC=9A=EF=BC=8C=E4=BD=86?= =?UTF-8?q?=E6=98=AFopenguass=E5=BD=93=E5=89=8D=E9=83=BD=E6=98=AF=E8=B5=B0?= =?UTF-8?q?get=5Ftime=5Ffrom=5Fstring=E4=B8=8A=E5=8D=88=E6=B5=81=E7=A8=8B?= =?UTF-8?q?=EF=BC=8C=E6=89=80=E4=BB=A5int=E5=85=A5=E5=8F=82=E7=9A=84?= =?UTF-8?q?=E6=97=B6=E5=80=99=EF=BC=8C=E8=A1=A8=E7=8E=B0=E5=92=8Cmysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E3=80=82=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E4=B8=80=E4=B8=AAint=E5=85=A5=E5=8F=82=E7=9A=84time=5Fto=5Fsec?= =?UTF-8?q?=EF=BC=8C=E5=9C=A8int=E5=85=A5=E5=8F=82=E7=9A=84time=5Fto=5Fsec?= =?UTF-8?q?=E6=A0=A1=E9=AA=8Cdate=E7=9A=84=E8=8C=83=E5=9B=B4=EF=BC=8C?= 
=?UTF-8?q?=E5=A6=82mysql=E6=89=80=E7=A4=BA=EF=BC=9A=20=E3=80=90=E5=85=B3?= =?UTF-8?q?=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e?= =?UTF-8?q?.gitee.com/opengaussorg/dashboard=3Fissue=3DI8EI9U=20=E3=80=90?= =?UTF-8?q?=E5=BC=80=E5=8F=91=E8=87=AA=E9=AA=8C=E6=8A=A5=E5=91=8A=E3=80=91?= =?UTF-8?q?:=20=E8=AF=B7=E9=99=84=E4=B8=8A=E8=87=AA=E9=AA=8C=E7=BB=93?= =?UTF-8?q?=E6=9E=9C(=E5=86=85=E5=AE=B9=E6=88=96=E8=80=85=E6=88=AA?= =?UTF-8?q?=E5=9B=BE)=20=E6=98=AF=E5=90=A6=E5=8F=AF=E4=BB=A5=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0fastcheck=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=EF=BC=8C?= =?UTF-8?q?=E5=A6=82=E6=98=AF=EF=BC=8C=E8=AF=B7=E8=A1=A5=E5=85=85fastcheck?= =?UTF-8?q?=E7=94=A8=E4=BE=8B=20->=20=E6=98=AF=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99=E4=BF=AE=E6=94=B9=EF=BC=8C?= =?UTF-8?q?=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8docs=E4=BB=93=E5=BA=93?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20=20=20=20->=20=E4=B8=8D?= =?UTF-8?q?=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= =?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= 
=?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/db_b_date_time_functions3.out | 949 ++++++++++++++++++ contrib/dolphin/include/plugin_utils/date.h | 17 +- .../dolphin/include/plugin_utils/datetime.h | 1 + contrib/dolphin/plugin_utils/adt/date.cpp | 218 +++- .../rollback_script/dolphin--3.0--2.0.sql | 5 + .../dolphin/sql/db_b_date_time_functions3.sql | 245 +++++ .../upgrade_script/dolphin--2.0--3.0.sql | 6 + 7 files changed, 1430 insertions(+), 11 deletions(-) diff --git a/contrib/dolphin/expected/db_b_date_time_functions3.out b/contrib/dolphin/expected/db_b_date_time_functions3.out index fa570dfbc..2c5efbdc3 100644 --- a/contrib/dolphin/expected/db_b_date_time_functions3.out +++ b/contrib/dolphin/expected/db_b_date_time_functions3.out @@ -1042,5 +1042,954 @@ select timediff('2022-01-01',date'2022-01-02'); -24:00:00 (1 row) +-- 测试time_to_sec异常边界 +select time_to_sec(8385960); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(8385959); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(-8385960); +WARNING: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-8385959); + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(10000101000000); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec(10000100000000); +WARNING: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(100000101000000); +WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select 
time_to_sec(99999999999999); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(99990100000000); +WARNING: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(20220101010101); + time_to_sec +------------- + 3661 +(1 row) + +select time_to_sec(99991231235959); + time_to_sec +------------- + 86399 +(1 row) + +select time_to_sec(99991231240000); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(10000101000001); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec(10000101000059); + time_to_sec +------------- + 59 +(1 row) + +select time_to_sec(10000101000060); +WARNING: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(0); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec(100000000000000); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('8385960'); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('8385959'); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec('-8385960'); +WARNING: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('-8385959'); + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec('10000101000000'); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec('10000100000000'); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec('100000101000000'); 
+WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + 36000 +(1 row) + +select time_to_sec('99999999999999'); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('99990100000000'); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec('20220101010101'); + time_to_sec +------------- + 3661 +(1 row) + +select time_to_sec('99991231235959'); + time_to_sec +------------- + 86399 +(1 row) + +select time_to_sec('99991231240000'); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('10000101000001'); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec('10000101000059'); + time_to_sec +------------- + 59 +(1 row) + +select time_to_sec('10000101000060'); +WARNING: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec('0'); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec('100000000000000'); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec(8385960.11); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(8385959.11); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(-8385960.11); +WARNING: Truncated incorrect time value: "-8385960.11" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-8385959.11); + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(10000101000000.11); + time_to_sec +------------- + 0 +(1 row) + +select 
time_to_sec(10000100000000.11); +WARNING: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(100000101000000.11); +WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(99999999999999.11); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(99990100000000.11); +WARNING: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(20220101010101.11); + time_to_sec +------------- + 3661 +(1 row) + +select time_to_sec(99991231235959.11); + time_to_sec +------------- + 86399 +(1 row) + +select time_to_sec(99991231240000.11); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(10000101000001.11); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec(10000101000059.11); + time_to_sec +------------- + 59 +(1 row) + +select time_to_sec(10000101000060.11); +WARNING: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(0.11); + time_to_sec +------------- + 0 +(1 row) + +select time_to_sec(100000000000000.11); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(10000101000001.99999); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec(10000101000001.999999); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec(10000101000001.9999994); + time_to_sec +------------- + 1 +(1 row) + +select time_to_sec(10000101000001.9999995); + 
time_to_sec +------------- + 2 +(1 row) + +select time_to_sec(10000101000001.999999999); + time_to_sec +------------- + 2 +(1 row) + +select time_to_sec(8385959.99999); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(8385959.999999); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(8385959.9999994); + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(8385959.9999995); +WARNING: Truncated incorrect time value: "8385959.9999995" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(8385959.999999999); +WARNING: Truncated incorrect time value: "8385959.999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + 3020399 +(1 row) + +select time_to_sec(-10000101000001.99999); +WARNING: Truncated incorrect time value: "-10000101000001.99999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-10000101000001.999999); +WARNING: Truncated incorrect time value: "-10000101000001.999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-10000101000001.999994); +WARNING: Truncated incorrect time value: "-10000101000001.999994" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-10000101000001.999995); +WARNING: Truncated incorrect time value: "-10000101000001.999995" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-10000101000001.999999999); +WARNING: Truncated incorrect time value: "-10000101000001.999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +select time_to_sec(-8385959.99999); + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(-8385959.999999); + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(-8385959.9999994); + time_to_sec +------------- 
+ -3020399 +(1 row) + +select time_to_sec(-8385959.9999995); +WARNING: Truncated incorrect time value: "-8385959.9999995" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(-8385959.999999999); +WARNING: Truncated incorrect time value: "-8385959.999999999" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + -3020399 +(1 row) + +select time_to_sec(-10000101000001); +WARNING: Truncated incorrect time value: "-10000101000001" +CONTEXT: referenced column: time_to_sec + time_to_sec +------------- + +(1 row) + +-- ä¸¥æ ¼æ¨¡å¼ insert +create table test2(funcname text, result text); +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +ERROR: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +ERROR: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +ERROR: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +ERROR: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''8385960'')', time_to_sec('8385960')); +ERROR: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''8385959'')', time_to_sec('8385959')); +insert into test2 values ('time_to_sec(''-8385960'')', 
time_to_sec('-8385960')); +ERROR: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''-8385959'')', time_to_sec('-8385959')); +insert into test2 values ('time_to_sec(''10000101000000'')', time_to_sec('10000101000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''100000101000000'')', time_to_sec('100000101000000')); +ERROR: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''99999999999999'')', time_to_sec('99999999999999')); +ERROR: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''20220101010101'')', time_to_sec('20220101010101')); +insert into test2 values ('time_to_sec(''99991231235959'')', time_to_sec('99991231235959')); +insert into test2 values ('time_to_sec(''99991231240000'')', time_to_sec('99991231240000')); +ERROR: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''10000101000001'')', time_to_sec('10000101000001')); +insert into test2 values ('time_to_sec(''10000101000059'')', time_to_sec('10000101000059')); +insert into test2 values ('time_to_sec(''10000101000060'')', time_to_sec('10000101000060')); +ERROR: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''0'')', time_to_sec('0')); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +ERROR: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385960)', time_to_sec(8385960)); +ERROR: Truncated incorrect time value: "8385960" +CONTEXT: 
referenced column: result +insert into test2 values ('time_to_sec(8385959)', time_to_sec(8385959)); +insert into test2 values ('time_to_sec(-8385960)', time_to_sec(-8385960)); +ERROR: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(-8385959)', time_to_sec(-8385959)); +insert into test2 values ('time_to_sec(10000101000000)', time_to_sec(10000101000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +ERROR: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(100000101000000)', time_to_sec(100000101000000)); +ERROR: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99999999999999)', time_to_sec(99999999999999)); +ERROR: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +ERROR: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(20220101010101)', time_to_sec(20220101010101)); +insert into test2 values ('time_to_sec(99991231235959)', time_to_sec(99991231235959)); +insert into test2 values ('time_to_sec(99991231240000)', time_to_sec(99991231240000)); +ERROR: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(10000101000001)', time_to_sec(10000101000001)); +insert into test2 values ('time_to_sec(10000101000059)', time_to_sec(10000101000059)); +insert into test2 values ('time_to_sec(10000101000060)', time_to_sec(10000101000060)); +ERROR: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(0)', time_to_sec(0)); +insert into test2 values ('time_to_sec(100000000000000)', 
time_to_sec(100000000000000)); +ERROR: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385960.11)', time_to_sec(8385960.11)); +ERROR: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385959.11)', time_to_sec(8385959.11)); +insert into test2 values ('time_to_sec(-8385960.11)', time_to_sec(-8385960.11)); +ERROR: Truncated incorrect time value: "-8385960.11" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(-8385959.11)', time_to_sec(-8385959.11)); +insert into test2 values ('time_to_sec(10000101000000.11)', time_to_sec(10000101000000.11)); +insert into test2 values ('time_to_sec(10000100000000.11)', time_to_sec(10000100000000.11)); +ERROR: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(100000101000000.11)', time_to_sec(100000101000000.11)); +ERROR: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99999999999999.11)', time_to_sec(99999999999999.11)); +ERROR: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000.11)', time_to_sec(99990100000000.11)); +ERROR: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(20220101010101.11)', time_to_sec(20220101010101.11)); +insert into test2 values ('time_to_sec(99991231235959.11)', time_to_sec(99991231235959.11)); +insert into test2 values ('time_to_sec(99991231240000.11)', time_to_sec(99991231240000.11)); +ERROR: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(10000101000001.11)', time_to_sec(10000101000001.11)); +insert into test2 values ('time_to_sec(10000101000059.11)', 
time_to_sec(10000101000059.11)); +insert into test2 values ('time_to_sec(10000101000060.11)', time_to_sec(10000101000060.11)); +ERROR: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(0.11)', time_to_sec(0.11)); +insert into test2 values ('time_to_sec(100000000000000.11)', time_to_sec(100000000000000.11)); +ERROR: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(10000101000001.99999)', time_to_sec(10000101000001.99999)); +insert into test2 values('time_to_sec(10000101000001.999999)', time_to_sec(10000101000001.999999)); +insert into test2 values('time_to_sec(10000101000001.9999994)', time_to_sec(10000101000001.9999994)); +insert into test2 values('time_to_sec(10000101000001.9999995)', time_to_sec(10000101000001.9999995)); +insert into test2 values('time_to_sec(10000101000001.999999999)', time_to_sec(10000101000001.999999999)); +insert into test2 values('time_to_sec(8385959.99999)', time_to_sec(8385959.99999)); +insert into test2 values('time_to_sec(8385959.999999)', time_to_sec(8385959.999999)); +insert into test2 values('time_to_sec(8385959.9999994)', time_to_sec(8385959.9999994)); +insert into test2 values('time_to_sec(8385959.9999995)', time_to_sec(8385959.9999995)); +ERROR: Truncated incorrect time value: "8385959.9999995" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(8385959.999999999)', time_to_sec(8385959.999999999)); +ERROR: Truncated incorrect time value: "8385959.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.99999)', time_to_sec(-10000101000001.99999)); +ERROR: Truncated incorrect time value: "-10000101000001.99999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999999)', time_to_sec(-10000101000001.999999)); +ERROR: Truncated incorrect time value: "-10000101000001.999999" 
+CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999994)', time_to_sec(-10000101000001.999994)); +ERROR: Truncated incorrect time value: "-10000101000001.999994" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999995)', time_to_sec(-10000101000001.999995)); +ERROR: Truncated incorrect time value: "-10000101000001.999995" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999999999)', time_to_sec(-10000101000001.999999999)); +ERROR: Truncated incorrect time value: "-10000101000001.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-8385959.99999)', time_to_sec(-8385959.99999)); +insert into test2 values('time_to_sec(-8385959.999999)', time_to_sec(-8385959.999999)); +insert into test2 values('time_to_sec(-8385959.9999994)', time_to_sec(-8385959.9999994)); +insert into test2 values('time_to_sec(-8385959.9999995)', time_to_sec(-8385959.9999995)); +ERROR: Truncated incorrect time value: "-8385959.9999995" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-8385959.999999999)', time_to_sec(-8385959.999999999)); +ERROR: Truncated incorrect time value: "-8385959.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001)', time_to_sec(-10000101000001)); +ERROR: Truncated incorrect time value: "-10000101000001" +CONTEXT: referenced column: result +select * from test2 order by funcname; + funcname | result +---------------------------------------+---------- + time_to_sec('-8385959') | -3020399 + time_to_sec('0') | 0 + time_to_sec('10000100000000') | 0 + time_to_sec('10000100000000') | 0 + time_to_sec('10000101000000') | 0 + time_to_sec('10000101000001') | 1 + time_to_sec('10000101000059') | 59 + time_to_sec('20220101010101') | 3661 + time_to_sec('8385959') | 3020399 + time_to_sec('99990100000000') | 0 + time_to_sec('99990100000000') | 0 + 
time_to_sec('99991231235959') | 86399 + time_to_sec(-8385959) | -3020399 + time_to_sec(-8385959.11) | -3020399 + time_to_sec(-8385959.99999) | -3020399 + time_to_sec(-8385959.999999) | -3020399 + time_to_sec(-8385959.9999994) | -3020399 + time_to_sec(0) | 0 + time_to_sec(0.11) | 0 + time_to_sec(10000101000000) | 0 + time_to_sec(10000101000000.11) | 0 + time_to_sec(10000101000001) | 1 + time_to_sec(10000101000001.11) | 1 + time_to_sec(10000101000001.99999) | 1 + time_to_sec(10000101000001.999999) | 1 + time_to_sec(10000101000001.9999994) | 1 + time_to_sec(10000101000001.9999995) | 2 + time_to_sec(10000101000001.999999999) | 2 + time_to_sec(10000101000059) | 59 + time_to_sec(10000101000059.11) | 59 + time_to_sec(20220101010101) | 3661 + time_to_sec(20220101010101.11) | 3661 + time_to_sec(8385959) | 3020399 + time_to_sec(8385959.11) | 3020399 + time_to_sec(8385959.99999) | 3020399 + time_to_sec(8385959.999999) | 3020399 + time_to_sec(8385959.9999994) | 3020399 + time_to_sec(99991231235959) | 86399 + time_to_sec(99991231235959.11) | 86399 +(39 rows) + +-- éžä¸¥æ ¼æ¨¡å¼ insert +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +delete from test2 where 1=1; +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +WARNING: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +WARNING: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values 
('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''8385960'')', time_to_sec('8385960')); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''8385959'')', time_to_sec('8385959')); +insert into test2 values ('time_to_sec(''-8385960'')', time_to_sec('-8385960')); +WARNING: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''-8385959'')', time_to_sec('-8385959')); +insert into test2 values ('time_to_sec(''10000101000000'')', time_to_sec('10000101000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''100000101000000'')', time_to_sec('100000101000000')); +WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''99999999999999'')', time_to_sec('99999999999999')); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''20220101010101'')', time_to_sec('20220101010101')); +insert into test2 values ('time_to_sec(''99991231235959'')', time_to_sec('99991231235959')); +insert into test2 values ('time_to_sec(''99991231240000'')', time_to_sec('99991231240000')); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''10000101000001'')', time_to_sec('10000101000001')); +insert into test2 values ('time_to_sec(''10000101000059'')', time_to_sec('10000101000059')); +insert into test2 values ('time_to_sec(''10000101000060'')', time_to_sec('10000101000060')); +WARNING: 
Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(''0'')', time_to_sec('0')); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385960)', time_to_sec(8385960)); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385959)', time_to_sec(8385959)); +insert into test2 values ('time_to_sec(-8385960)', time_to_sec(-8385960)); +WARNING: Truncated incorrect time value: "-8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(-8385959)', time_to_sec(-8385959)); +insert into test2 values ('time_to_sec(10000101000000)', time_to_sec(10000101000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +WARNING: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(100000101000000)', time_to_sec(100000101000000)); +WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99999999999999)', time_to_sec(99999999999999)); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +WARNING: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(20220101010101)', time_to_sec(20220101010101)); +insert into test2 values ('time_to_sec(99991231235959)', time_to_sec(99991231235959)); +insert into test2 values ('time_to_sec(99991231240000)', time_to_sec(99991231240000)); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: 
result +insert into test2 values ('time_to_sec(10000101000001)', time_to_sec(10000101000001)); +insert into test2 values ('time_to_sec(10000101000059)', time_to_sec(10000101000059)); +insert into test2 values ('time_to_sec(10000101000060)', time_to_sec(10000101000060)); +WARNING: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(0)', time_to_sec(0)); +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385960.11)', time_to_sec(8385960.11)); +WARNING: Truncated incorrect time value: "8385960" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(8385959.11)', time_to_sec(8385959.11)); +insert into test2 values ('time_to_sec(-8385960.11)', time_to_sec(-8385960.11)); +WARNING: Truncated incorrect time value: "-8385960.11" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(-8385959.11)', time_to_sec(-8385959.11)); +insert into test2 values ('time_to_sec(10000101000000.11)', time_to_sec(10000101000000.11)); +insert into test2 values ('time_to_sec(10000100000000.11)', time_to_sec(10000100000000.11)); +WARNING: Truncated incorrect time value: "10000100000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(100000101000000.11)', time_to_sec(100000101000000.11)); +WARNING: Truncated incorrect time value: "100000101000000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99999999999999.11)', time_to_sec(99999999999999.11)); +WARNING: Truncated incorrect time value: "99999999999999" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(99990100000000.11)', time_to_sec(99990100000000.11)); +WARNING: Truncated incorrect time value: "99990100000000" +CONTEXT: referenced column: result +insert into test2 values 
('time_to_sec(20220101010101.11)', time_to_sec(20220101010101.11)); +insert into test2 values ('time_to_sec(99991231235959.11)', time_to_sec(99991231235959.11)); +insert into test2 values ('time_to_sec(99991231240000.11)', time_to_sec(99991231240000.11)); +WARNING: Truncated incorrect time value: "99991231240000" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(10000101000001.11)', time_to_sec(10000101000001.11)); +insert into test2 values ('time_to_sec(10000101000059.11)', time_to_sec(10000101000059.11)); +insert into test2 values ('time_to_sec(10000101000060.11)', time_to_sec(10000101000060.11)); +WARNING: Truncated incorrect time value: "10000101000060" +CONTEXT: referenced column: result +insert into test2 values ('time_to_sec(0.11)', time_to_sec(0.11)); +insert into test2 values ('time_to_sec(100000000000000.11)', time_to_sec(100000000000000.11)); +WARNING: Truncated incorrect time value: "100000000000000" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(10000101000001.99999)', time_to_sec(10000101000001.99999)); +insert into test2 values('time_to_sec(10000101000001.999999)', time_to_sec(10000101000001.999999)); +insert into test2 values('time_to_sec(10000101000001.9999994)', time_to_sec(10000101000001.9999994)); +insert into test2 values('time_to_sec(10000101000001.9999995)', time_to_sec(10000101000001.9999995)); +insert into test2 values('time_to_sec(10000101000001.999999999)', time_to_sec(10000101000001.999999999)); +insert into test2 values('time_to_sec(8385959.99999)', time_to_sec(8385959.99999)); +insert into test2 values('time_to_sec(8385959.999999)', time_to_sec(8385959.999999)); +insert into test2 values('time_to_sec(8385959.9999994)', time_to_sec(8385959.9999994)); +insert into test2 values('time_to_sec(8385959.9999995)', time_to_sec(8385959.9999995)); +WARNING: Truncated incorrect time value: "8385959.9999995" +CONTEXT: referenced column: result +insert into test2 
values('time_to_sec(8385959.999999999)', time_to_sec(8385959.999999999)); +WARNING: Truncated incorrect time value: "8385959.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.99999)', time_to_sec(-10000101000001.99999)); +WARNING: Truncated incorrect time value: "-10000101000001.99999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999999)', time_to_sec(-10000101000001.999999)); +WARNING: Truncated incorrect time value: "-10000101000001.999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999994)', time_to_sec(-10000101000001.999994)); +WARNING: Truncated incorrect time value: "-10000101000001.999994" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999995)', time_to_sec(-10000101000001.999995)); +WARNING: Truncated incorrect time value: "-10000101000001.999995" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001.999999999)', time_to_sec(-10000101000001.999999999)); +WARNING: Truncated incorrect time value: "-10000101000001.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-8385959.99999)', time_to_sec(-8385959.99999)); +insert into test2 values('time_to_sec(-8385959.999999)', time_to_sec(-8385959.999999)); +insert into test2 values('time_to_sec(-8385959.9999994)', time_to_sec(-8385959.9999994)); +insert into test2 values('time_to_sec(-8385959.9999995)', time_to_sec(-8385959.9999995)); +WARNING: Truncated incorrect time value: "-8385959.9999995" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-8385959.999999999)', time_to_sec(-8385959.999999999)); +WARNING: Truncated incorrect time value: "-8385959.999999999" +CONTEXT: referenced column: result +insert into test2 values('time_to_sec(-10000101000001)', time_to_sec(-10000101000001)); +WARNING: Truncated incorrect time value: 
"-10000101000001" +CONTEXT: referenced column: result +select * from test2 order by funcname; + funcname | result +----------------------------------------+---------- + time_to_sec('-8385959') | -3020399 + time_to_sec('-8385960') | + time_to_sec('0') | 0 + time_to_sec('100000000000000') | 0 + time_to_sec('100000000000000') | 0 + time_to_sec('100000101000000') | 36000 + time_to_sec('10000100000000') | 0 + time_to_sec('10000100000000') | 0 + time_to_sec('10000101000000') | 0 + time_to_sec('10000101000001') | 1 + time_to_sec('10000101000059') | 59 + time_to_sec('10000101000060') | + time_to_sec('20220101010101') | 3661 + time_to_sec('8385959') | 3020399 + time_to_sec('8385960') | + time_to_sec('99990100000000') | 0 + time_to_sec('99990100000000') | 0 + time_to_sec('99991231235959') | 86399 + time_to_sec('99991231240000') | + time_to_sec('99999999999999') | + time_to_sec(-10000101000001) | + time_to_sec(-10000101000001.99999) | + time_to_sec(-10000101000001.999994) | + time_to_sec(-10000101000001.999995) | + time_to_sec(-10000101000001.999999) | + time_to_sec(-10000101000001.999999999) | + time_to_sec(-8385959) | -3020399 + time_to_sec(-8385959.11) | -3020399 + time_to_sec(-8385959.99999) | -3020399 + time_to_sec(-8385959.999999) | -3020399 + time_to_sec(-8385959.9999994) | -3020399 + time_to_sec(-8385959.9999995) | -3020399 + time_to_sec(-8385959.999999999) | -3020399 + time_to_sec(-8385960) | + time_to_sec(-8385960.11) | + time_to_sec(0) | 0 + time_to_sec(0.11) | 0 + time_to_sec(100000000000000) | + time_to_sec(100000000000000) | + time_to_sec(100000000000000.11) | + time_to_sec(100000101000000) | + time_to_sec(100000101000000.11) | + time_to_sec(10000100000000) | + time_to_sec(10000100000000) | + time_to_sec(10000100000000.11) | + time_to_sec(10000101000000) | 0 + time_to_sec(10000101000000.11) | 0 + time_to_sec(10000101000001) | 1 + time_to_sec(10000101000001.11) | 1 + time_to_sec(10000101000001.99999) | 1 + time_to_sec(10000101000001.999999) | 1 + 
time_to_sec(10000101000001.9999994) | 1 + time_to_sec(10000101000001.9999995) | 2 + time_to_sec(10000101000001.999999999) | 2 + time_to_sec(10000101000059) | 59 + time_to_sec(10000101000059.11) | 59 + time_to_sec(10000101000060) | + time_to_sec(10000101000060.11) | + time_to_sec(20220101010101) | 3661 + time_to_sec(20220101010101.11) | 3661 + time_to_sec(8385959) | 3020399 + time_to_sec(8385959.11) | 3020399 + time_to_sec(8385959.99999) | 3020399 + time_to_sec(8385959.999999) | 3020399 + time_to_sec(8385959.9999994) | 3020399 + time_to_sec(8385959.9999995) | 3020399 + time_to_sec(8385959.999999999) | 3020399 + time_to_sec(8385960) | + time_to_sec(8385960.11) | + time_to_sec(99990100000000) | + time_to_sec(99990100000000) | + time_to_sec(99990100000000.11) | + time_to_sec(99991231235959) | 86399 + time_to_sec(99991231235959.11) | 86399 + time_to_sec(99991231240000) | + time_to_sec(99991231240000.11) | + time_to_sec(99999999999999) | + time_to_sec(99999999999999.11) | +(78 rows) + +drop table test2; \c contrib_regression DROP DATABASE b_datetime_func_test3; diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 75b200bdc..40b73919e 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -45,7 +45,6 @@ #define FRAC_PART_LEN_IN_NUMERICSEC 100000000 #define TIME_WITH_FORMAT_ARGS_SIZE 4 #define TIME_MS_TO_S_RADIX 1000 - #ifdef DOLPHIN #define TWO_DIGITS_YEAR_DATE_ONE 691231 /* 2069-12-31 */ #define TWO_DIGITS_YEAR_DATE_TWO 700101 /* 1970-01-01 */ @@ -57,6 +56,13 @@ #define DATE_ALL_ZERO_VALUE (-2451508) #define JDATE_ALL_ZERO_VALUE (DATE_ALL_ZERO_VALUE + POSTGRES_EPOCH_JDATE) #define MONTH_TO_QUARTER_RADIX 3 +#define MAX_LONGLONG_TO_CHAR_LENGTH 21 +#define TIME_NANO_SECOND_RADIX 10 +#define MAX_NANO_SECOND 1000000000 +#define NANO_SECOND_ROUND_BASE 500 +#define MAX_MICRO_SECOND 1000000 +#define TIME_MAX_NANO_SECOND 99999999 +#define 
TIME_NANO_SECOND_TO_MICRO_SECOND_RADIX 1000 #endif /* for b compatibility type*/ @@ -83,14 +89,19 @@ extern void check_b_format_time_range_with_ereport(TimeADT &time, bool can_ignor extern void check_b_format_date_range_with_ereport(DateADT &date); extern Oid convert_to_datetime_date(Datum value, Oid valuetypid, Timestamp *datetime, DateADT *date); extern void adjust_time_range(pg_tm *tm, fsec_t &fsec, bool &warnings); -extern TimeADT time_in_with_flag(char *str, unsigned int date_flag); -extern bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag); +extern bool time_in_with_flag(char *str, unsigned int date_flag, TimeADT* time_adt, bool vertify_time = false); +extern bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag, bool vertify_time = false); extern bool date_add_interval(DateADT date, Interval *span, DateADT *result); extern Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_error_type); extern "C" Datum time_float(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum date_enum(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum timestamp_enum(PG_FUNCTION_ARGS); extern Datum textout (PG_FUNCTION_ARGS); +extern bool time_add_nanoseconds_with_round(char* input_str, pg_tm *tm, long long rem, fsec_t* fsec, bool can_ignore); +extern long long align_to_nanoseconds(long long src); +extern bool check_time_mmssff_range(pg_tm *tm, long long microseconds); +extern bool longlong_to_tm(long long nr, TimeADT* time, pg_tm* result_tm, fsec_t* fsec, int32* timeSign); +bool check_time_min_value(char* input_str, long long nr, bool can_ignore); typedef struct DateTimeFormat { diff --git a/contrib/dolphin/include/plugin_utils/datetime.h b/contrib/dolphin/include/plugin_utils/datetime.h index fe123bd03..3b1f06d3e 100644 --- a/contrib/dolphin/include/plugin_utils/datetime.h +++ b/contrib/dolphin/include/plugin_utils/datetime.h @@ -126,6 +126,7 @@ extern void DateTimeParseErrorWithFlag(int dterr, const char* str, 
const char* d extern void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype, int level); bool CheckDateRange(const pg_tm *tm, bool not_zero_date, time_flags flags); +bool CheckDatetimeRange(const pg_tm *tm, const fsec_t fsec, const int tm_type); #endif extern bool datetime_add_nanoseconds_with_round(pg_tm *tm, fsec_t &fsec, int nano); diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 04d6e4a36..ccd4b24e2 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -133,6 +133,10 @@ extern "C" DLL_PUBLIC Datum utc_time_func(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(time_to_sec); extern "C" DLL_PUBLIC Datum time_to_sec(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int64_time_to_sec); +extern "C" DLL_PUBLIC Datum int64_time_to_sec(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(numeric_time_to_sec); +extern "C" DLL_PUBLIC Datum numeric_time_to_sec(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(datediff_t_t); extern "C" DLL_PUBLIC Datum datediff_t_t(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(datediff_t_n); @@ -5067,8 +5071,13 @@ Datum time_bool(PG_FUNCTION_ARGS) /** * The function is similar to time_in, but uses flag to control the parsing process. 
+ * + * return value : + * true : there is no warning on str to time, str is correct + * false : there are some warnings on str to time, str is incorrect */ -void time_in_with_flag_internal(char *str, struct pg_tm *result_tm, fsec_t *result_fsec, int *result_timeSign, unsigned int date_flag) +bool time_in_with_flag_internal(char *str, struct pg_tm *result_tm, fsec_t *result_fsec, int *result_timeSign, + unsigned int date_flag, bool vertify_time = false) { struct pg_tm tt1, *tm = &tt1; fsec_t fsec = 0; @@ -5080,6 +5089,10 @@ void time_in_with_flag_internal(char *str, struct pg_tm *result_tm, fsec_t *resu securec_check(rc, "\0", "\0"); cstring_to_time(str, tm, fsec, timeSign, tm_type, warnings, &null_func_result); + if (!warnings && vertify_time) { + warnings = !CheckDatetimeRange(tm, fsec, DTK_TIME) || !CheckDateRange(tm, non_zero_date(tm), 0); + } + if (warnings) { int errlevel = (SQL_MODE_STRICT() || null_func_result) ? ERROR : WARNING; ereport(errlevel, @@ -5090,20 +5103,24 @@ void time_in_with_flag_internal(char *str, struct pg_tm *result_tm, fsec_t *resu securec_check(rc, "\0", "\0"); *result_fsec = fsec; *result_timeSign = timeSign; - return; + return vertify_time ? 
!warnings : true; } -TimeADT time_in_with_flag(char *str, unsigned int date_flag) +bool time_in_with_flag(char *str, unsigned int date_flag, TimeADT* time_adt, bool vertify_time) { TimeADT result; struct pg_tm tt, *tm = &tt; fsec_t fsec; int timeSign; - time_in_with_flag_internal(str, tm, &fsec, &timeSign, date_flag); + bool vertify_time_result = time_in_with_flag_internal(str, tm, &fsec, &timeSign, date_flag, vertify_time); + if (!vertify_time_result) { + return false; + } tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, TIMESTAMP_MAX_PRECISION); result *= timeSign; - return result; + *time_adt = result; + return true; } /** @@ -5148,7 +5165,8 @@ Datum time_to_sec(PG_FUNCTION_ARGS) { TimeADT time; char *time_str = text_to_cstring(PG_GETARG_TEXT_PP(0)); - struct pg_tm result_tt, *result_tm = &result_tt; + pg_tm result_tt; + pg_tm* result_tm = &result_tt; fsec_t fsec; int32 result; int32 timeSign = 1; @@ -5166,6 +5184,102 @@ Datum time_to_sec(PG_FUNCTION_ARGS) PG_RETURN_INT32(result); } + +/* int64_time_to_sec + * @param time, type is int + * @return seconds of the given time + */ +Datum int64_time_to_sec(PG_FUNCTION_ARGS) +{ + TimeADT time; + int64 input_time = PG_GETARG_INT64(0); + pg_tm result_tt = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + pg_tm* result_tm = &result_tt; + fsec_t fsec; + int32 timeSign = 1; + int32 result; + + errno_t errorno = EOK; + char time_str[MAX_LONGLONG_TO_CHAR_LENGTH] = {0}; + errorno = sprintf_s(time_str, sizeof(time_str), "%lld", input_time); + securec_check_ss(errorno, "\0", "\0"); + + if (!check_time_min_value(time_str, input_time, fcinfo->can_ignore)) { + PG_RETURN_NULL(); + } + if (!longlong_to_tm(input_time, &time, result_tm, &fsec, &timeSign)) { + PG_RETURN_NULL(); + } + + result = ((result_tm->tm_hour * MINS_PER_HOUR + result_tm->tm_min) * SECS_PER_MINUTE) + result_tm->tm_sec; + result *= timeSign; + PG_RETURN_INT32(result); +} + + +/* numeric_time_to_sec + * @param time, type is numeric + * @return seconds of the given time + 
*/ +Datum numeric_time_to_sec(PG_FUNCTION_ARGS) +{ + Numeric num1 = PG_GETARG_NUMERIC(0); + lldiv_t div1; + struct pg_tm tt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + pg_tm* result_tm = &tt1; + int32 result; + TimeADT time; + fsec_t fsec; + int32 timeSign = 1; + + Numeric_to_lldiv(num1, &div1); + + char* input_str = DatumGetCString(numeric_out_with_zero(fcinfo)); + + if (!check_time_min_value(input_str, div1.quot, fcinfo->can_ignore)) { + PG_RETURN_NULL(); + } + if (!longlong_to_tm(div1.quot, &time, result_tm, &fsec, &timeSign)) { + PG_RETURN_NULL(); + } + + div1.rem = div1.rem < 0 ? -div1.rem : div1.rem; + + time_add_nanoseconds_with_round(input_str, result_tm, div1.rem, &fsec, fcinfo->can_ignore); + result = ((result_tm->tm_hour * MINS_PER_HOUR + result_tm->tm_min) * SECS_PER_MINUTE) + result_tm->tm_sec; + result *= timeSign; + PG_RETURN_INT32(result); +} + + +bool check_time_min_value(char* input_str, long long nr, bool can_ignore) +{ + int errlevel = (SQL_MODE_STRICT() && !can_ignore) ? 
ERROR : WARNING; + if (nr < -TIME_MAX_VALUE) { + ereport(errlevel, (errmsg("Truncated incorrect time value: \"%s\"", input_str))); + return false; + } + return true; +} + +bool longlong_to_tm(long long nr, TimeADT* time, pg_tm* result_tm, fsec_t* fsec, int32* timeSign) +{ + errno_t errorno = EOK; + char time_str[MAX_LONGLONG_TO_CHAR_LENGTH] = {0}; + errorno = sprintf_s(time_str, sizeof(time_str), "%lld", nr); + securec_check_ss(errorno, "\0", "\0"); + *timeSign = 1; + if (!time_in_with_sql_mode(time_str, time, (ENABLE_ZERO_DAY | ENABLE_ZERO_MONTH), true)) { + return false; + } + if (*time < 0) { + *timeSign = -1; + *time = -*time; + } + time2tm(*time, result_tm, fsec); + return true; +} + static inline int32 datediff_internal(struct pg_tm* tm1, struct pg_tm* tm2) { DateADT dt1, dt2; @@ -5582,14 +5696,14 @@ Datum GetSecond(PG_FUNCTION_ARGS) return GetSepecificPartOfTime(fcinfo, "second"); } -bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag) +bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag, bool vertify_time) { bool ret = true; int code; const char *msg = NULL; PG_TRY(); { - *result = time_in_with_flag(str, date_flag); + ret = time_in_with_flag(str, date_flag, result, vertify_time); } PG_CATCH(); { @@ -5997,6 +6111,94 @@ Datum date_int(PG_FUNCTION_ARGS) int32 res = date2int(tm); PG_RETURN_INT32(res); } + +/** + format frac part of the time as nanoseconds format + for example it return .900000000 when src = .900 + for time, xx.900000000 is equal to .900 +*/ +long long align_to_nanoseconds(long long src) +{ + if (src <= 0) { + return src; + } + while (src <= TIME_MAX_NANO_SECOND) { + src = src * TIME_NANO_SECOND_RADIX; + } + return src; +} + +/** + verity the available of timestamp and micro seconds, compatible with mysql same function + return true -- unavailable + return false -- available +*/ +bool check_time_mmssff_range(pg_tm *tm, long long microseconds) +{ + return tm->tm_min > TIME_MAX_MINUTE || 
tm->tm_sec > TIME_MAX_SECOND || + microseconds > TIME_MAX_FRAC; +} + +/** + time_add_nanoseconds_with_round refers to mysql for compatible with mysql time_to_sec function. + the function do the round on nanoseconds of the time +*/ +bool time_add_nanoseconds_with_round(char* input_str, pg_tm *tm, long long rem, fsec_t* fsec, bool can_ignore) +{ + int errlevel = (SQL_MODE_STRICT() && !can_ignore) ? ERROR : WARNING; + /* We expect correct input data */ + if (rem >= MAX_NANO_SECOND) { + ereport(errlevel, (errmsg("Truncated incorrect time value: \"%s\"", input_str))); + } + + rem = align_to_nanoseconds(rem); + uint microseconds = rem / TIME_MS_TO_S_RADIX; + uint nanoseconds = rem % TIME_MS_TO_S_RADIX; + + if (check_time_mmssff_range(tm, microseconds)) { + ereport(WARNING, (errmsg("Truncated incorrect time value"))); + } + + if (nanoseconds < NANO_SECOND_ROUND_BASE) + return false; + + microseconds += (nanoseconds + NANO_SECOND_ROUND_BASE) / TIME_NANO_SECOND_TO_MICRO_SECOND_RADIX; + if (microseconds < MAX_MICRO_SECOND) + goto ret; + + microseconds %= MAX_MICRO_SECOND; + if (tm->tm_sec < TIME_MAX_SECOND) + { + tm->tm_sec++; + goto ret; + } + + tm->tm_sec= 0; + if (tm->tm_min < TIME_MAX_SECOND) + { + tm->tm_min++; + goto ret; + } + tm->tm_min= 0; + tm->tm_hour++; + + *fsec = microseconds * TIME_MS_TO_S_RADIX + nanoseconds; + +ret: + /* + We can get '838:59:59.000001' at this point, which + is bigger than the maximum possible value '838:59:59.000000'. + Checking only "hour > 838" is not enough. + Do full adjust_time_range(). 
+ */ + bool warning = false; + adjust_time_range(tm, *fsec, warning); + if (warning == true) { + ereport(errlevel, (errmsg("Truncated incorrect time value: \"%s\"", input_str))); + } + return false; +} + #endif #ifdef DOLPHIN diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index f31f42ae7..88927e35b 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -12,6 +12,11 @@ DROP FUNCTION IF EXISTS pg_catalog.text_time_explicit(TEXT); DROP FUNCTION IF EXISTS pg_catalog.day(time without time zone); +DROP FUNCTION IF EXISTS pg_catalog.time_to_sec(int8); + +DROP FUNCTION IF EXISTS pg_catalog.time_to_sec(numeric); +CREATE OR REPLACE FUNCTION pg_catalog.time_to_sec(numeric) RETURNS int4 AS $$ SELECT pg_catalog.time_to_sec(cast($1 as text)) $$ LANGUAGE SQL; + DROP OPERATOR IF EXISTS pg_catalog.=(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<>(time, timestamp without time zone); DROP OPERATOR IF EXISTS pg_catalog.<=(time, timestamp without time zone); diff --git a/contrib/dolphin/sql/db_b_date_time_functions3.sql b/contrib/dolphin/sql/db_b_date_time_functions3.sql index f34ed9bd5..9b62aa9e5 100644 --- a/contrib/dolphin/sql/db_b_date_time_functions3.sql +++ b/contrib/dolphin/sql/db_b_date_time_functions3.sql @@ -516,5 +516,250 @@ drop table test; -- test timediff(date,unknown) select timediff(date'2022-01-01','2022-01-02'); select timediff('2022-01-01',date'2022-01-02'); + +-- 测试time_to_sec异常边界 +select time_to_sec(8385960); +select time_to_sec(8385959); +select time_to_sec(-8385960); +select time_to_sec(-8385959); +select time_to_sec(10000101000000); +select time_to_sec(10000100000000); +select time_to_sec(100000101000000); +select time_to_sec(99999999999999); +select time_to_sec(99990100000000); +select time_to_sec(20220101010101); +select time_to_sec(99991231235959); +select time_to_sec(99991231240000); 
+select time_to_sec(10000101000001); +select time_to_sec(10000101000059); +select time_to_sec(10000101000060); +select time_to_sec(0); +select time_to_sec(100000000000000); +select time_to_sec('8385960'); +select time_to_sec('8385959'); +select time_to_sec('-8385960'); +select time_to_sec('-8385959'); +select time_to_sec('10000101000000'); +select time_to_sec('10000100000000'); +select time_to_sec('100000101000000'); +select time_to_sec('99999999999999'); +select time_to_sec('99990100000000'); +select time_to_sec('20220101010101'); +select time_to_sec('99991231235959'); +select time_to_sec('99991231240000'); +select time_to_sec('10000101000001'); +select time_to_sec('10000101000059'); +select time_to_sec('10000101000060'); +select time_to_sec('0'); +select time_to_sec('100000000000000'); +select time_to_sec(8385960.11); +select time_to_sec(8385959.11); +select time_to_sec(-8385960.11); +select time_to_sec(-8385959.11); +select time_to_sec(10000101000000.11); +select time_to_sec(10000100000000.11); +select time_to_sec(100000101000000.11); +select time_to_sec(99999999999999.11); +select time_to_sec(99990100000000.11); +select time_to_sec(20220101010101.11); +select time_to_sec(99991231235959.11); +select time_to_sec(99991231240000.11); +select time_to_sec(10000101000001.11); +select time_to_sec(10000101000059.11); +select time_to_sec(10000101000060.11); +select time_to_sec(0.11); +select time_to_sec(100000000000000.11); +select time_to_sec(10000101000001.99999); +select time_to_sec(10000101000001.999999); +select time_to_sec(10000101000001.9999994); +select time_to_sec(10000101000001.9999995); +select time_to_sec(10000101000001.999999999); +select time_to_sec(8385959.99999); +select time_to_sec(8385959.999999); +select time_to_sec(8385959.9999994); +select time_to_sec(8385959.9999995); +select time_to_sec(8385959.999999999); +select time_to_sec(-10000101000001.99999); +select time_to_sec(-10000101000001.999999); +select time_to_sec(-10000101000001.999994); +select 
time_to_sec(-10000101000001.999995); +select time_to_sec(-10000101000001.999999999); +select time_to_sec(-8385959.99999); +select time_to_sec(-8385959.999999); +select time_to_sec(-8385959.9999994); +select time_to_sec(-8385959.9999995); +select time_to_sec(-8385959.999999999); +select time_to_sec(-10000101000001); + + + +-- ä¸¥æ ¼æ¨¡å¼ insert +create table test2(funcname text, result text); +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''8385960'')', time_to_sec('8385960')); +insert into test2 values ('time_to_sec(''8385959'')', time_to_sec('8385959')); +insert into test2 values ('time_to_sec(''-8385960'')', time_to_sec('-8385960')); +insert into test2 values ('time_to_sec(''-8385959'')', time_to_sec('-8385959')); +insert into test2 values ('time_to_sec(''10000101000000'')', time_to_sec('10000101000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''100000101000000'')', time_to_sec('100000101000000')); +insert into test2 values ('time_to_sec(''99999999999999'')', time_to_sec('99999999999999')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''20220101010101'')', time_to_sec('20220101010101')); +insert into test2 values 
('time_to_sec(''99991231235959'')', time_to_sec('99991231235959')); +insert into test2 values ('time_to_sec(''99991231240000'')', time_to_sec('99991231240000')); +insert into test2 values ('time_to_sec(''10000101000001'')', time_to_sec('10000101000001')); +insert into test2 values ('time_to_sec(''10000101000059'')', time_to_sec('10000101000059')); +insert into test2 values ('time_to_sec(''10000101000060'')', time_to_sec('10000101000060')); +insert into test2 values ('time_to_sec(''0'')', time_to_sec('0')); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +insert into test2 values ('time_to_sec(8385960)', time_to_sec(8385960)); +insert into test2 values ('time_to_sec(8385959)', time_to_sec(8385959)); +insert into test2 values ('time_to_sec(-8385960)', time_to_sec(-8385960)); +insert into test2 values ('time_to_sec(-8385959)', time_to_sec(-8385959)); +insert into test2 values ('time_to_sec(10000101000000)', time_to_sec(10000101000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +insert into test2 values ('time_to_sec(100000101000000)', time_to_sec(100000101000000)); +insert into test2 values ('time_to_sec(99999999999999)', time_to_sec(99999999999999)); +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +insert into test2 values ('time_to_sec(20220101010101)', time_to_sec(20220101010101)); +insert into test2 values ('time_to_sec(99991231235959)', time_to_sec(99991231235959)); +insert into test2 values ('time_to_sec(99991231240000)', time_to_sec(99991231240000)); +insert into test2 values ('time_to_sec(10000101000001)', time_to_sec(10000101000001)); +insert into test2 values ('time_to_sec(10000101000059)', time_to_sec(10000101000059)); +insert into test2 values ('time_to_sec(10000101000060)', time_to_sec(10000101000060)); +insert into test2 values ('time_to_sec(0)', time_to_sec(0)); +insert into test2 values ('time_to_sec(100000000000000)', 
time_to_sec(100000000000000)); +insert into test2 values ('time_to_sec(8385960.11)', time_to_sec(8385960.11)); +insert into test2 values ('time_to_sec(8385959.11)', time_to_sec(8385959.11)); +insert into test2 values ('time_to_sec(-8385960.11)', time_to_sec(-8385960.11)); +insert into test2 values ('time_to_sec(-8385959.11)', time_to_sec(-8385959.11)); +insert into test2 values ('time_to_sec(10000101000000.11)', time_to_sec(10000101000000.11)); +insert into test2 values ('time_to_sec(10000100000000.11)', time_to_sec(10000100000000.11)); +insert into test2 values ('time_to_sec(100000101000000.11)', time_to_sec(100000101000000.11)); +insert into test2 values ('time_to_sec(99999999999999.11)', time_to_sec(99999999999999.11)); +insert into test2 values ('time_to_sec(99990100000000.11)', time_to_sec(99990100000000.11)); +insert into test2 values ('time_to_sec(20220101010101.11)', time_to_sec(20220101010101.11)); +insert into test2 values ('time_to_sec(99991231235959.11)', time_to_sec(99991231235959.11)); +insert into test2 values ('time_to_sec(99991231240000.11)', time_to_sec(99991231240000.11)); +insert into test2 values ('time_to_sec(10000101000001.11)', time_to_sec(10000101000001.11)); +insert into test2 values ('time_to_sec(10000101000059.11)', time_to_sec(10000101000059.11)); +insert into test2 values ('time_to_sec(10000101000060.11)', time_to_sec(10000101000060.11)); +insert into test2 values ('time_to_sec(0.11)', time_to_sec(0.11)); +insert into test2 values ('time_to_sec(100000000000000.11)', time_to_sec(100000000000000.11)); +insert into test2 values('time_to_sec(10000101000001.99999)', time_to_sec(10000101000001.99999)); +insert into test2 values('time_to_sec(10000101000001.999999)', time_to_sec(10000101000001.999999)); +insert into test2 values('time_to_sec(10000101000001.9999994)', time_to_sec(10000101000001.9999994)); +insert into test2 values('time_to_sec(10000101000001.9999995)', time_to_sec(10000101000001.9999995)); +insert into test2 
values('time_to_sec(10000101000001.999999999)', time_to_sec(10000101000001.999999999)); +insert into test2 values('time_to_sec(8385959.99999)', time_to_sec(8385959.99999)); +insert into test2 values('time_to_sec(8385959.999999)', time_to_sec(8385959.999999)); +insert into test2 values('time_to_sec(8385959.9999994)', time_to_sec(8385959.9999994)); +insert into test2 values('time_to_sec(8385959.9999995)', time_to_sec(8385959.9999995)); +insert into test2 values('time_to_sec(8385959.999999999)', time_to_sec(8385959.999999999)); +insert into test2 values('time_to_sec(-10000101000001.99999)', time_to_sec(-10000101000001.99999)); +insert into test2 values('time_to_sec(-10000101000001.999999)', time_to_sec(-10000101000001.999999)); +insert into test2 values('time_to_sec(-10000101000001.999994)', time_to_sec(-10000101000001.999994)); +insert into test2 values('time_to_sec(-10000101000001.999995)', time_to_sec(-10000101000001.999995)); +insert into test2 values('time_to_sec(-10000101000001.999999999)', time_to_sec(-10000101000001.999999999)); +insert into test2 values('time_to_sec(-8385959.99999)', time_to_sec(-8385959.99999)); +insert into test2 values('time_to_sec(-8385959.999999)', time_to_sec(-8385959.999999)); +insert into test2 values('time_to_sec(-8385959.9999994)', time_to_sec(-8385959.9999994)); +insert into test2 values('time_to_sec(-8385959.9999995)', time_to_sec(-8385959.9999995)); +insert into test2 values('time_to_sec(-8385959.999999999)', time_to_sec(-8385959.999999999)); +insert into test2 values('time_to_sec(-10000101000001)', time_to_sec(-10000101000001)); +select * from test2 order by funcname; + +-- éžä¸¥æ ¼æ¨¡å¼ insert +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; +delete from test2 where 1=1; +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +insert into test2 values 
('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''8385960'')', time_to_sec('8385960')); +insert into test2 values ('time_to_sec(''8385959'')', time_to_sec('8385959')); +insert into test2 values ('time_to_sec(''-8385960'')', time_to_sec('-8385960')); +insert into test2 values ('time_to_sec(''-8385959'')', time_to_sec('-8385959')); +insert into test2 values ('time_to_sec(''10000101000000'')', time_to_sec('10000101000000')); +insert into test2 values ('time_to_sec(''10000100000000'')', time_to_sec('10000100000000')); +insert into test2 values ('time_to_sec(''100000101000000'')', time_to_sec('100000101000000')); +insert into test2 values ('time_to_sec(''99999999999999'')', time_to_sec('99999999999999')); +insert into test2 values ('time_to_sec(''99990100000000'')', time_to_sec('99990100000000')); +insert into test2 values ('time_to_sec(''20220101010101'')', time_to_sec('20220101010101')); +insert into test2 values ('time_to_sec(''99991231235959'')', time_to_sec('99991231235959')); +insert into test2 values ('time_to_sec(''99991231240000'')', time_to_sec('99991231240000')); +insert into test2 values ('time_to_sec(''10000101000001'')', time_to_sec('10000101000001')); +insert into test2 values ('time_to_sec(''10000101000059'')', time_to_sec('10000101000059')); +insert into test2 values ('time_to_sec(''10000101000060'')', time_to_sec('10000101000060')); +insert into test2 values ('time_to_sec(''0'')', time_to_sec('0')); +insert into test2 values ('time_to_sec(''100000000000000'')', time_to_sec('100000000000000')); +insert into test2 values ('time_to_sec(8385960)', time_to_sec(8385960)); +insert into test2 values ('time_to_sec(8385959)', 
time_to_sec(8385959)); +insert into test2 values ('time_to_sec(-8385960)', time_to_sec(-8385960)); +insert into test2 values ('time_to_sec(-8385959)', time_to_sec(-8385959)); +insert into test2 values ('time_to_sec(10000101000000)', time_to_sec(10000101000000)); +insert into test2 values ('time_to_sec(10000100000000)', time_to_sec(10000100000000)); +insert into test2 values ('time_to_sec(100000101000000)', time_to_sec(100000101000000)); +insert into test2 values ('time_to_sec(99999999999999)', time_to_sec(99999999999999)); +insert into test2 values ('time_to_sec(99990100000000)', time_to_sec(99990100000000)); +insert into test2 values ('time_to_sec(20220101010101)', time_to_sec(20220101010101)); +insert into test2 values ('time_to_sec(99991231235959)', time_to_sec(99991231235959)); +insert into test2 values ('time_to_sec(99991231240000)', time_to_sec(99991231240000)); +insert into test2 values ('time_to_sec(10000101000001)', time_to_sec(10000101000001)); +insert into test2 values ('time_to_sec(10000101000059)', time_to_sec(10000101000059)); +insert into test2 values ('time_to_sec(10000101000060)', time_to_sec(10000101000060)); +insert into test2 values ('time_to_sec(0)', time_to_sec(0)); +insert into test2 values ('time_to_sec(100000000000000)', time_to_sec(100000000000000)); +insert into test2 values ('time_to_sec(8385960.11)', time_to_sec(8385960.11)); +insert into test2 values ('time_to_sec(8385959.11)', time_to_sec(8385959.11)); +insert into test2 values ('time_to_sec(-8385960.11)', time_to_sec(-8385960.11)); +insert into test2 values ('time_to_sec(-8385959.11)', time_to_sec(-8385959.11)); +insert into test2 values ('time_to_sec(10000101000000.11)', time_to_sec(10000101000000.11)); +insert into test2 values ('time_to_sec(10000100000000.11)', time_to_sec(10000100000000.11)); +insert into test2 values ('time_to_sec(100000101000000.11)', time_to_sec(100000101000000.11)); +insert into test2 values ('time_to_sec(99999999999999.11)', time_to_sec(99999999999999.11)); 
+insert into test2 values ('time_to_sec(99990100000000.11)', time_to_sec(99990100000000.11)); +insert into test2 values ('time_to_sec(20220101010101.11)', time_to_sec(20220101010101.11)); +insert into test2 values ('time_to_sec(99991231235959.11)', time_to_sec(99991231235959.11)); +insert into test2 values ('time_to_sec(99991231240000.11)', time_to_sec(99991231240000.11)); +insert into test2 values ('time_to_sec(10000101000001.11)', time_to_sec(10000101000001.11)); +insert into test2 values ('time_to_sec(10000101000059.11)', time_to_sec(10000101000059.11)); +insert into test2 values ('time_to_sec(10000101000060.11)', time_to_sec(10000101000060.11)); +insert into test2 values ('time_to_sec(0.11)', time_to_sec(0.11)); +insert into test2 values ('time_to_sec(100000000000000.11)', time_to_sec(100000000000000.11)); +insert into test2 values('time_to_sec(10000101000001.99999)', time_to_sec(10000101000001.99999)); +insert into test2 values('time_to_sec(10000101000001.999999)', time_to_sec(10000101000001.999999)); +insert into test2 values('time_to_sec(10000101000001.9999994)', time_to_sec(10000101000001.9999994)); +insert into test2 values('time_to_sec(10000101000001.9999995)', time_to_sec(10000101000001.9999995)); +insert into test2 values('time_to_sec(10000101000001.999999999)', time_to_sec(10000101000001.999999999)); +insert into test2 values('time_to_sec(8385959.99999)', time_to_sec(8385959.99999)); +insert into test2 values('time_to_sec(8385959.999999)', time_to_sec(8385959.999999)); +insert into test2 values('time_to_sec(8385959.9999994)', time_to_sec(8385959.9999994)); +insert into test2 values('time_to_sec(8385959.9999995)', time_to_sec(8385959.9999995)); +insert into test2 values('time_to_sec(8385959.999999999)', time_to_sec(8385959.999999999)); +insert into test2 values('time_to_sec(-10000101000001.99999)', time_to_sec(-10000101000001.99999)); +insert into test2 values('time_to_sec(-10000101000001.999999)', time_to_sec(-10000101000001.999999)); +insert into 
test2 values('time_to_sec(-10000101000001.999994)', time_to_sec(-10000101000001.999994)); +insert into test2 values('time_to_sec(-10000101000001.999995)', time_to_sec(-10000101000001.999995)); +insert into test2 values('time_to_sec(-10000101000001.999999999)', time_to_sec(-10000101000001.999999999)); +insert into test2 values('time_to_sec(-8385959.99999)', time_to_sec(-8385959.99999)); +insert into test2 values('time_to_sec(-8385959.999999)', time_to_sec(-8385959.999999)); +insert into test2 values('time_to_sec(-8385959.9999994)', time_to_sec(-8385959.9999994)); +insert into test2 values('time_to_sec(-8385959.9999995)', time_to_sec(-8385959.9999995)); +insert into test2 values('time_to_sec(-8385959.999999999)', time_to_sec(-8385959.999999999)); +insert into test2 values('time_to_sec(-10000101000001)', time_to_sec(-10000101000001)); +select * from test2 order by funcname; + +drop table test2; + \c contrib_regression DROP DATABASE b_datetime_func_test3; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 1babfb54b..9ff7764e6 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -23,6 +23,12 @@ CREATE CAST(TEXT AS time) WITH FUNCTION time_cast_implicit(TEXT) AS ASSIGNMENT; DROP FUNCTION IF EXISTS pg_catalog.day(time without time zone); CREATE OR REPLACE FUNCTION pg_catalog.day(time without time zone) RETURNS int4 LANGUAGE C STABLE RETURNS NULL ON NULL INPUT as '$libdir/dolphin', 'dayofmonth_time'; +DROP FUNCTION IF EXISTS pg_catalog.time_to_sec(int8); +CREATE OR REPLACE FUNCTION pg_catalog.time_to_sec(int8) RETURNS int4 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int64_time_to_sec'; + +DROP FUNCTION IF EXISTS pg_catalog.time_to_sec(numeric); +CREATE OR REPLACE FUNCTION pg_catalog.time_to_sec(numeric) RETURNS int4 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_time_to_sec'; + --CREATE TIME_TIMESTAMP'S 
COMPARATION FUNCTION DROP FUNCTION IF EXISTS pg_catalog.time_eq_timestamp (time, timestamp without time zone) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.time_eq_timestamp (time, timestamp without time zone) RETURNS boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'time_eq_timestamp'; -- Gitee From 79d505ae892bb1d4347969684066f3e7c767bee6 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 16 Nov 2023 15:20:23 +0800 Subject: [PATCH 060/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=85=BC=E5=AE=B9B=E5=BA=93=E4=BD=BF?= =?UTF-8?q?=E7=94=A8cast=E7=B1=BB=E5=9E=8B=E8=BD=AC=E6=8D=A2=E8=BF=94?= =?UTF-8?q?=E5=9B=9E=E7=BB=93=E6=9E=9C=E4=B8=8Emysql=E4=B8=8D=E4=B8=80?= =?UTF-8?q?=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98.=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E5=85=BC=E5=AE=B9B=E5=BA=93=E4=BD=BF=E7=94=A8cast=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E8=BD=AC=E6=8D=A2=E8=BF=94=E5=9B=9E=E7=BB=93=E6=9E=9C?= =?UTF-8?q?=E4=B8=8Emysql=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90?= =?UTF-8?q?=E3=80=91:=20=E5=85=B6=E5=AE=9E=E4=B8=BB=E8=A6=81int64=5Fb=5Ffo?= =?UTF-8?q?rmat=5Ftimestamp=5Finternal=E4=B8=80=E4=B8=AA=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=E5=88=86=E6=94=AF=E4=B8=AD=E6=B2=A1=E5=B0=86time=5Ferror=5Ftyp?= =?UTF-8?q?e=E6=A0=87=E8=AE=B0=E6=88=90=E9=94=99=E8=AF=AF=EF=BC=8C?= =?UTF-8?q?=E5=9B=A0=E6=AD=A4=E8=BF=94=E5=9B=9E=E4=BA=860=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20?= =?UTF-8?q?=E5=85=B6=E5=AE=9E=E4=B8=BB=E8=A6=81=E9=9C=80=E8=A6=81=E5=9C=A8?= =?UTF-8?q?int64=5Fb=5Fformat=5Ftimestamp=5Finternal=E5=B0=86time=5Ferror?= =?UTF-8?q?=5Ftype=E8=AE=BE=E7=BD=AE=E4=B8=BA=E9=94=99=E8=AF=AF=E5=B0=B1?= =?UTF-8?q?=E8=A1=8C=E4=BA=86=EF=BC=8C=E4=B8=8A=E5=B1=82=E5=88=A4=E6=96=AD?= =?UTF-8?q?int64=5Fb=5Fformat=5Ftimestamp=5Finternal=E6=98=AF=E9=94=99?= 
=?UTF-8?q?=E8=AF=AF=E7=9A=84=E8=AF=9D=EF=BC=8C=E4=BC=9A=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E8=BF=94=E5=9B=9ENULL=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80?= =?UTF-8?q?=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengau?= =?UTF-8?q?ssorg/dashboard=3Fissue=3DI8FD9N=20=E3=80=90=E5=BC=80=E5=8F=91?= =?UTF-8?q?=E8=87=AA=E9=AA=8C=E6=8A=A5=E5=91=8A=E3=80=91:=20=E8=AF=B7?= =?UTF-8?q?=E9=99=84=E4=B8=8A=E8=87=AA=E9=AA=8C=E7=BB=93=E6=9E=9C(?= =?UTF-8?q?=E5=86=85=E5=AE=B9=E6=88=96=E8=80=85=E6=88=AA=E5=9B=BE)=20?= =?UTF-8?q?=E6=98=AF=E5=90=A6=E5=8F=AF=E4=BB=A5=E6=B7=BB=E5=8A=A0fastcheck?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=EF=BC=8C=E5=A6=82=E6=98=AF?= =?UTF-8?q?=EF=BC=8C=E8=AF=B7=E8=A1=A5=E5=85=85fastcheck=E7=94=A8=E4=BE=8B?= =?UTF-8?q?=20->=20=E5=BD=93=E5=89=8D=E7=9A=84=E7=94=A8=E4=BE=8B=E5=B7=B2?= =?UTF-8?q?=E7=BB=8F=E8=A6=86=E7=9B=96=E4=BA=86=EF=BC=8C=E4=B8=8D=E9=9C=80?= =?UTF-8?q?=E8=A6=81=E6=96=B0=E5=A2=9E=E3=80=82=20=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E6=B6=89=E5=8F=8A=E8=B5=84=E6=96=99=E4=BF=AE=E6=94=B9=EF=BC=8C?= =?UTF-8?q?=E5=A6=82=E6=98=AF=EF=BC=8C=E5=9C=A8docs=E4=BB=93=E5=BA=93?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E8=B5=84=E6=96=99=20=20=20=20->=20=E4=B8=8D?= =?UTF-8?q?=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91?= =?UTF-8?q?=E5=8D=87=E7=BA=A7=E5=9C=BA=E6=99=AF(=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E8=A1=A8=E4=BF=AE=E6=94=B9=E3=80=81=E6=97=A5=E5=BF=97=E6=8C=81?= =?UTF-8?q?=E4=B9=85=E5=8C=96=E4=BB=A5=E5=8F=8A=E4=BF=AE=E6=94=B9=E6=89=A7?= =?UTF-8?q?=E8=A1=8C=E6=80=81=E6=95=B0=E6=8D=AE=E6=A0=BC=E5=BC=8F)=20=20->?= =?UTF-8?q?=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83?= =?UTF-8?q?=E8=99=91=E5=9C=A8=E7=BA=BF=E6=89=A9=E5=AE=B9=E7=AD=89=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E5=9C=BA=E6=99=AF=20=20=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E8=80=83=E8=99=91=E5=BC=82?= =?UTF-8?q?=E5=B8=B8=E5=9C=BA=E6=99=AF/=E5=B9=B6=E5=8F=91=E5=9C=BA?= =?UTF-8?q?=E6=99=AF/=E5=89=8D=E5=90=91=E5=85=BC=E5=AE=B9/=E6=80=A7?= 
=?UTF-8?q?=E8=83=BD=E5=9C=BA=E6=99=AF=20=20->=20=E4=B8=8D=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=20=E6=98=AF=E5=90=A6=E5=AF=B9=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E4=BA=A7=E7=94=9F=E5=BD=B1=E5=93=8D=20=20=20?= =?UTF-8?q?->=20=E4=B8=8D=E6=B6=89=E5=8F=8A=20=E3=80=90=E5=85=B6=E4=BB=96?= =?UTF-8?q?=E8=AF=B4=E6=98=8E=E3=80=91:=20=E6=97=A0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/timestamp_test.out | 12 ++++++------ contrib/dolphin/plugin_utils/adt/timestamp.cpp | 5 ++++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/contrib/dolphin/expected/timestamp_test.out b/contrib/dolphin/expected/timestamp_test.out index 17e3c67e9..533978568 100644 --- a/contrib/dolphin/expected/timestamp_test.out +++ b/contrib/dolphin/expected/timestamp_test.out @@ -44,9 +44,9 @@ WARNING: timestamp out of range CONTEXT: referenced column: timestamp SQL function "bittodatetime" statement 1 referenced column: timestamp - timestamp ---------------------- - 0000-00-00 00:00:00 + timestamp +----------- + (1 row) select '2022-01-01'::bit(64)::timestamp; @@ -60,9 +60,9 @@ WARNING: timestamp out of range CONTEXT: referenced column: timestamptz SQL function "bittotimestamp" statement 1 referenced column: timestamptz - timestamptz ------------------------- - 0000-00-00 00:00:00-08 + timestamptz +------------- + (1 row) select 8385958.999999::bit(64)::time; diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 75d1d6d87..e1a673037 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -892,6 +892,9 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t if (ts < B_FORMAT_DATE_INT_MIN) { ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif return 
TIMESTAMP_ZERO; } /* find out how many digits in ts */ @@ -967,7 +970,7 @@ Datum timestamp_to_datum(PG_FUNCTION_ARGS, bool hasTz, int64 ts) { TimeErrorType time_error_type = TIME_CORRECT; int64 result = integer_b_format_timestamp(hasTz, ts, fcinfo->can_ignore, &time_error_type); - if (fcinfo->ccontext == COERCION_IMPLICIT && time_error_type == TIME_INCORRECT) { + if (fcinfo->ccontext == COERCION_IMPLICIT && time_error_type == TIME_INCORRECT && ENABLE_B_CMPT_MODE) { PG_RETURN_NULL(); } PG_RETURN_TIMESTAMP(result); -- Gitee From c90a42cbfe9e89b1a8e4cfaaa2b654e65ac94c0e Mon Sep 17 00:00:00 2001 From: Mijamind Date: Thu, 16 Nov 2023 23:18:48 +0800 Subject: [PATCH 061/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91spq=20bugfix=20=E8=A7=A3=E5=86=B3jdbc?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5DB=E6=89=A7=E8=A1=8C=E5=88=86=E5=8C=BA?= =?UTF-8?q?=E8=A1=A8=E8=BF=9E=E8=A1=A8+union=E6=9F=A5=E8=AF=A2=E8=AF=AD?= =?UTF-8?q?=E5=8F=A5=E6=8A=A5=E9=94=99=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/src/spqplugin.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index 1d53be077..c6dc6d936 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -42,6 +42,9 @@ static bool check_rangetbl_support(List* rtable) Assert(IsA(rte, RangeTblEntry)); if (rte->rtekind == RTE_FUNCTION || rte->ispartrel) { return false; + } else if (rte->rtekind == RTE_SUBQUERY) { + Assert(rte->subquery != NULL); + return check_rangetbl_support(rte->subquery->rtable); } } return true; @@ -156,10 +159,6 @@ static bool should_spq_planner(Query *parse) elog(ERROR, "parse should not be null."); } - if (!parse->is_support_spq) { - return false; - } - if (parse->commandType != CMD_SELECT) { elog(DEBUG1, "spq can not support this commandType."); return false; @@ -188,7 
+187,7 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b instr_time starttime; double totaltime = 0; t_thrd.spq_ctx.spq_role = ROLE_UTILITY; - if (should_spq_planner(parse)) { + if ((cursorOptions & CURSOR_OPT_SPQ_OK) && should_spq_planner(parse)) { t_thrd.spq_ctx.spq_role = ROLE_QUERY_COORDINTOR; t_thrd.spq_ctx.spq_session_id = u_sess->debug_query_id; t_thrd.spq_ctx.current_id = 0; -- Gitee From 9bc37b7161e4bc8446c7a58daca6bac000d13492 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Fri, 17 Nov 2023 15:30:57 +0800 Subject: [PATCH 062/434] =?UTF-8?q?create=20table=20like=E9=BB=98=E8=AE=A4?= =?UTF-8?q?=E5=A4=8D=E5=88=B6default=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/like_default_test.out | 122 +++++++++--------- .../include/plugin_nodes/parsenodes_common.h | 3 +- contrib/dolphin/plugin_parser/gram.y | 4 +- .../dolphin/plugin_parser/parse_utilcmd.cpp | 3 + contrib/dolphin/sql/like_default_test.sql | 4 +- 5 files changed, 71 insertions(+), 65 deletions(-) diff --git a/contrib/dolphin/expected/like_default_test.out b/contrib/dolphin/expected/like_default_test.out index b8d415df1..7437c39b9 100644 --- a/contrib/dolphin/expected/like_default_test.out +++ b/contrib/dolphin/expected/like_default_test.out @@ -35,8 +35,8 @@ Options: orientation=row, compression=no drop table if exists test_nv; -create table test_non(id int, name varchar(25)); -create table test_part(id int, name varchar(25)) +create table test_non(id int default 1, name varchar(25)); +create table test_part(id int default 1, name varchar(25)) partition by range(id) ( partition p1 values less than(100), @@ -60,7 +60,7 @@ create table test_non_like2 like test_non; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | 
integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -80,7 +80,7 @@ create table test_part_like2 like test_part; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -104,7 +104,7 @@ create table if not exists test_non_like2 like test_non; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -124,7 +124,7 @@ create table if not exists test_part_like2 like test_part; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) 
@@ -148,7 +148,7 @@ create table test_non_like2 like test_non including indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -168,7 +168,7 @@ create table test_part_like2 like test_part including indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -200,7 +200,7 @@ create table test_part_like2 like test_part including partition; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -234,7 +234,7 @@ create table test_part_like2 like test_part including partition including indexe Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) 
@@ -250,7 +250,7 @@ create table test_non_like1 (like test_non including all); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -260,7 +260,7 @@ create table test_non_like2 like test_non including all; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -270,7 +270,7 @@ create table test_part_like1 (like test_part including all); Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -282,7 +282,7 @@ create table test_part_like2 like test_part including all; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) 
@@ -296,7 +296,7 @@ create table test_non_like1 (like test_non including all excluding indexes); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -306,7 +306,7 @@ create table test_non_like2 like test_non including all excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -316,7 +316,7 @@ create table test_part_like1 (like test_part including all excluding indexes); Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -328,7 +328,7 @@ create table test_part_like2 like test_part including all excluding indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) 
@@ -342,7 +342,7 @@ create table test_non_like1 (like test_non including all excluding partition); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -352,7 +352,7 @@ create table test_non_like2 like test_non including all excluding partition; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -362,7 +362,7 @@ create table test_part_like1 (like test_part including all excluding partition); Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -372,7 +372,7 @@ create table test_part_like2 like test_part including all excluding partition; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -384,7 +384,7 @@ create table test_non_like1 (like test_non including all excluding partition exc Table "like_default_test.test_non_like1" Column | Type | 
Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -394,7 +394,7 @@ create table test_non_like2 like test_non including all excluding partition excl Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -404,7 +404,7 @@ create table test_part_like1 (like test_part including all excluding partition e Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -414,7 +414,7 @@ create table test_part_like2 like test_part including all excluding partition ex Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -436,7 +436,7 @@ create table test_non_like2 like test_non excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer 
| default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -456,7 +456,7 @@ create table test_part_like2 like test_part excluding indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -480,7 +480,7 @@ create table test_non_like2 like test_non excluding partition; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -500,7 +500,7 @@ create table test_part_like2 like test_part excluding partition; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -522,7 +522,7 @@ create table test_non_like2 like test_non excluding reloptions; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -542,7 +542,7 @@ create table test_part_like2 like 
test_part excluding reloptions; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -566,7 +566,7 @@ create table test_non_like2 like test_non excluding partition excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -586,7 +586,7 @@ create table test_part_like2 like test_part excluding partition excluding indexe Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -763,7 +763,7 @@ create table test_non_like2 like test_non; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -785,7 +785,7 @@ create table test_part_like2 like test_part; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description 
--------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -811,7 +811,7 @@ create table if not exists test_non_like2 like test_non; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -833,7 +833,7 @@ create table if not exists test_part_like2 like test_part; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -861,7 +861,7 @@ create table test_non_like2 like test_non including indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -876,7 +876,7 @@ create table test_part_like2 like test_part including indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character 
varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -911,7 +911,7 @@ create table test_part_like2 like test_part including partition; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -949,7 +949,7 @@ create table test_part_like2 like test_part including partition including indexe Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -967,7 +967,7 @@ create table test_non_like1 (like test_non including all); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like1_id_idx" btree (id) TABLESPACE pg_default @@ -979,7 +979,7 @@ create table test_non_like2 like test_non including all; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -991,7 +991,7 @@ create table 
test_part_like1 (like test_part including all); Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like1_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -1005,7 +1005,7 @@ create table test_part_like2 like test_part including all; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_part_like2_id_tableoid_idx" btree (id) TABLESPACE pg_default @@ -1021,7 +1021,7 @@ create table test_non_like1 (like test_non including all excluding indexes); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1031,7 +1031,7 @@ create table test_non_like2 like test_non including all excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1041,7 +1041,7 @@ create table test_part_like1 (like test_part including all excluding indexes); Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats 
target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -1053,7 +1053,7 @@ create table test_part_like2 like test_part including all excluding indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -1067,7 +1067,7 @@ create table test_non_like1 (like test_non including all excluding partition); Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like1_id_idx" btree (id) TABLESPACE pg_default @@ -1079,7 +1079,7 @@ create table test_non_like2 like test_non including all excluding partition; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -1101,7 +1101,7 @@ create table test_non_like1 (like test_non including all excluding partition exc Table "like_default_test.test_non_like1" Column | Type | Modifiers | Storage | Stats target | Description 
--------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1111,7 +1111,7 @@ create table test_non_like2 like test_non including all excluding partition excl Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1121,7 +1121,7 @@ create table test_part_like1 (like test_part including all excluding partition e Table "like_default_test.test_part_like1" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1131,7 +1131,7 @@ create table test_part_like2 like test_part including all excluding partition ex Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1153,7 +1153,7 @@ create table test_non_like2 like test_non excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character 
varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1173,7 +1173,7 @@ create table test_part_like2 like test_part excluding indexes; Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Partition By RANGE(id) Number of partitions: 5 (View pg_partition to check each partition range.) @@ -1197,7 +1197,7 @@ create table test_non_like2 like test_non excluding partition; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Indexes: "test_non_like2_id_idx" btree (id) TABLESPACE pg_default @@ -1235,7 +1235,7 @@ create table test_non_like2 like test_non excluding partition excluding indexes; Table "like_default_test.test_non_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no @@ -1255,7 +1255,7 @@ create table test_part_like2 like test_part excluding partition excluding indexe Table "like_default_test.test_part_like2" Column | Type | Modifiers | Storage | Stats target | Description --------+-----------------------+-----------+----------+--------------+------------- - id | integer | | plain | | + id | integer | default 1 | plain | | name | character varying(25) | | extended | | Has OIDs: no Options: orientation=row, compression=no diff --git 
a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 2257d5386..b5a73e575 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -1476,7 +1476,7 @@ typedef struct TableLikeClause { bits32 options; /* OR of TableLikeOption flags */ } TableLikeClause; -#define MAX_TABLE_LIKE_OPTIONS (14) +#define MAX_TABLE_LIKE_OPTIONS (15) typedef enum TableLikeOption { CREATE_TABLE_LIKE_DEFAULTS = 1 << 0, CREATE_TABLE_LIKE_CONSTRAINTS = 1 << 1, @@ -1493,6 +1493,7 @@ typedef enum TableLikeOption { CREATE_TABLE_LIKE_M_STYLE = 1 << 11, CREATE_TABLE_LIKE_EXCLUDING_INDEXES = 1 << 12, CREATE_TABLE_LIKE_EXCLUDING_PARTITION = 1 << 13, + CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS = 1 << 14, CREATE_TABLE_LIKE_MAX = 1 << MAX_TABLE_LIKE_OPTIONS, #endif CREATE_TABLE_LIKE_ALL = 0x7FFFFFFF diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e4f6f21f9..40a8c00d4 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -10623,11 +10623,13 @@ TableLikeOptionList: | TableLikeOptionList EXCLUDING TableLikeExcludingOption { if ($3 == CREATE_TABLE_LIKE_ALL) { - $$ = ($1 & ~$3) | CREATE_TABLE_LIKE_EXCLUDING_PARTITION | CREATE_TABLE_LIKE_EXCLUDING_INDEXES; + $$ = ($1 & ~$3) | CREATE_TABLE_LIKE_EXCLUDING_PARTITION | CREATE_TABLE_LIKE_EXCLUDING_INDEXES | CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS; } else if ($3 & CREATE_TABLE_LIKE_INDEXES) { $$ = ($1 & ~$3) | CREATE_TABLE_LIKE_EXCLUDING_INDEXES; } else if ($3 & CREATE_TABLE_LIKE_PARTITION) { $$ = ($1 & ~$3) | CREATE_TABLE_LIKE_EXCLUDING_PARTITION; + } else if ($3 & CREATE_TABLE_LIKE_DEFAULTS) { + $$ = ($1 & ~$3) | CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS; } else { $$ = $1 & ~$3; } diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index 7846ae5c9..2995682a6 100644 --- 
a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -1959,6 +1959,9 @@ static void transformTableLikeClause( if (!(table_like_clause-> options & CREATE_TABLE_LIKE_EXCLUDING_INDEXES)) { table_like_clause->options |= CREATE_TABLE_LIKE_INDEXES; } + if (!(table_like_clause-> options & CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS)) { + table_like_clause->options |= CREATE_TABLE_LIKE_DEFAULTS; + } if (!RELATION_ISNOT_REGULAR_PARTITIONED(relation) && !(table_like_clause-> options & CREATE_TABLE_LIKE_EXCLUDING_PARTITION)) { table_like_clause->options |= CREATE_TABLE_LIKE_PARTITION; diff --git a/contrib/dolphin/sql/like_default_test.sql b/contrib/dolphin/sql/like_default_test.sql index 913b0f508..4547ab00d 100644 --- a/contrib/dolphin/sql/like_default_test.sql +++ b/contrib/dolphin/sql/like_default_test.sql @@ -12,8 +12,8 @@ create table test_nv (id int, name nvarchar(10)); drop table if exists test_nv; -create table test_non(id int, name varchar(25)); -create table test_part(id int, name varchar(25)) +create table test_non(id int default 1, name varchar(25)); +create table test_part(id int default 1, name varchar(25)) partition by range(id) ( partition p1 values less than(100), -- Gitee From db205b4cede89065c65e3f7c8ee36ae1c8cea196 Mon Sep 17 00:00:00 2001 From: qianxue Date: Tue, 31 Oct 2023 03:35:12 -0400 Subject: [PATCH 063/434] =?UTF-8?q?AND=E4=B8=A4=E4=BE=A7=E6=94=AF=E6=8C=81?= =?UTF-8?q?timestamp=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/timestamp_test.out | 196 ++++++++++++++++++ .../dolphin/include/plugin_utils/timestamp.h | 1 + .../dolphin/plugin_utils/adt/timestamp.cpp | 28 +++ contrib/dolphin/sql/timestamp_test.sql | 41 +++- .../dolphin/sql_script/B_type_function.sql | 6 + 5 files changed, 271 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/timestamp_test.out 
b/contrib/dolphin/expected/timestamp_test.out index 533978568..5ea046b48 100644 --- a/contrib/dolphin/expected/timestamp_test.out +++ b/contrib/dolphin/expected/timestamp_test.out @@ -71,3 +71,199 @@ select 8385958.999999::bit(64)::time; 838:59:59 (1 row) +set dolphin.sql_mode=''; +-- true +select '2022-1-12 12:23:23'::timestamp and '20220112122324':: timestamp; + ?column? +---------- + t +(1 row) + +select '2023-1-12 12:23:23'::datetime and '20230112122324':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::timestamp and '20220112122324':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::datetime and '20220112122324':: timestamp; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' and timestamp '20220112122324'; + ?column? +---------- + t +(1 row) + +select datetime '2023-1-12 12:23:23' and datetime '20230112122324'; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' and datetime '20220112122324'; + ?column? +---------- + t +(1 row) + +select datetime '2022-1-12 12:23:23' and timestamp '20220112122324'; + ?column? +---------- + t +(1 row) + +-- false +select '0000-00-00 00:00:00'::timestamp and '20220112122324':: timestamp; + ?column? +---------- + f +(1 row) + +select '2023-1-12 12:23:23'::datetime and '00000000000000':: datetime; + ?column? +---------- + f +(1 row) + +select '2022-1-12 12:23:23'::timestamp and '00000000000000':: datetime; + ?column? +---------- + f +(1 row) + +select '2022-1-12 12:23:23'::datetime and '00000000000000':: timestamp; + ?column? +---------- + f +(1 row) + +select timestamp '2022-1-12 12:23:23' and timestamp '00000000000000'; + ?column? +---------- + f +(1 row) + +select datetime '2023-1-12 12:23:23' and datetime '00000000000000'; + ?column? +---------- + f +(1 row) + +select timestamp '2022-1-12 12:23:23' and datetime '00000000000000'; + ?column? 
+---------- + f +(1 row) + +select datetime '2022-1-12 12:23:23' and timestamp '00000000000000'; + ?column? +---------- + f +(1 row) + +-- true +select '2022-1-12 12:23:23'::timestamp or '20220112122324':: timestamp; + ?column? +---------- + t +(1 row) + +select '2023-1-12 12:23:23'::datetime or '20230112122324':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::timestamp or '20220112122324':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::datetime or '20220112122324':: timestamp; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' or timestamp '20220112122324'; + ?column? +---------- + t +(1 row) + +select datetime '2023-1-12 12:23:23' or datetime '20230112122324'; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' or datetime '20220112122324'; + ?column? +---------- + t +(1 row) + +select datetime '2022-1-12 12:23:23' or timestamp '20220112122324'; + ?column? +---------- + t +(1 row) + +select '0000-00-00 00:00:00'::timestamp or '20220112122324':: timestamp; + ?column? +---------- + t +(1 row) + +select '2023-1-12 12:23:23'::datetime or '00000000000000':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::timestamp or '00000000000000':: datetime; + ?column? +---------- + t +(1 row) + +select '2022-1-12 12:23:23'::datetime or '00000000000000':: timestamp; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' or timestamp '00000000000000'; + ?column? +---------- + t +(1 row) + +select datetime '2023-1-12 12:23:23' or datetime '00000000000000'; + ?column? +---------- + t +(1 row) + +select timestamp '2022-1-12 12:23:23' or datetime '00000000000000'; + ?column? +---------- + t +(1 row) + +select datetime '2022-1-12 12:23:23' or timestamp '00000000000000'; + ?column? 
+---------- + t +(1 row) + diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 8cd514a58..9928757b5 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -55,6 +55,7 @@ do { #define IZONE_BOUND1 INT64CONST(46740000000) /* 12:59:00 */ #define IZONE_BOUND2 INT64CONST(46800000000) /* 13:00:00 */ #define TIMESTAMP_ZERO (-211810291200000000) +#define TIMESTAMPTZ_ZERO (-211810320343000000) #define NORMAL_DATE 0 /* enable normal date, mainly work in lldiv_decode_tm_with_sql_mode */ #define ENABLE_ZERO_DAY 1 diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index e1a673037..abe13e0f2 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -11570,6 +11570,34 @@ Datum timestamptz_ge_time(PG_FUNCTION_ARGS) PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); } + +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_bool); +extern "C" DLL_PUBLIC Datum timestamptz_bool(PG_FUNCTION_ARGS); +Datum timestamptz_bool(PG_FUNCTION_ARGS) +{ + TimestampTz timestamptz = PG_GETARG_TIMESTAMPTZ(0); + char* tmp = NULL; + tmp = DatumGetCString(DirectFunctionCall1(timestamptz_out, timestamptz)); + + if (timestamptz == TIMESTAMP_ZERO || timestamptz == TIMESTAMPTZ_ZERO) + PG_RETURN_BOOL(false); + + PG_RETURN_BOOL(tmp ? true : false); +} + +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_bool); +extern "C" DLL_PUBLIC Datum timestamp_bool(PG_FUNCTION_ARGS); +Datum timestamp_bool(PG_FUNCTION_ARGS) +{ + Timestamp timestamp = PG_GETARG_TIMESTAMP(0); + char* tmp = NULL; + tmp = DatumGetCString(DirectFunctionCall1(timestamp_out, timestamp)); + + if (timestamp == TIMESTAMP_ZERO) + PG_RETURN_BOOL(false); + + PG_RETURN_BOOL(tmp ? 
true : false); +} #endif #endif diff --git a/contrib/dolphin/sql/timestamp_test.sql b/contrib/dolphin/sql/timestamp_test.sql index 207d02bda..08e2db656 100644 --- a/contrib/dolphin/sql/timestamp_test.sql +++ b/contrib/dolphin/sql/timestamp_test.sql @@ -10,4 +10,43 @@ set dolphin.b_compatibility_mode to on; select '2022-01-01'::bit(64)::datetime; select '2022-01-01'::bit(64)::timestamp; -select 8385958.999999::bit(64)::time; \ No newline at end of file +select 8385958.999999::bit(64)::time; + +set dolphin.sql_mode=''; +-- true +select '2022-1-12 12:23:23'::timestamp and '20220112122324':: timestamp; +select '2023-1-12 12:23:23'::datetime and '20230112122324':: datetime; +select '2022-1-12 12:23:23'::timestamp and '20220112122324':: datetime; +select '2022-1-12 12:23:23'::datetime and '20220112122324':: timestamp; +select timestamp '2022-1-12 12:23:23' and timestamp '20220112122324'; +select datetime '2023-1-12 12:23:23' and datetime '20230112122324'; +select timestamp '2022-1-12 12:23:23' and datetime '20220112122324'; +select datetime '2022-1-12 12:23:23' and timestamp '20220112122324'; + +-- false +select '0000-00-00 00:00:00'::timestamp and '20220112122324':: timestamp; +select '2023-1-12 12:23:23'::datetime and '00000000000000':: datetime; +select '2022-1-12 12:23:23'::timestamp and '00000000000000':: datetime; +select '2022-1-12 12:23:23'::datetime and '00000000000000':: timestamp; +select timestamp '2022-1-12 12:23:23' and timestamp '00000000000000'; +select datetime '2023-1-12 12:23:23' and datetime '00000000000000'; +select timestamp '2022-1-12 12:23:23' and datetime '00000000000000'; +select datetime '2022-1-12 12:23:23' and timestamp '00000000000000'; + +-- true +select '2022-1-12 12:23:23'::timestamp or '20220112122324':: timestamp; +select '2023-1-12 12:23:23'::datetime or '20230112122324':: datetime; +select '2022-1-12 12:23:23'::timestamp or '20220112122324':: datetime; +select '2022-1-12 12:23:23'::datetime or '20220112122324':: timestamp; +select 
timestamp '2022-1-12 12:23:23' or timestamp '20220112122324'; +select datetime '2023-1-12 12:23:23' or datetime '20230112122324'; +select timestamp '2022-1-12 12:23:23' or datetime '20220112122324'; +select datetime '2022-1-12 12:23:23' or timestamp '20220112122324'; +select '0000-00-00 00:00:00'::timestamp or '20220112122324':: timestamp; +select '2023-1-12 12:23:23'::datetime or '00000000000000':: datetime; +select '2022-1-12 12:23:23'::timestamp or '00000000000000':: datetime; +select '2022-1-12 12:23:23'::datetime or '00000000000000':: timestamp; +select timestamp '2022-1-12 12:23:23' or timestamp '00000000000000'; +select datetime '2023-1-12 12:23:23' or datetime '00000000000000'; +select timestamp '2022-1-12 12:23:23' or datetime '00000000000000'; +select datetime '2022-1-12 12:23:23' or timestamp '00000000000000'; diff --git a/contrib/dolphin/sql_script/B_type_function.sql b/contrib/dolphin/sql_script/B_type_function.sql index 7d9707eac..0f3e2e88b 100644 --- a/contrib/dolphin/sql_script/B_type_function.sql +++ b/contrib/dolphin/sql_script/B_type_function.sql @@ -306,3 +306,9 @@ CREATE OR REPLACE FUNCTION pg_catalog.b_not_between_and("any","any","any") retur DROP FUNCTION IF EXISTS pg_catalog.b_not_sym_between_and("any","any","any") cascade; CREATE OR REPLACE FUNCTION pg_catalog.b_not_sym_between_and("any","any","any") returns boolean LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'not_sym_between_and'; + +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_bool(timestamptz) returns boolean LANGUAGE C immutable strict as '$libdir/dolphin', 'timestamptz_bool'; +CREATE CAST (timestamptz as boolean) WITH FUNCTION timestamptz_bool(timestamptz) AS IMPLICIT; + +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_bool(timestamp(0) without time zone) returns boolean LANGUAGE C immutable strict as '$libdir/dolphin', 'timestamp_bool'; +CREATE CAST (timestamp(0) without time zone as boolean) WITH FUNCTION timestamp_bool(timestamp(0) without time zone) AS IMPLICIT; -- Gitee 
From 8bdc6c596f1c385efb428bd357477726b94de957 Mon Sep 17 00:00:00 2001 From: qianxue Date: Wed, 1 Nov 2023 06:04:36 -0400 Subject: [PATCH 064/434] =?UTF-8?q?AND=E4=B8=A4=E4=BE=A7=E6=94=AF=E6=8C=81?= =?UTF-8?q?timestamp=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B=5F1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_operator_test.out | 258 +++++++----------- .../dolphin/plugin_utils/adt/timestamp.cpp | 6 +- .../rollback_script/dolphin--3.0--2.0.sql | 7 +- .../json_operator_test.sql | 66 ++--- .../dolphin/sql_script/B_type_function.sql | 6 - .../upgrade_script/dolphin--2.0--3.0.sql | 10 + 6 files changed, 150 insertions(+), 203 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 2709528ce..956fda7f2 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -359,13 +359,13 @@ CREATE TABLE test_json_type AS SELECT `datetime` << `json` AS `datetime<>json | 20230205191050 datetime<>json | 20230205191050 datetime(4)<>json | 20230205191050 timestamp<>json | 20230205191050 timestamp(4)<>datetime | 0 json<>datetime(4) | 0 json<>timestamp | 0 json<>timestamp(4) | 0 json< Date: Sat, 18 Nov 2023 15:08:25 +0800 Subject: [PATCH 065/434] Sync server code 8cbad5af1d17472b8f6b31eea783ba1c6fe1ad1d --- contrib/dolphin/include/plugin_nodes/parsenodes_common.h | 1 - contrib/dolphin/plugin_parser/analyze.cpp | 3 --- 2 files changed, 4 deletions(-) diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 2257d5386..44da49bb7 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -2152,7 +2152,6 @@ typedef struct Query { #ifdef USE_SPQ void* 
intoPolicy; ParentStmtType parentStmtType; - bool is_support_spq; #endif } Query; diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index bb8b8aff9..b39e3b108 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -3175,9 +3175,6 @@ static Query* transformSelectStmt(ParseState* pstate, SelectStmt* stmt, bool isF ListCell* l = NULL; qry->commandType = CMD_SELECT; -#ifdef USE_SPQ - qry->is_support_spq = true; -#endif if (stmt->startWithClause != NULL) { pstate->p_addStartInfo = true; -- Gitee From 2de7b76dfba8408dbc6afd24861dbac0d55309af Mon Sep 17 00:00:00 2001 From: Julong-Li <584147810@qq.com> Date: Mon, 20 Nov 2023 16:21:23 +0800 Subject: [PATCH 066/434] =?UTF-8?q?issue=E4=BF=AE=E6=94=B9:=E5=85=81?= =?UTF-8?q?=E8=AE=B8=E8=BF=9E=E7=BB=AD=E7=9A=84=E6=93=8D=E4=BD=9C=E7=AC=A6?= =?UTF-8?q?=E5=87=BA=E7=8E=B0=E4=BB=A5=E5=85=BC=E5=AE=B9mysql?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/operator_associativity.out | 107 ++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_parser/gram.y | 3 +- .../dolphin/sql/operator_associativity.sql | 29 +++++ 4 files changed, 139 insertions(+), 2 deletions(-) create mode 100644 contrib/dolphin/expected/operator_associativity.out create mode 100644 contrib/dolphin/sql/operator_associativity.sql diff --git a/contrib/dolphin/expected/operator_associativity.out b/contrib/dolphin/expected/operator_associativity.out new file mode 100644 index 000000000..413cdb43c --- /dev/null +++ b/contrib/dolphin/expected/operator_associativity.out @@ -0,0 +1,107 @@ +create table test_op_associativity(a int); +insert into test_op_associativity values (12); +insert into test_op_associativity values (12); +set dolphin.sql_mode = ''; +select * from test_op_associativity where (length(a) > 1 || length(a) >1); + a +---- + 12 + 12 +(2 rows) + +set dolphin.sql_mode 
= 'pipes_as_concat'; +select * from test_op_associativity where (length(a) > 1 || length(a) >1); + a +--- +(0 rows) + +select 1 < 2 < 3; + ?column? +---------- + t +(1 row) + +select 2 < 1 < 3; + ?column? +---------- + t +(1 row) + +select 2 < (1 < 3); + ?column? +---------- + f +(1 row) + +select 3 > 2 > 1; + ?column? +---------- + f +(1 row) + +select -2 > -3 > 0; + ?column? +---------- + t +(1 row) + +select -2 > (-3 > 0); + ?column? +---------- + f +(1 row) + +select 1 <= 2 <= 2; + ?column? +---------- + t +(1 row) + +select 2 <= 1 <= 2; + ?column? +---------- + t +(1 row) + +select 2 <= (1 <= 2); + ?column? +---------- + f +(1 row) + +select 3 >= 2 >= 1; + ?column? +---------- + t +(1 row) + +select -2 >= -3 >= 0; + ?column? +---------- + t +(1 row) + +select -2 >= (-3 >= 0); + ?column? +---------- + f +(1 row) + +select 1 != 1 != 1; + ?column? +---------- + t +(1 row) + +select 1 != 0 != 1; + ?column? +---------- + f +(1 row) + +select 2 != 1 != 0; + ?column? +---------- + t +(1 row) + diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index fe1410cdc..72f198aaa 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -147,7 +147,7 @@ test: test_uuid_short test_sleep distinct dual_test fulltext_index test_text2boo test: operator_compatibility_test/numeric_operator_test_normal operator_compatibility_test/numeric_operator_test_min operator_compatibility_test/numeric_operator_test_max operator_compatibility_test/time_operator_test operator_compatibility_test/string_operator_test operator_compatibility_test/multi_type_operator_test -test: operator_compatibility_test/json_operator_test operator_compatibility_test/json_cmp_operator_test +test: operator_compatibility_test/json_operator_test operator_compatibility_test/json_cmp_operator_test operator_associativity test: test_show_status pad_char_to_full_length dump_dumpall_test timestamp_test test_mediumtext ai diff --git 
a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e4f6f21f9..d68d3622f 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -1323,7 +1323,8 @@ static inline void ChangeBpcharCastType(TypeName* typname); %left AND %right NOT %right '=' CmpNullOp COLON_EQUALS -%nonassoc '<' '>' CmpOp BINARY +%left '<' '>' CmpOp +%nonassoc BINARY %nonassoc LIKE ILIKE SIMILAR SOUNDS NOT_LIKE NOT_ILIKE NOT_SIMILAR %nonassoc ESCAPE %nonassoc OVERLAPS diff --git a/contrib/dolphin/sql/operator_associativity.sql b/contrib/dolphin/sql/operator_associativity.sql new file mode 100644 index 000000000..a055636b1 --- /dev/null +++ b/contrib/dolphin/sql/operator_associativity.sql @@ -0,0 +1,29 @@ +create table test_op_associativity(a int); +insert into test_op_associativity values (12); +insert into test_op_associativity values (12); + +set dolphin.sql_mode = ''; +select * from test_op_associativity where (length(a) > 1 || length(a) >1); + +set dolphin.sql_mode = 'pipes_as_concat'; +select * from test_op_associativity where (length(a) > 1 || length(a) >1); + +select 1 < 2 < 3; +select 2 < 1 < 3; +select 2 < (1 < 3); + +select 3 > 2 > 1; +select -2 > -3 > 0; +select -2 > (-3 > 0); + +select 1 <= 2 <= 2; +select 2 <= 1 <= 2; +select 2 <= (1 <= 2); + +select 3 >= 2 >= 1; +select -2 >= -3 >= 0; +select -2 >= (-3 >= 0); + +select 1 != 1 != 1; +select 1 != 0 != 1; +select 2 != 1 != 0; -- Gitee From 87aca051ec563db18b6bc9e67f08142b1834b58f Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Mon, 20 Nov 2023 20:11:27 +0800 Subject: [PATCH 067/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8DIndexElem=E7=BB=93?= =?UTF-8?q?=E6=9E=84=E4=BD=93=E6=88=90=E5=91=98=E5=88=9D=E5=A7=8B=E5=8C=96?= =?UTF-8?q?=E6=9C=89=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/src/indexing.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/contrib/timescaledb/src/indexing.cpp b/contrib/timescaledb/src/indexing.cpp index 721a78c98..2ff689a2c 100644 --- a/contrib/timescaledb/src/indexing.cpp +++ b/contrib/timescaledb/src/indexing.cpp @@ -183,7 +183,7 @@ create_default_indexes(Hypertable *ht, Dimension *time_dim, Dimension *space_dim .type = T_IndexElem, .name = get_open_dim_name(time_dim), .expr = get_open_dim_expr(time_dim), - .indexcolname = "", + .indexcolname = NULL, .collation = NULL, .opclass = NULL, .ordering = SORTBY_DESC, @@ -205,7 +205,7 @@ create_default_indexes(Hypertable *ht, Dimension *time_dim, Dimension *space_dim .type = T_IndexElem, .name = NameStr(space_dim->fd.column_name), .expr = NULL, - .indexcolname = "", + .indexcolname = NULL, .collation = NULL, .opclass = NULL, .ordering = SORTBY_ASC, -- Gitee From 79b1b044b718345d2813083aaf829bb58fb5ff76 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 20 Nov 2023 22:29:17 +0800 Subject: [PATCH 068/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dsubtime(8385959,'-1:00:00')=E5=92=8C=E9=9D=9E?= =?UTF-8?q?=E4=B8=A5=E6=A0=BC=E6=A8=A1=E5=BC=8F=E4=B8=8Binsert(subtime('83?= =?UTF-8?q?9:59:59',=20'837:59:59'))=E5=9C=BA=E6=99=AF=E4=B8=8B=E5=92=8Cmy?= =?UTF-8?q?sql=E8=A1=A8=E7=8E=B0=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85?= =?UTF-8?q?=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8Dsubtime(8385959,'-1:00:?= =?UTF-8?q?00')=E5=92=8C=E9=9D=9E=E4=B8=A5=E6=A0=BC=E6=A8=A1=E5=BC=8F?= =?UTF-8?q?=E4=B8=8Binsert(subtime('839:59:59',=20'837:59:59'))=E5=9C=BA?= =?UTF-8?q?=E6=99=AF=E4=B8=8B=E5=92=8Cmysql=E8=A1=A8=E7=8E=B0=E4=B8=8D?= =?UTF-8?q?=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E5=9C=A8mysql=E4=B8=AD=EF=BC=8C=E5=9C=A8subtime=E4=B8=AD?= =?UTF-8?q?=EF=BC=8C=E8=BD=AC=E6=8D=A2=E6=88=90time=E7=9A=84=E6=97=B6?= 
=?UTF-8?q?=E5=80=99=EF=BC=8C=E5=A6=82=E6=9E=9C=E8=BF=94=E5=9B=9E=E5=B0=8F?= =?UTF-8?q?=E4=BA=8E-838:59:59=EF=BC=8C=E5=88=99=E6=89=93=E5=8D=B0warning?= =?UTF-8?q?=EF=BC=8C=E5=B9=B6=E4=B8=94=E8=BF=94=E5=9B=9E-838:59:59?= =?UTF-8?q?=EF=BC=8C=E5=A6=82=E6=9E=9C=E5=A4=A7=E4=BA=8E838:59:59,?= =?UTF-8?q?=E5=88=99=E6=89=93=E5=8D=B0warning=EF=BC=8C=E5=B9=B6=E4=B8=94?= =?UTF-8?q?=E8=BF=94=E5=9B=9E838:59:59,=20=E5=A6=82=E4=B8=8B=E6=89=80?= =?UTF-8?q?=E7=A4=BA=EF=BC=9A=E4=BD=86=E6=98=AFopenguass=E6=B2=A1=E6=AD=A4?= =?UTF-8?q?=E9=80=BB=E8=BE=91=EF=BC=8C=E5=9B=A0=E6=AD=A4=E5=87=BA=E7=8E=B0?= =?UTF-8?q?=E4=BA=86-839=E7=9A=84=E7=BB=93=E6=9E=9C=EF=BC=8C=E6=AD=A4?= =?UTF-8?q?=E5=A4=96=EF=BC=8C=E5=8F=91=E7=8E=B0=E9=9D=9E=E4=B8=A5=E6=A0=BC?= =?UTF-8?q?=E6=A8=A1=E5=BC=8F=E4=B8=8Bsubtime=20insert=E4=B9=9F=E6=9C=89?= =?UTF-8?q?=E9=97=AE=E9=A2=98=EF=BC=8C=E4=B8=80=E5=B9=B6=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88?= =?UTF-8?q?=E3=80=91:=20=E5=8F=82=E8=80=83mysql=E7=9A=84adjust=5Ftime=5Fra?= =?UTF-8?q?nge=E6=96=B9=E6=B3=95=E8=BF=81=E7=A7=BB=E8=BF=87=E6=9D=A5?= =?UTF-8?q?=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82?= =?UTF-8?q?=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengaussorg/das?= =?UTF-8?q?hboard=3Fissue=3DI8H76T?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 16 +-- .../b_compatibility_time_funcs.out | 12 +- .../b_compatibility_time_funcs2.out | 122 ++++++++++++++++-- contrib/dolphin/expected/conv_cast_test.out | 2 +- .../dolphin/include/plugin_utils/timestamp.h | 3 + contrib/dolphin/plugin_utils/adt/date.cpp | 11 +- contrib/dolphin/plugin_utils/adt/datetime.cpp | 5 +- .../dolphin/plugin_utils/adt/timestamp.cpp | 45 ++++++- .../b_compatibility_time_funcs2.sql | 25 ++++ 9 files changed, 204 insertions(+), 37 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out 
b/contrib/dolphin/expected/b_compatibility_time_type.out index 797c8b676..e5b5b8404 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -361,7 +361,7 @@ SELECT time'12:12:12.123456'; (1 row) SELECT time'34 22:59:59.999999'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "34 22:59:59.999999" CONTEXT: referenced column: time SELECT time'12:60:12.123456'; ERROR: date/time field value out of range: "12:60:12.123456" @@ -370,10 +370,10 @@ SELECT time'12:12:60.123456'; ERROR: date/time field value out of range: "12:12:60.123456" CONTEXT: referenced column: time SELECT time'34 23:00:00'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "34 23:00:00" CONTEXT: referenced column: time SELECT time'-34 23:00:00'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-34 23:00:00" CONTEXT: referenced column: time SELECT time'34 22:59:59'; time @@ -388,16 +388,16 @@ SELECT time'-34 22:59:59'; (1 row) SELECT time'34 22:59:59.999999'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "34 22:59:59.999999" CONTEXT: referenced column: time SELECT time'34 22:59:59.9999999999999999999999'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "34 22:59:59.9999999999999999999999" CONTEXT: referenced column: time SELECT time'-34 22:59:59.9999999999999999999999'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-34 22:59:59.9999999999999999999999" CONTEXT: referenced column: time SELECT time'-34 22:59:59.9999999999999999999999'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-34 22:59:59.9999999999999999999999" CONTEXT: referenced column: time SELECT time'838:59:59'; time @@ -406,7 +406,7 @@ SELECT time'838:59:59'; (1 row) SELECT time'839:00:00'; -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:00:00" CONTEXT: referenced 
column: time SELECT time'59:59'; time diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 7a26aa7d2..2e98ca833 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -480,10 +480,10 @@ select * from func_test; --?.* SELECT SUBDATE(time'839:59:59', interval 2 hour); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:59:59" CONTEXT: referenced column: subdate SELECT SUBDATE(time'-838:59:59.9', interval 2 hour); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-838:59:59.9" CONTEXT: referenced column: subdate SELECT SUBDATE('839:59:59', interval 2 hour); WARNING: date/time field value out of range: "839:59:59" @@ -553,7 +553,7 @@ select hour('838:59:59'); (1 row) select hour('-840:59:59'); -WARNING: Incorrect time value +WARNING: date/time field value out of range: "-840:59:59" CONTEXT: referenced column: hour hour ------ @@ -561,7 +561,7 @@ CONTEXT: referenced column: hour (1 row) select hour('840:59:59'); -WARNING: Incorrect time value +WARNING: date/time field value out of range: "840:59:59" CONTEXT: referenced column: hour hour ------ @@ -606,7 +606,7 @@ select hour('838:59:59'); (1 row) select hour('-840:59:59'); -WARNING: Incorrect time value +WARNING: date/time field value out of range: "-840:59:59" CONTEXT: referenced column: hour hour ------ @@ -614,7 +614,7 @@ CONTEXT: referenced column: hour (1 row) select hour('840:59:59'); -WARNING: Incorrect time value +WARNING: date/time field value out of range: "840:59:59" CONTEXT: referenced column: hour hour ------ diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs2.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs2.out index 32328afb1..1ea720dbd 100644 --- 
a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs2.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs2.out @@ -50,7 +50,7 @@ insert into func_test2(functionName, result) values('subtime(''10000-1-1 20:59:5 ERROR: date/time field value out of range: "10000-1-1 20:59:59" CONTEXT: referenced column: result insert into func_test2(functionName, result) values('subtime(''9999-12-31 20:59:59'', ''839:59:59'')', subtime('9999-12-31 20:59:59', '839:59:59') ); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:59:59" CONTEXT: referenced column: result -- SUBTIME ( time, time ) -- 正常测试 @@ -90,10 +90,19 @@ insert into func_test2(functionName, result) values('subtime(''-838:59:59'', ''2 ERROR: time field value out of range CONTEXT: referenced column: result insert into func_test2(functionName, result) values('subtime(''839:59:59'', ''837:59:59'')', subtime('839:59:59', '837:59:59') ); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:59:59" CONTEXT: referenced column: result insert into func_test2(functionName, result) values('subtime(''-837:59:59'', ''-839:59:59'')', subtime('-837:59:59', '-839:59:59') ); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-839:59:59" +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(8385959, ''-1:00:00'')', subtime(8385959,'-1:00:00')); +ERROR: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(8375959.9999999, ''-1:00:00'')', subtime(8375959.9999999,'-1:00:00')); +ERROR: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(-8385959, ''1:00:00'')', subtime(-8385959,'1:00:00')); +ERROR: time field value out of range CONTEXT: referenced column: result -- dateæ ¼å¼å­—符串 insert into func_test2(functionName, result) 
values('subtime(''2000-01-01'', ''2022-01-01'')', subtime('2000-01-01', '2022-01-01') ); @@ -226,10 +235,10 @@ insert into func_test2(functionName, result) values('TIMEDIFF(''-838:59:59'', '' ERROR: time field value out of range CONTEXT: referenced column: result insert into func_test2(functionName, result) values('TIMEDIFF(''839:59:59'', ''837:59:59'')', TIMEDIFF('839:59:59', '837:59:59') ); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:59:59" CONTEXT: referenced column: result insert into func_test2(functionName, result) values('TIMEDIFF(''-837:59:59'', ''-839:59:59'')', TIMEDIFF('-837:59:59', '-839:59:59') ); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-839:59:59" CONTEXT: referenced column: result -- TIMEDIFF ( time, datetime ), TIMEDIFF ( time, datetime ) insert into func_test2(functionName, result) values('TIMEDIFF(''2000-02-28 20:59:59'', ''4:00'')', TIMEDIFF('2000-02-28 20:59:59', '4:00') ); @@ -271,10 +280,10 @@ insert into func_test2(functionName, result) values('TIME(''-838:59:59'')',TIME( -- 边界情况 insert into func_test2(functionName, result) values('TIME(''838:0:0'')',TIME('838:0:0')); insert into func_test2(functionName, result) values('TIME(''839:0:0'')',TIME('839:0:0')); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:0:0" CONTEXT: referenced column: result insert into func_test2(functionName, result) values('TIME(''-839:0:0'')',TIME('-839:0:0')); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-839:0:0" CONTEXT: referenced column: result -- å¼‚å¸¸æ ¼å¼ insert into func_test2(functionName, result) values('TIME(''0:-1:0'')',TIME('0:-1:0')); @@ -356,10 +365,10 @@ insert into func_test2(functionName, result) values('TIME_FORMAT(''-838:59:59'', -- 边界情况 insert into func_test2(functionName, result) values(' TIME_FORMAT(''838:0:0'', ''%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'') ', TIME_FORMAT('838:0:0', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k')); insert into 
func_test2(functionName, result) values(' TIME_FORMAT(''839:0:0'', ''%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'') ', TIME_FORMAT('839:0:0', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k')); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:0:0" CONTEXT: referenced column: result insert into func_test2(functionName, result) values(' TIME_FORMAT(''-839:0:0'', ''%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'') ', TIME_FORMAT('-839:0:0', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k')); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "-839:0:0" CONTEXT: referenced column: result -- 分秒越界timeæ ¼å¼ insert into func_test2(functionName, result) values(' TIME_FORMAT(''0:-1:0'', ''%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'') ', TIME_FORMAT('0:-1:0', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k')); @@ -503,7 +512,7 @@ insert into func_test2(functionName, result) values('TIMESTAMP(20031231120000.00 -- 边界值 insert into func_test2(functionName, result) values('TIMESTAMP(''1000-12-31'',''838:59:59'')', TIMESTAMP('1000-12-31', '838:59:59')); insert into func_test2(functionName, result) values('TIMESTAMP(''1000-01-01 00:00:00'',''839:00:00'')', TIMESTAMP('1000-12-31 00:00:00', '839:00:00')); -ERROR: Incorrect time value +ERROR: date/time field value out of range: "839:00:00" CONTEXT: referenced column: result insert into func_test2(functionName, result) values('TIMESTAMP(''9999-12-31'',''23:59:59.999999'')', TIMESTAMP('9999-12-31', '23:59:59.999999')); insert into func_test2(functionName, result) values('TIMESTAMP(''9999-12-31 00:00:00'',''24:00:00'')', TIMESTAMP('9999-12-31 00:00:00', '24:00:00')); @@ -1054,6 +1063,99 @@ select * from func_test2; TIMESTAMPADD(SECOND,-0.001,'2022-07-27 00:00:00') | 2022-07-26 23:59:59.999 --?.* +-- å•独select边界测试 +select subtime('838:59:59', '-25:00'); +WARNING: time field value out of range +CONTEXT: referenced column: subtime + subtime +----------- + 838:59:59 +(1 row) + +select subtime('-838:59:59', '25:00'); +WARNING: time field value out of range +CONTEXT: referenced column: 
subtime + subtime +------------ + -838:59:59 +(1 row) + +select subtime('839:59:59', '837:59:59'); +WARNING: date/time field value out of range: "839:59:59" +CONTEXT: referenced column: subtime + subtime +---------- + 01:00:00 +(1 row) + +select subtime('-837:59:59', '-839:59:59') ; +WARNING: date/time field value out of range: "-839:59:59" +CONTEXT: referenced column: subtime + subtime +---------- + 01:00:00 +(1 row) + +select subtime(8385959,'-1:00:00'); +WARNING: time field value out of range +CONTEXT: referenced column: subtime + subtime +----------- + 838:59:59 +(1 row) + +select subtime(8375959.9999999,'-1:00:00'); +WARNING: time field value out of range +CONTEXT: referenced column: subtime + subtime +----------- + 838:59:59 +(1 row) + +select subtime(-8385959,'1:00:00'); +WARNING: time field value out of range +CONTEXT: referenced column: subtime + subtime +------------ + -838:59:59 +(1 row) + +-- éžä¸¥æ ¼æ¨¡å¼ä¸‹çš„写测试 +truncate table func_test2; +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into func_test2(functionName, result) values('subtime(''838:59:59'', ''-25:00'')', subtime('838:59:59', '-25:00')); +WARNING: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(''-838:59:59'', ''25:00'')', subtime('-838:59:59', '25:00')); +WARNING: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(''839:59:59'', ''837:59:59'')', subtime('839:59:59', '837:59:59')); +WARNING: date/time field value out of range: "839:59:59" +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(''-837:59:59'', ''-839:59:59'')', subtime('-837:59:59', '-839:59:59')); +WARNING: date/time field value out of range: "-839:59:59" +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(8385959, ''-1:00:00'')', 
subtime(8385959,'-1:00:00')); +WARNING: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(8375959.9999999, ''-1:00:00'')', subtime(8375959.9999999,'-1:00:00')); +WARNING: time field value out of range +CONTEXT: referenced column: result +insert into func_test2(functionName, result) values('subtime(-8385959, ''1:00:00'')', subtime(-8385959,'1:00:00')); +WARNING: time field value out of range +CONTEXT: referenced column: result +select * from func_test2 order by functionName; + functionName | result +--------------------------------------+------------ + subtime('-837:59:59', '-839:59:59') | 01:00:00 + subtime(8375959.9999999, '-1:00:00') | 838:59:59 + subtime(-8385959, '1:00:00') | -838:59:59 + subtime(8385959, '-1:00:00') | 838:59:59 + subtime('-838:59:59', '25:00') | -838:59:59 + subtime('838:59:59', '-25:00') | 838:59:59 + subtime('839:59:59', '837:59:59') | 01:00:00 +(7 rows) + drop schema b_time_funcs2 cascade; NOTICE: drop cascades to table func_test2 reset current_schema; diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 6591f2f45..9255c2660 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -328,7 +328,7 @@ WARNING: value "4294967295" is out of range for type integer LINE 1: select '4294967295'::int4::time; ^ CONTEXT: referenced column: time -ERROR: Incorrect time value +ERROR: time out of range CONTEXT: referenced column: time select '4294967295'::int8::time; WARNING: invalid input syntax for type time: "4294967295" diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 8cd514a58..b369b8caf 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -155,6 +155,9 @@ static inline bool non_zero_date(const pg_tm *ltime) return ltime->tm_year || 
ltime->tm_mon || ltime->tm_mday; } +extern TimeADT adjust_time_range_with_warn(TimeADT time, bool can_ignore); +extern "C" DLL_PUBLIC Datum time_cast_implicit(PG_FUNCTION_ARGS); + #endif extern Datum datetime_text(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index ccd4b24e2..98c8ebd27 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -1690,14 +1690,14 @@ Datum time_cast(PG_FUNCTION_ARGS) return time_internal(fcinfo, input_str, TIME_CAST, &time_error_type); } - Datum time_cast_implicit(PG_FUNCTION_ARGS) { char* input_str = DatumGetCString(textout(fcinfo)); - return DirectFunctionCall1(time_in, CStringGetDatum(input_str)); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, input_str, TIME_CAST_IMPLICIT, &time_error_type); + return datum_internal; } - char* parser_function_input(Datum txt, Oid oid) { Oid typeOutput; @@ -1805,7 +1805,8 @@ Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorTy } else if (SQL_MODE_NOT_STRICT_ON_INSERT()) { /* for case insert unavailable data, need to set the unavailable data to 0 to compatible with M */ DateTimeParseError(dterr, str, "time", true); - if (IsResetUnavailableDataTime(dterr, !SQL_MODE_STRICT() && !CMD_TAG_IS_SELECT())) { + if (IsResetUnavailableDataTime(dterr, !CMD_TAG_IS_SELECT() && + time_cast_type != TIME_CAST_IMPLICIT)) { *time_error_type = TIME_IGNORED_INCORRECT; PG_RETURN_TIMEADT(0); } else { @@ -4089,7 +4090,7 @@ bool check_pg_tm_time_part(pg_tm *tm, fsec_t fsec) * @in fsec - fractional second part in TIME * NOTIC: Ensure that all time parts in tm are not out of range before input by check_pg_tm_time_part() * - * @return TRUE if time is out of range, false otherwise. + * @return false if time is out of range, true otherwise. 
*/ bool check_pg_tm_time_range(pg_tm *tm, fsec_t fsec) { diff --git a/contrib/dolphin/plugin_utils/adt/datetime.cpp b/contrib/dolphin/plugin_utils/adt/datetime.cpp index 70c323beb..b7f0498ed 100644 --- a/contrib/dolphin/plugin_utils/adt/datetime.cpp +++ b/contrib/dolphin/plugin_utils/adt/datetime.cpp @@ -2308,7 +2308,10 @@ int ValidateTimeForBDatabase(bool timeIn24, struct pg_tm* tm, fsec_t* fsec) if (tm->tm_hour >= B_FORMAT_TIME_BOUND || (tm->tm_hour == B_FORMAT_TIME_BOUND - 1 && tm->tm_min == MINS_PER_HOUR - 1 && tm->tm_sec == SECS_PER_MINUTE - 1 && *fsec)) { - ereport(ERROR, (errcode(DTERR_FIELD_OVERFLOW), errmsg("Incorrect time value"))); + // we cannot throw an exception directly for some cases, + // for example: function call or insert in non-strict mode + // so we just need to return DTERR_FIELD_OVERFLOW and let it be handled by the caller + return DTERR_FIELD_OVERFLOW; } } return 0; } diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index e1a673037..8100570d3 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -7087,9 +7087,16 @@ Oid convert_cstring_to_datetime_time(const char* str, Timestamp *datetime, TimeA } /* Not a timestamp. 
Try to convert str to time*/ - *time = DatumGetTimeADT( - DirectFunctionCall3(time_in, CStringGetDatum(start), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); - check_b_format_time_range_with_ereport(*time, can_ignore, result_isnull); + if (can_ignore) { + *time = DatumGetTimeADT( + DirectFunctionCall3Coll(time_cast_implicit, InvalidOid, PointerGetDatum(cstring_to_text(start)), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); + *time = adjust_time_range_with_warn(*time, can_ignore); + } else { + *time = DatumGetTimeADT( + DirectFunctionCall3(time_in, CStringGetDatum(start), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); + check_b_format_time_range_with_ereport(*time, can_ignore, result_isnull); + } return TIMEOID; } @@ -7327,7 +7334,7 @@ Datum subtime(PG_FUNCTION_ARGS) val_type1 = get_fn_expr_argtype(fcinfo->flinfo, 0); val_type2 = get_fn_expr_argtype(fcinfo->flinfo, 1); - val_type1 = convert_to_datetime_time(PG_GETARG_DATUM(0), val_type1, &datetime1, &time1); + val_type1 = convert_to_datetime_time(PG_GETARG_DATUM(0), val_type1, &datetime1, &time1, true); switch (val_type2) { case TIMESTAMPOID: @@ -7339,7 +7346,7 @@ Datum subtime(PG_FUNCTION_ARGS) val_type2 = TIMEOID; break; default: - val_type2 = convert_to_datetime_time(PG_GETARG_DATUM(1), val_type2, &datetime2, &time2); + val_type2 = convert_to_datetime_time(PG_GETARG_DATUM(1), val_type2, &datetime2, &time2, true); if (val_type2 == TIMESTAMPOID) { PG_RETURN_NULL(); } @@ -7351,7 +7358,7 @@ Datum subtime(PG_FUNCTION_ARGS) time1 = 0; /* time set to 00:00:00 */ case TIMEOID: { res_time = time1 - time2; - check_b_format_time_range_with_ereport(res_time); + res_time = adjust_time_range_with_warn(res_time, fcinfo->can_ignore); return DirectFunctionCall1(time_text, TimeGetDatum(res_time)); } case TIMESTAMPOID: { @@ -7375,6 +7382,32 @@ Datum subtime(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +TimeADT adjust_time_range_with_warn(TimeADT time, bool can_ignore) +{ + int errlevel = (SQL_MODE_STRICT() && !can_ignore) ? 
ERROR : WARNING; + pg_tm result_tt = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + pg_tm* result_tm = &result_tt; + fsec_t fsec; + TimeADT time_result; + int32 timeSign = 1; + + if (time < 0) { + timeSign = -1; + time = -time; + } + + time2tm(time, result_tm, &fsec); + bool warning = false; + adjust_time_range(result_tm, fsec, warning); + if (warning == true) { + ereport(errlevel, (errcode(ERRCODE_DATETIME_FIELD_OVERFLOW), + errmsg("time field value out of range"))); + } + tm2time(result_tm, fsec, &time_result); + return timeSign == 1 ? time_result : -time_result; +} + + static bool get_time(Oid val_type, Datum value) { if (val_type == BITOID) { diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs2.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs2.sql index 601d9e368..9ad5ef370 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs2.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs2.sql @@ -64,6 +64,9 @@ insert into func_test2(functionName, result) values('subtime(''838:59:59'', ''-2 insert into func_test2(functionName, result) values('subtime(''-838:59:59'', ''25:00'')', subtime('-838:59:59', '25:00') ); insert into func_test2(functionName, result) values('subtime(''839:59:59'', ''837:59:59'')', subtime('839:59:59', '837:59:59') ); insert into func_test2(functionName, result) values('subtime(''-837:59:59'', ''-839:59:59'')', subtime('-837:59:59', '-839:59:59') ); +insert into func_test2(functionName, result) values('subtime(8385959, ''-1:00:00'')', subtime(8385959,'-1:00:00')); +insert into func_test2(functionName, result) values('subtime(8375959.9999999, ''-1:00:00'')', subtime(8375959.9999999,'-1:00:00')); +insert into func_test2(functionName, result) values('subtime(-8385959, ''1:00:00'')', subtime(-8385959,'1:00:00')); -- dateæ ¼å¼å­—符串 insert into func_test2(functionName, result) values('subtime(''2000-01-01'', ''2022-01-01'')', subtime('2000-01-01', '2022-01-01') ); -- 
éžå­—ç¬¦ä¸²ç±»åž‹å‚æ•°ç”¨ä¾‹ @@ -522,5 +525,27 @@ drop table t1; drop table t2; select * from func_test2; + +-- å•独select边界测试 +select subtime('838:59:59', '-25:00'); +select subtime('-838:59:59', '25:00'); +select subtime('839:59:59', '837:59:59'); +select subtime('-837:59:59', '-839:59:59') ; +select subtime(8385959,'-1:00:00'); +select subtime(8375959.9999999,'-1:00:00'); +select subtime(-8385959,'1:00:00'); + +-- éžä¸¥æ ¼æ¨¡å¼ä¸‹çš„写测试 +truncate table func_test2; +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into func_test2(functionName, result) values('subtime(''838:59:59'', ''-25:00'')', subtime('838:59:59', '-25:00')); +insert into func_test2(functionName, result) values('subtime(''-838:59:59'', ''25:00'')', subtime('-838:59:59', '25:00')); +insert into func_test2(functionName, result) values('subtime(''839:59:59'', ''837:59:59'')', subtime('839:59:59', '837:59:59')); +insert into func_test2(functionName, result) values('subtime(''-837:59:59'', ''-839:59:59'')', subtime('-837:59:59', '-839:59:59')); +insert into func_test2(functionName, result) values('subtime(8385959, ''-1:00:00'')', subtime(8385959,'-1:00:00')); +insert into func_test2(functionName, result) values('subtime(8375959.9999999, ''-1:00:00'')', subtime(8375959.9999999,'-1:00:00')); +insert into func_test2(functionName, result) values('subtime(-8385959, ''1:00:00'')', subtime(-8385959,'1:00:00')); +select * from func_test2 order by functionName; + drop schema b_time_funcs2 cascade; reset current_schema; -- Gitee From 868a3702c4671be250ad80b08cecb8dd18db80b5 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Tue, 21 Nov 2023 09:22:47 +0800 Subject: [PATCH 069/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3timescaleDB=E8=87=AA?= =?UTF-8?q?=E5=AE=9A=E4=B9=89=E6=A8=A1=E5=BC=8F=E5=90=8D=E5=88=9B=E5=BB=BA?= =?UTF-8?q?=E8=B6=85=E8=A1=A8=E5=A4=B1=E8=B4=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
contrib/timescaledb/src/indexing.cpp | 2 +- contrib/timescaledb/tsl/src/compression/create.cpp | 2 +- contrib/timescaledb/tsl/src/continuous_aggs/create.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/timescaledb/src/indexing.cpp b/contrib/timescaledb/src/indexing.cpp index 2ff689a2c..c4ff69b80 100644 --- a/contrib/timescaledb/src/indexing.cpp +++ b/contrib/timescaledb/src/indexing.cpp @@ -140,7 +140,7 @@ create_default_index(Hypertable *ht, List *indexelems) IndexStmt stmt = { .type = T_IndexStmt, .missing_ok= false, - .schemaname = "public", + .schemaname = NameStr(ht->fd.schema_name), .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, diff --git a/contrib/timescaledb/tsl/src/compression/create.cpp b/contrib/timescaledb/tsl/src/compression/create.cpp index 7b118700d..66441c3f9 100644 --- a/contrib/timescaledb/tsl/src/compression/create.cpp +++ b/contrib/timescaledb/tsl/src/compression/create.cpp @@ -396,7 +396,7 @@ create_compressed_table_indexes(Oid compresstable_relid, CompressColInfo *compre IndexStmt stmt = { .type = T_IndexStmt, .missing_ok = false, - .schemaname = "public", + .schemaname = NameStr(ht->fd.schema_name), .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, diff --git a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp index 8bbb03a29..7f3613dec 100644 --- a/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp +++ b/contrib/timescaledb/tsl/src/continuous_aggs/create.cpp @@ -395,7 +395,7 @@ mattablecolumninfo_add_mattable_index(MatTableColumnInfo *matcolinfo, Hypertable IndexStmt stmt = { .type = T_IndexStmt, .missing_ok = false, - .schemaname = "public", + .schemaname = NameStr(ht->fd.schema_name), .idxname = NULL, .relation = makeRangeVar(NameStr(ht->fd.schema_name), 
NameStr(ht->fd.table_name), 0), .accessMethod = DEFAULT_INDEX_TYPE, -- Gitee From dd39c4ac02a169ff64e7ba956249d7fb3e6e7427 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 21 Nov 2023 20:17:34 +0800 Subject: [PATCH 070/434] Sync server code. 68b573b9207d270a8a59f4c3428cbdec4dd7462e --- contrib/dolphin/include/builtin_funcs.ini | 12 ++ .../dolphin/include/plugin_nodes/parsenodes.h | 6 +- contrib/dolphin/plugin_executor/execQual.cpp | 21 ++- .../dolphin/plugin_optimizer/plan/planner.cpp | 2 + contrib/dolphin/plugin_parser/analyze.cpp | 20 +- contrib/dolphin/plugin_parser/gram.y | 8 +- .../dolphin/plugin_parser/parse_clause.cpp | 5 +- contrib/dolphin/plugin_parser/parser.cpp | 8 + .../dolphin/plugin_utils/adt/formatting.cpp | 3 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 173 ++++++++++++++---- 10 files changed, 202 insertions(+), 56 deletions(-) mode change 100644 => 100755 contrib/dolphin/include/builtin_funcs.ini diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini old mode 100644 new mode 100755 index 6d6c97754..b09a63b67 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -4025,6 +4025,18 @@ "gs_walwriter_flush_stat", 1, AddBuiltinFunc(_0(2863), _1("gs_walwriter_flush_stat"), _2(1), _3(false), _4(true), _5(gs_walwriter_flush_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 23), _21(17, 23, 28, 28, 28, 28, 31, 31, 31, 31, 28, 28, 31, 31, 28, 28, 1184, 1184), _22(17, 'i','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(17, "operation", "write_times", "sync_times", "total_xlog_sync_bytes", "total_actual_xlog_sync_bytes", "avg_write_bytes", "avg_actual_write_bytes", "avg_sync_bytes", "avg_actual_sync_bytes", "total_write_time", "total_sync_time", "avg_write_time", "avg_sync_time", 
"curr_init_xlog_segno", "curr_open_xlog_segno", "last_reset_time", "curr_time"), _24(NULL), _25("gs_walwriter_flush_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_stat_walsender", 1, + AddBuiltinFunc(_0(2864), _1("gs_stat_walsender"), _2(1), _3(false), _4(true), _5(gs_stat_walsender), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 23), _21(10, 23, 16, 25, 1184, 28, 1184, 1184, 1184, 28, 28), _22(10, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "operation", "is_enable_stat", "channel", "cur_time", "send_times", "first_send_time", "last_send_time", "last_reset_time", "avg_send_interval", "since_last_send_interval"), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location -1 :constvalue 4 [ 2 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_stat_walsender"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_stat_walreceiver", 1, + AddBuiltinFunc(_0(2865), _1("gs_stat_walreceiver"), _2(1), _3(false), _4(true), _5(gs_stat_walreceiver), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 23), _21(11, 23, 16, 28, 28, 28, 28, 28, 1184, 1184, 1184, 1184), _22(11, 'i','o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "operation", "is_enable_stat", "buffer_current_size", "buffer_full_times", "wake_writer_times", 
"avg_wake_interval", "since_last_wake_interval", "first_wake_time", "last_wake_time", "last_reset_time", "cur_time"), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location -1 :constvalue 4 [ 2 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_stat_walreceiver"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_stat_walrecvwriter", 1, + AddBuiltinFunc(_0(2868), _1("gs_stat_walrecvwriter"), _2(1), _3(false), _4(true), _5(gs_stat_walrecvwriter), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 23), _21(16, 23, 16, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 1184, 1184), _22(16, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(16, "operation", "is_enable_stat", "total_write_bytes", "write_times", "total_write_time", "avg_write_time", "avg_write_bytes", "total_sync_bytes", "sync_times", "total_sync_time", "avg_sync_time", "avg_sync_bytes", "current_xlog_segno", "newest_xlog_segno", "last_reset_time", "cur_time"), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location -1 :constvalue 4 [ 2 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_stat_walrecvwriter"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "gs_xlogdump_lsn", 1, AddBuiltinFunc(_0(2619), _1("gs_xlogdump_lsn"), 
_2(2), _3(true), _4(false), _5(gs_xlogdump_lsn), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 25, 25), _21(3, 25, 25, 25), _22(3, 'i', 'i', 'o'), _23(3, "start_lsn", "end_lsn", "output_filepath"), _24(NULL), _25("gs_xlogdump_lsn"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("dump xlog records to output file based on the given start_lsn and end_lsn"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index e2e6e1c12..3bf2e80b0 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -34,10 +34,8 @@ #include "tcop/dest.h" #include "nodes/parsenodes_common.h" -#ifdef USE_SPQ #define CURSOR_OPT_SPQ_OK 0x0200 /* SPQ Execution */ #define CURSOR_OPT_SPQ_FORCE 0x0400 /* Force to generate a SPQ plan */ -#endif /* * Relids @@ -498,6 +496,10 @@ typedef struct WindowClause { Node* endOffset; /* expression for ending bound, if any */ Index winref; /* ID referenced by window functions */ bool copiedOrder; /* did we copy orderClause from refname? 
*/ +#ifdef USE_SPQ + bool rePartitionSPQ; /* did we reassign the tleSortGroupRef when constructing partition Clause */ + bool reOrderSPQ; /* did we reassign the tleSortGroupRef when constructing order Clause */ +#endif } WindowClause; /* diff --git a/contrib/dolphin/plugin_executor/execQual.cpp b/contrib/dolphin/plugin_executor/execQual.cpp index d75c721ec..88ec8af89 100644 --- a/contrib/dolphin/plugin_executor/execQual.cpp +++ b/contrib/dolphin/plugin_executor/execQual.cpp @@ -627,6 +627,9 @@ static Datum ExecEvalScalarVar(ExprState* exprstate, ExprContext* econtext, bool /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being scanned */ slot = econtext->ecxt_scantuple; + if (u_sess->parser_cxt.in_userset) { + u_sess->parser_cxt.has_set_uservar = true; + } break; } @@ -726,6 +729,9 @@ static Datum ExecEvalScalarVarFast(ExprState* exprstate, ExprContext* econtext, /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being scanned */ slot = econtext->ecxt_scantuple; + if (u_sess->parser_cxt.in_userset) { + u_sess->parser_cxt.has_set_uservar = true; + } break; } @@ -1110,6 +1116,9 @@ static Datum ExecEvalConst(ExprState* exprstate, ExprContext* econtext, bool* is } else { con = makeConst(UNKNOWNOID, -1, InvalidOid, -2, (Datum)0, true, false); } + if (u_sess->parser_cxt.in_userset) { + u_sess->parser_cxt.has_set_uservar = true; + } } else if (IsA(exprstate->expr, SetVariableExpr)) { SetVariableExpr* setvar = (SetVariableExpr*)transformSetVariableExpr((SetVariableExpr*)exprstate->expr); con = (Const*)setvar->value; @@ -1296,6 +1305,9 @@ static Datum ExecEvalUserSetElm(ExprState* exprstate, ExprContext* econtext, boo Node* res = NULL; char* value = NULL; + if (nodeTag(usestate->instate) != T_CaseExprState && DB_IS_CMPT(B_FORMAT)) + u_sess->parser_cxt.in_userset = true; + Datum result = ExecEvalExpr(usestate->instate, econtext, isNull, isDone); if (*isNull) { @@ -1335,6 +1347,7 @@ static Datum 
ExecEvalUserSetElm(ExprState* exprstate, ExprContext* econtext, boo } check_set_user_message(&elemcopy); + u_sess->parser_cxt.in_userset = false; return result; } @@ -1352,6 +1365,10 @@ static Datum ExecEvalParamExec(ExprState* exprstate, ExprContext* econtext, bool if (isDone != NULL) *isDone = ExprSingleResult; + + if (u_sess->parser_cxt.in_userset) { + u_sess->parser_cxt.has_set_uservar = true; + } /* * PARAM_EXEC params (internal executor parameters) are stored in the @@ -1362,7 +1379,9 @@ static Datum ExecEvalParamExec(ExprState* exprstate, ExprContext* econtext, bool /* Parameter not evaluated yet, so go do it */ ExecSetParamPlan((SubPlanState*)prm->execPlan, econtext); /* ExecSetParamPlan should have processed this param... */ - Assert(prm->execPlan == NULL); + if (!u_sess->parser_cxt.has_set_uservar || !DB_IS_CMPT(B_FORMAT)) { + Assert(prm->execPlan == NULL); + } prm->isConst = true; prm->valueType = expression->paramtype; } diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index 7e4b4aa56..dd41a3a9a 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -529,6 +529,8 @@ PlannedStmt* standard_planner(Query* parse, int cursorOptions, ParamListInfo bou bool use_tenant = false; List* parse_hint_warning = NIL; + if (cursorOptions & CURSOR_OPT_SPQ_OK) + cursorOptions &= ~CURSOR_OPT_SPQ_OK; //if it is pgxc plan for tsstore delete sql.errport if((!u_sess->attr.attr_sql.enable_stream_operator || !u_sess->opt_cxt.is_stream) && IS_PGXC_COORDINATOR) { checkTsstoreQuery(parse); diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index b39e3b108..e7c2bb470 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -1666,19 +1666,21 @@ static void SetUpsertAttrnoState(ParseState* pstate, List *targetList) for (int ni = 0; ni < len; ++ni) { 
ResTarget* res = (ResTarget*)lfirst(target); char* name = nullptr; - if (list_length(res->indirection) > 0) { - name = ((Value*)llast(res->indirection))->val.str; + if (list_length(res->indirection) > 0 && IsA(linitial(res->indirection), String)) { + name = strVal(linitial(res->indirection)); } else { name = res->name; } - for (int ci = 0; ci < colNum; ++ci) { - if (attr[ci].attisdropped) { - continue; - } - if (strcmp(name, attr[ci].attname.data) == 0) { - rstate->usExplicitAttrNos[ni] = ci + 1; - break; + if (name != NULL) { + for (int ci = 0; ci < colNum; ++ci) { + if (attr[ci].attisdropped) { + continue; + } + if (strcmp(name, attr[ci].attname.data) == 0) { + rstate->usExplicitAttrNos[ni] = ci + 1; + break; + } } } diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 31de68a9c..d6a428bc3 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -14572,7 +14572,7 @@ CreateTrigStmt: { ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("syntax error."))); + errmsg("or replace is not supported here."), parser_errposition(@2))); } if ($3 != NULL) { @@ -14644,8 +14644,7 @@ CreateTrigStmt: { ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("syntax error."))); - } + errmsg("or replace is not supported here."), parser_errposition(@2))); } if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) { ereport(errstate, @@ -14688,7 +14687,7 @@ CreateTrigStmt: { ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("syntax error."))); + errmsg("or replace is not supported here."), parser_errposition(@2))); } if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) { @@ -32605,6 +32604,7 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } errmsg("@var_name := expr is not yet supported in distributed database."))); #endif if (DB_IS_CMPT(B_FORMAT) && (u_sess->attr.attr_common.enable_set_variable_b_format || ENABLE_SET_VARIABLES)) { + u_sess->parser_cxt.has_equal_uservar 
= true; UserSetElem *n = makeNode(UserSetElem); n->name = list_make1((Node *)$1); n->val = (Expr *)$3; diff --git a/contrib/dolphin/plugin_parser/parse_clause.cpp b/contrib/dolphin/plugin_parser/parse_clause.cpp index bbcf35630..32b309506 100644 --- a/contrib/dolphin/plugin_parser/parse_clause.cpp +++ b/contrib/dolphin/plugin_parser/parse_clause.cpp @@ -2590,10 +2590,7 @@ bool has_not_null_constraint(ParseState* pstate,TargetEntry* tle) HeapTuple atttuple = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(reloid), Int16GetDatum(attno)); if (!HeapTupleIsValid(atttuple)) { - Assert(0); - ereport(ERROR, - (errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for attribute %u of relation %hd", reloid, attno))); + return false; } Form_pg_attribute attStruct = (Form_pg_attribute)GETSTRUCT(atttuple); bool attHasNotNull = attStruct->attnotnull; diff --git a/contrib/dolphin/plugin_parser/parser.cpp b/contrib/dolphin/plugin_parser/parser.cpp index 0cff866c8..18d26a9b5 100644 --- a/contrib/dolphin/plugin_parser/parser.cpp +++ b/contrib/dolphin/plugin_parser/parser.cpp @@ -55,6 +55,11 @@ static void resetForbidTruncateFlag() u_sess->parser_cxt.isForbidTruncate = false; } +static void resetHasSetUservarFlag() +{ + u_sess->parser_cxt.has_set_uservar = false; +} + /* * raw_parser * Given a query in string form, do lexical and grammatical analysis. 
@@ -82,6 +87,9 @@ List* raw_parser(const char* str, List** query_string_locationlist) /* reset u_sess->parser_cxt.isForbidTruncate */ resetForbidTruncateFlag(); + /* reset u_sess->parser_cxt.has_set_uservar */ + resetHasSetUservarFlag(); + /* initialize the flex scanner */ yyscanner = scanner_init(str, &yyextra.core_yy_extra, &ScanKeywords, ScanKeywordTokens); diff --git a/contrib/dolphin/plugin_utils/adt/formatting.cpp b/contrib/dolphin/plugin_utils/adt/formatting.cpp index 3fd9fc3b9..77d9f64eb 100644 --- a/contrib/dolphin/plugin_utils/adt/formatting.cpp +++ b/contrib/dolphin/plugin_utils/adt/formatting.cpp @@ -5295,8 +5295,7 @@ static NUMCacheEntry* NUM_cache_search(const char* str) static void NUM_cache_remove(NUMCacheEntry* ent) { if (ent == NULL) { - ereport(ERROR, - (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("The input NUMCacheEntry is invalid, which is Null."))); + return; } #ifdef DEBUG_TO_FROM_CHAR elog(DEBUG_elog_output, "REMOVING ENTRY (%s)", ent->str); diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index 4f2abd977..8491cc6bf 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -292,6 +292,9 @@ static void get_basic_select_query(Query* query, deparse_context* context, Tuple static void get_target_list(Query* query, List* targetList, deparse_context* context, TupleDesc resultDesc); static void get_setop_query(Node* setOp, Query* query, deparse_context* context, TupleDesc resultDesc); static Node* get_rule_sortgroupclause(Index ref, List* tlist, bool force_colno, deparse_context* context); +#ifdef USE_SPQ +static Node* get_rule_sortgroupclause_spq(Index ref, bool force_colno, deparse_context* context); +#endif static void get_rule_groupingset(GroupingSet* gset, List* targetlist, deparse_context* context); static void get_rule_orderby(List* orderList, List* targetList, bool force_colno, deparse_context* context); static void 
get_rule_windowclause(Query* query, deparse_context* context); @@ -6937,6 +6940,59 @@ static Node* get_rule_sortgroupclause(Index ref, List* tlist, bool force_colno, return expr; } +#ifdef USE_SPQ +/* + * Display a sort/group clause. + * + * Also returns the expression tree, so caller need not find it again. + */ + +static Node* get_rule_sortgroupclause_spq(Index ref, bool force_colno, deparse_context* context) +{ + StringInfo buf = context->buf; + TargetEntry* tle = NULL; + Node* expr = NULL; + List* tlist; + + deparse_namespace* dpns_spq = (deparse_namespace*)linitial(context->namespaces); + PlanState* ps = dpns_spq->planstate; + WindowAgg* node = NULL; + node = (WindowAgg*)ps->plan; + tlist = node->plan.lefttree->targetlist; + + if (tlist == NULL){ + return expr; + } + + tle = get_sortgroupref_tle_spq(ref, tlist); + expr = (Node*)tle->expr; + + deparse_namespace* dpns = NULL; + deparse_namespace save_dpns; + + dpns = (deparse_namespace*)list_nth(context->namespaces, ((Var*)expr)->varlevelsup); + push_child_plan(dpns, dpns->outer_planstate, &save_dpns); + + + /* + * Use column-number form if requested by caller. Otherwise, if + * expression is a constant, force it to be dumped with an explicit cast + * as decoration --- this is because a simple integer constant is + * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we + * dump it without any decoration. Otherwise, just dump the expression + * normally. + */ + if (force_colno || context->sortgroup_colno) { + Assert(!tle->resjunk); + appendStringInfo(buf, "%d", tle->resno); + } else if (expr && IsA(expr, Var)) + get_rule_expr(expr, context, true); + + pop_child_plan(dpns, &save_dpns); + + return expr; +} +#endif /* * @Description: Display a GroupingSet. 
@@ -7013,7 +7069,15 @@ static void get_rule_orderby(List* orderList, List* targetList, bool force_colno TypeCacheEntry* typentry = NULL; appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && (list_length(context->windowClause) > 0) && + lfirst(list_head(context->windowClause)) != NULL && + ((WindowClause *)lfirst(list_head(context->windowClause)))->reOrderSPQ) { + sortexpr = get_rule_sortgroupclause_spq(srt->tleSortGroupRef, force_colno, context); + } else +#endif + sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); + sortcoltype = exprType(sortexpr); /* See whether operator is default < or > for datatype */ typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); @@ -7095,7 +7159,12 @@ static void get_rule_windowspec(WindowClause* wc, List* targetList, deparse_cont SortGroupClause* grp = (SortGroupClause*)lfirst(l); appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && wc->rePartitionSPQ) { + get_rule_sortgroupclause_spq(grp->tleSortGroupRef, false, context); + } else +#endif + get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); sep = ", "; } needspace = true; @@ -7194,7 +7263,13 @@ static void get_rule_windowspec_listagg(WindowClause* wc, List* targetList, depa SortGroupClause* grp = (SortGroupClause*)lfirst(l); appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && wc->rePartitionSPQ) { + get_rule_sortgroupclause_spq(grp->tleSortGroupRef, false, context); + } else +#endif + get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); + sep = ", "; } needspace = true; @@ -10854,23 +10929,38 @@ static bool construct_partitionClause(WindowAgg* 
node, WindowClause* wc) * ressortgroupref refers to windowagg's tlist * partColIdx refers to subplan's tlist */ - ListCell *lc = NULL; - foreach(lc, node->plan.targetlist) { - TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); - if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && - _equalSimpleVar(tle->expr, window_agg_te->expr)) { - if (window_agg_te->ressortgroupref > 0) { - partcl->tleSortGroupRef = window_agg_te->ressortgroupref; - /* found it */ - break; +#ifdef USE_SPQ + wc->rePartitionSPQ = false; + if (IS_SPQ_COORDINATOR) { + if (IsA(tle->expr, Var)) { + Var* tle_expr = (Var*)tle->expr; + partcl->tleSortGroupRef = tle_expr->varattno; + wc->rePartitionSPQ = true; + } else { + list_free_ext(partitionClause); + return false; + } + } else +#endif + { + ListCell *lc = NULL; + foreach(lc, node->plan.targetlist) { + TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); + if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && + _equalSimpleVar(tle->expr, window_agg_te->expr)) { + if (window_agg_te->ressortgroupref > 0) { + partcl->tleSortGroupRef = window_agg_te->ressortgroupref; + /* found it */ + break; + } } } - } - if (lc == NULL) { - /* not found */ - list_free_ext(partitionClause); - return false; + if (lc == NULL) { + /* not found */ + list_free_ext(partitionClause); + return false; + } } partcl->eqop = node->partOperators[i]; @@ -10917,26 +11007,41 @@ static void construct_windowClause(deparse_context* context) } /* - * ressortgroupref refers to windowagg's tlist - * partColIdx refers to subplan's tlist - */ - ListCell *lc = NULL; - foreach(lc, node->plan.targetlist) { - TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); - if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && - _equalSimpleVar(tle->expr, window_agg_te->expr)) { - if (window_agg_te->ressortgroupref > 0) { - sortcl->tleSortGroupRef = window_agg_te->ressortgroupref; - /* found it */ - break; + * ressortgroupref refers to windowagg's tlist + * partColIdx refers to 
subplan's tlist + */ +#ifdef USE_SPQ + wc->reOrderSPQ = false; + if (IS_SPQ_COORDINATOR) { + if (IsA(tle->expr, Var)) { + Var* tle_expr = (Var*)tle->expr; + sortcl->tleSortGroupRef = tle_expr->varattno; + wc->reOrderSPQ = true; + } else { + list_free_ext(orderClause); + return; + } + } else +#endif + { + ListCell *lc = NULL; + foreach(lc, node->plan.targetlist) { + TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); + if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && + _equalSimpleVar(tle->expr, window_agg_te->expr)) { + if (window_agg_te->ressortgroupref > 0) { + sortcl->tleSortGroupRef = window_agg_te->ressortgroupref; + /* found it */ + break; + } } } - } - if (lc == NULL) { - list_free_ext(orderClause); - /* not found */ - return; + if (lc == NULL) { + list_free_ext(orderClause); + /* not found */ + return; + } } sortcl->sortop = node->ordOperators[i]; -- Gitee From e8667a1529136b44c3ced6ba6bb276fd499cd6b5 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 21 Nov 2023 21:04:17 +0800 Subject: [PATCH 071/434] Fix test case unstable. 
--- .../dolphin/input/test_function_ddl_import_and_export.source | 4 ++-- .../dolphin/output/test_function_ddl_import_and_export.source | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/dolphin/input/test_function_ddl_import_and_export.source b/contrib/dolphin/input/test_function_ddl_import_and_export.source index 6ebaff87b..06ba53913 100644 --- a/contrib/dolphin/input/test_function_ddl_import_and_export.source +++ b/contrib/dolphin/input/test_function_ddl_import_and_export.source @@ -1,8 +1,8 @@ --create function测试 drop database if exists dump_function_db; drop database if exists restore_function_db; -create database dump_function_db with dbcompatibility = 'B'; -create database restore_function_db with dbcompatibility = 'B'; +create database dump_function_db lc_collate='C' dbcompatibility = 'B'; +create database restore_function_db lc_collate='C' dbcompatibility = 'B'; \c dump_function_db create user test_function_definer password 'Test@123'; diff --git a/contrib/dolphin/output/test_function_ddl_import_and_export.source b/contrib/dolphin/output/test_function_ddl_import_and_export.source index 45a97188a..339d9171d 100644 --- a/contrib/dolphin/output/test_function_ddl_import_and_export.source +++ b/contrib/dolphin/output/test_function_ddl_import_and_export.source @@ -3,8 +3,8 @@ drop database if exists dump_function_db; NOTICE: database "dump_function_db" does not exist, skipping drop database if exists restore_function_db; NOTICE: database "restore_function_db" does not exist, skipping -create database dump_function_db with dbcompatibility = 'B'; -create database restore_function_db with dbcompatibility = 'B'; +create database dump_function_db lc_collate='C' dbcompatibility = 'B'; +create database restore_function_db lc_collate='C' dbcompatibility = 'B'; \c dump_function_db create user test_function_definer password 'Test@123'; NOTICE: The iteration value of password is not recommended.Setting the iteration value too small reduces the 
security of the password, and setting it too large results in performance degradation. -- Gitee From 261bb47c2a1bf0ad40d26e4b06c8fb255311bc09 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Wed, 22 Nov 2023 09:40:30 +0800 Subject: [PATCH 072/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8F=AF=E9=80=9A?= =?UTF-8?q?=E8=BF=87=20INTERVAL=20'0=20day'=E6=96=B9=E5=BC=8F=E8=AE=BE?= =?UTF-8?q?=E7=BD=AE=E4=B8=BA0=EF=BC=8C=E6=9C=AA=E5=90=88=E7=90=86?= =?UTF-8?q?=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/og-timescaledb1.7.4.sql | 2 +- contrib/timescaledb/src/dimension.cpp | 25 +++++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 71fce18e1..74040b0e0 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -1959,7 +1959,7 @@ WITH ht_size as ( bsize.total_bytes FROM _timescaledb_catalog.hypertable ht LEFT OUTER JOIN pg_tables t ON ht.table_name=t.tablename AND ht.schema_name=t.schemaname - --tsbd 暂时注释 + --tsdb 暂时注释 LEFT OUTER JOIN @extschema@.hypertable_relation_size( NULL ) diff --git a/contrib/timescaledb/src/dimension.cpp b/contrib/timescaledb/src/dimension.cpp index 2714178b1..862e71207 100644 --- a/contrib/timescaledb/src/dimension.cpp +++ b/contrib/timescaledb/src/dimension.cpp @@ -554,8 +554,14 @@ dimension_tuple_update(TupleInfo *ti, void *data) heap_deform_tuple(ti->tuple, ti->desc, values, nulls); - Assert((dim->fd.num_slices <= 0 && dim->fd.interval_length > 0) || - (dim->fd.num_slices > 0 && dim->fd.interval_length <= 0)); + if (!((dim->fd.num_slices <= 0 && dim->fd.interval_length > 0) || + (dim->fd.num_slices > 0 && dim->fd.interval_length <= 0))) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid interval: must be greater than 0"), + errhint("Please change 
your chunk interval."))); + } values[AttrNumberGetAttrOffset(Anum_dimension_column_name)] = NameGetDatum(&dim->fd.column_name); @@ -628,7 +634,13 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c if (num_slices > 0) { /* Closed (hash) dimension */ - Assert(num_slices > 0 && interval_length <= 0); + + if (!(num_slices > 0 && interval_length <= 0)){ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid interval: must be greater than 0"), + errhint("Please change your chunk interval."))); + } values[AttrNumberGetAttrOffset(Anum_dimension_num_slices)] = Int16GetDatum(num_slices); values[AttrNumberGetAttrOffset(Anum_dimension_aligned)] = BoolGetDatum(false); nulls[AttrNumberGetAttrOffset(Anum_dimension_interval_length)] = true; @@ -636,7 +648,12 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c else { /* Open (time) dimension */ - Assert(num_slices <= 0 && interval_length > 0); + if (!(num_slices <= 0 && interval_length > 0)){ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid interval: must be between 1 and 9223372036854775807"), + errhint("Please change your chunk interval."))); + } values[AttrNumberGetAttrOffset(Anum_dimension_interval_length)] = Int64GetDatum(interval_length); values[AttrNumberGetAttrOffset(Anum_dimension_aligned)] = BoolGetDatum(true); -- Gitee From 5d4889f3f9d5cbbc56fc38ae232fa906395f12ca Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 24 Nov 2023 10:21:03 +0800 Subject: [PATCH 073/434] Fix varlena2xx bug. 
--- contrib/dolphin/expected/uint_cast2.out | 18 ++++++++++++++++++ contrib/dolphin/plugin_utils/adt/varlena.cpp | 5 ++++- contrib/dolphin/sql/uint_cast2.sql | 9 +++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/uint_cast2.out b/contrib/dolphin/expected/uint_cast2.out index e86845c69..007a9d728 100644 --- a/contrib/dolphin/expected/uint_cast2.out +++ b/contrib/dolphin/expected/uint_cast2.out @@ -896,5 +896,23 @@ select Varlena2Text(1); 1 (1 row) +select varlena2bit(1,10); + varlena2bit +------------- + 0000110001 +(1 row) + +create OR REPLACE procedure t_p(out ret varchar) as +begin +select '{"a":"b"}'::json into ret; +end; +/ +select t_p(); + t_p +----------- + {"a":"b"} +(1 row) + +drop procedure t_p; drop schema uint_cast2 cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index db5fef34c..c73669dc3 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10706,6 +10706,9 @@ Datum blob_any_value(PG_FUNCTION_ARGS) static char* AnyElementGetCString(Oid anyOid, Datum anyDatum) { + if (!OidIsValid(anyOid)) { + return DatumGetCString(DirectFunctionCall1(textout, anyDatum)); + } char* data = NULL; Oid typeOutput = InvalidOid; bool typIsVarlena = false; @@ -10786,7 +10789,7 @@ Datum Varlena2Text(PG_FUNCTION_ARGS) Datum Varlena2Bit(PG_FUNCTION_ARGS) { char* data = NULL; - data = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(0))); + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); int32 typmod = PG_GETARG_INT32(1); Datum bits; Datum result; diff --git a/contrib/dolphin/sql/uint_cast2.sql b/contrib/dolphin/sql/uint_cast2.sql index 7b267ed39..72fa02500 100644 --- a/contrib/dolphin/sql/uint_cast2.sql +++ b/contrib/dolphin/sql/uint_cast2.sql @@ -446,6 +446,15 @@ select varlena2numeric(1); select Varlena2Bpchar(1); select Varlena2Varchar(1); select 
Varlena2Text(1); +select varlena2bit(1,10); + +create OR REPLACE procedure t_p(out ret varchar) as +begin +select '{"a":"b"}'::json into ret; +end; +/ +select t_p(); +drop procedure t_p; drop schema uint_cast2 cascade; reset current_schema; \ No newline at end of file -- Gitee From 6c04a62b1c4b2f2757fe53a747c84a8ff01a26dc Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Sat, 25 Nov 2023 14:05:29 +0800 Subject: [PATCH 074/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dissue=20I8I9CH=E6=89=80=E7=A4=BA=E7=9A=84date?= =?UTF-8?q?=5Fformat=E5=92=8Cto=5Fchar=E7=9A=84=E9=97=AE=E9=A2=98.=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dissue=20I8I9CH=E6=89=80=E7=A4=BA=E7=9A=84date?= =?UTF-8?q?=5Fformat=E5=92=8Cto=5Fchar=E7=9A=84=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20date?= =?UTF-8?q?=5Fformat=E6=98=AF=E5=9B=A0=E4=B8=BA=E6=B2=A1=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?time=E7=B1=BB=E5=9E=8B=E5=8F=82=E6=95=B0=E7=9A=84date=5Fformat?= =?UTF-8?q?=EF=BC=8C=E8=B5=B0=E4=BA=86numeric=E7=B1=BB=E5=9E=8B=E7=9A=84da?= =?UTF-8?q?te=5Fformat=EF=BC=8C=E5=A4=84=E7=90=86=E6=97=B6=E5=80=99?= =?UTF-8?q?=E5=87=BA=E9=94=99=E4=BA=86=E3=80=82tochar=E5=87=BA=E9=94=99?= =?UTF-8?q?=E6=98=AF=E5=9B=A0=E4=B8=BA3.0.0=E7=89=88=E6=9C=AC=E8=B5=B0?= =?UTF-8?q?=E7=9A=84=E6=98=AFinterval=5Fto=5Fchar,=20=E4=BD=86=E6=98=AF5.0?= =?UTF-8?q?.0=E7=89=88=E6=9C=AC=E5=9B=A0=E4=B8=BA=E6=94=AF=E6=8C=81time=5F?= =?UTF-8?q?to=5Fnumeric,=20=E5=9B=A0=E6=AD=A4=E8=B5=B0=E4=BA=86to=5Fchar?= =?UTF-8?q?=5Fnumeric=EF=BC=8C=E5=9B=A0=E6=AD=A4=E5=A4=84=E7=90=86?= =?UTF-8?q?=E6=97=B6=E5=80=99=E5=87=BA=E9=94=99=E4=BA=86=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20?= =?UTF-8?q?=E6=94=AF=E6=8C=81time=E7=B1=BB=E5=9E=8B=E7=9A=84date=5Fformat?= =?UTF-8?q?=E5=92=8Cto=5Fchar=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80?= 
=?UTF-8?q?=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengau?= =?UTF-8?q?ssorg/dashboard=3Fissue=3DI8I9CH?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 22 +++++++++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 39 ++++++++++++++++++- .../rollback_script/dolphin--3.0--2.0.sql | 3 ++ .../b_compatibility_time_funcs3.sql | 10 +++++ .../upgrade_script/dolphin--2.0--3.0.sql | 6 +++ 5 files changed, 79 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index 1a9117097..c68fb7198 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -1493,6 +1493,28 @@ select time('23:55:56.1234'); 23:55:56.1234 (1 row) +set dolphin.b_compatibility_mode = true; +create table test_time(t time); +insert into test_time values ('11:12:23.123456'); +insert into test_time values ('22:11:33'); +insert into test_time values ('-22:11:33'); +select date_format(t, '%H%i%s.%f') from test_time; + date_format +--------------- + 111223.000000 + 221133.000000 + 014827.000000 +(3 rows) + +select to_char(t, 'hh24miss') from test_time; + to_char +----------- + 111223 + 221133 + -22-11-33 +(3 rows) + +drop table test_time; drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 69a073336..339afcd80 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -313,6 +313,8 @@ PG_FUNCTION_INFO_V1_PUBLIC(date_format_text); extern "C" DLL_PUBLIC Datum date_format_text(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(date_format_numeric); extern "C" 
DLL_PUBLIC Datum date_format_numeric(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(date_format_time); +extern "C" DLL_PUBLIC Datum date_format_time(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(str_to_date); extern "C" DLL_PUBLIC Datum str_to_date(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(from_unixtime_with_one_arg); @@ -5625,7 +5627,7 @@ Datum timestamptz_datetime(PG_FUNCTION_ARGS) { TimestampTz timestamp = PG_GETARG_TIMESTAMPTZ(0); Timestamp result; - struct pg_tm tt, *tm = &tt; + pg_tm tt, *tm = &tt; fsec_t fsec; int tz; @@ -9780,6 +9782,41 @@ Datum date_format_numeric(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(result_text); } + +Datum date_format_time(PG_FUNCTION_ARGS) +{ + TimeADT timeVal = PG_GETARG_TIMEADT(0); + text *format_text = PG_GETARG_TEXT_PP(1); + char buf[MAXDATELEN]; /* string for temporary storage */ + char *format = NULL; /* format string */ + char *str = NULL; /* return string */ + int remain = 0; /* remaining buffer size of variable str */ + struct pg_tm tt, *tm = &tt; + fsec_t fsec; + errno_t rc = memset_s(tm, sizeof(*tm), 0, sizeof(*tm)); + securec_check(rc, "\0", "\0"); + fsec = 0; + if (timeVal < 0) { +#ifdef HAVE_INT64_TIMESTAMP + timeVal = USECS_PER_DAY + timeVal; +#else + timeVal = SECS_PER_DAY + timeVal; +#endif + } + time2tm(timeVal, tm, &fsec); + format = text_to_cstring(format_text); + int format_len = strlen(format); + remain = get_result_len(format, format_len); + str = (char*)palloc(remain + 1); + + if (!date_format_internal(str, buf, format, format_len, remain, tm, fsec)) { + PG_RETURN_NULL(); + } + text *result_text = cstring_to_text(str); + pfree(str); + PG_RETURN_TEXT_P(result_text); +} + /** * find the type to return */ diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index bfb6bde18..a50e209d9 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -132,3 +132,6 @@ drop CAST IF 
EXISTS (timestamptz as boolean); drop CAST IF EXISTS (timestamp(0) without time zone as boolean); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_bool(timestamptz); DROP FUNCTION IF EXISTS pg_catalog.timestamp_bool(timestamp(0) without time zone); + +DROP FUNCTION IF EXISTS pg_catalog.date_format (time without time zone, text); +DROP FUNCTION IF EXISTS pg_catalog.to_char(time without time zone, text); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index d10f05cc4..f8d89f692 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -395,5 +395,15 @@ select time('0:0:0'); select time('-1:-1:-1'); select time('23:55:56.1234'); +set dolphin.b_compatibility_mode = true; +create table test_time(t time); +insert into test_time values ('11:12:23.123456'); +insert into test_time values ('22:11:33'); +insert into test_time values ('-22:11:33'); + +select date_format(t, '%H%i%s.%f') from test_time; +select to_char(t, 'hh24miss') from test_time; + +drop table test_time; drop schema b_time_funcs3 cascade; reset current_schema; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 1b9f9b9d0..207b4e819 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -211,3 +211,9 @@ CREATE CAST (timestamptz as boolean) WITH FUNCTION timestamptz_bool(timestamptz) CREATE OR REPLACE FUNCTION pg_catalog.timestamp_bool(timestamp(0) without time zone) returns boolean LANGUAGE C immutable strict as '$libdir/dolphin', 'timestamp_bool'; CREATE CAST (timestamp(0) without time zone as boolean) WITH FUNCTION timestamp_bool(timestamp(0) without time zone) AS ASSIGNMENT; + +DROP FUNCTION IF EXISTS pg_catalog.date_format (time without time zone, text); +CREATE OR 
REPLACE FUNCTION pg_catalog.date_format (time without time zone, text) RETURNS TEXT LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'date_format_time'; + +DROP FUNCTION IF EXISTS pg_catalog.to_char(time without time zone, text); +CREATE OR REPLACE FUNCTION pg_catalog.to_char(time without time zone, text) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT pg_catalog.to_char($1::interval, $2) $$; -- Gitee From 566f35bc828bd6dfd20e73a8fa8afd98bd1d4a49 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Sat, 25 Nov 2023 18:04:10 +0800 Subject: [PATCH 075/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dset=5Fextension=5Find?= =?UTF-8?q?ex=E5=9C=A8=E5=B8=A6dolphin=E5=92=8Cwhale=E5=90=8C=E6=97=B6?= =?UTF-8?q?=E5=AD=98=E5=9C=A8=E7=9A=84=E6=97=B6=E5=80=99=E8=AE=BE=E7=BD=AE?= =?UTF-8?q?=E6=9C=89=E8=AF=AF=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/Makefile | 1 - contrib/whale/include/plugin_postgres.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/whale/Makefile b/contrib/whale/Makefile index be9fecc5b..47a81282c 100644 --- a/contrib/whale/Makefile +++ b/contrib/whale/Makefile @@ -88,7 +88,6 @@ extra_clean: make clean -C $(executor) make clean -C $(storage) make clean -C $(plan) - make clean -C $(commands) make clean -C $(pl) make clean -C $(orafce) clean: extra_clean diff --git a/contrib/whale/include/plugin_postgres.h b/contrib/whale/include/plugin_postgres.h index 271f8c6c3..7b8584aed 100644 --- a/contrib/whale/include/plugin_postgres.h +++ b/contrib/whale/include/plugin_postgres.h @@ -53,6 +53,7 @@ extern "C" DLL_PUBLIC void whale_invoke(void); extern "C" DLL_PUBLIC void init_plugin_object(); extern "C" DLL_PUBLIC void init_session_vars(void); extern "C" DLL_PUBLIC void create_whale_extension(); +extern "C" DLL_PUBLIC void set_extension_index(uint32 index); typedef struct ASqlPluginContext { bool enableACmptMode; -- Gitee From 
4168a60e9abcd5b2a9586e25fc54e9b8b128467d Mon Sep 17 00:00:00 2001 From: = Date: Sun, 26 Nov 2023 16:53:10 +0800 Subject: [PATCH 076/434] fix I8JHC5 date compare error in b compatible mode --- contrib/dolphin/expected/test_datatype.out | 56 ++++++++++++++++++++ contrib/dolphin/plugin_parser/parse_oper.cpp | 6 ++- contrib/dolphin/sql/test_datatype.sql | 17 ++++++ 3 files changed, 77 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/test_datatype.out b/contrib/dolphin/expected/test_datatype.out index 05db8ed0c..c2bf447e5 100644 --- a/contrib/dolphin/expected/test_datatype.out +++ b/contrib/dolphin/expected/test_datatype.out @@ -362,5 +362,61 @@ select varlenatoset(c2,gettypeid('set_tab_c2_set')) from set_tab order by 1; drop function gettypeid; drop table set_tab; +---- +set dolphin.b_compatibility_mode = true; +drop table if exists t1; +NOTICE: table "t1" does not exist, skipping +create table t1 (a char(16), b date, c datetime); +insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01'; +select * from t1 where c = '2000-01-01'; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where b = '2000-01-01'; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where c = '2000-01-01'::datetime; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where b = '2000-01-01'::date; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +set dolphin.b_compatibility_mode = on; +select * from t1 where c = '2000-01-01'; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where b = '2000-01-01'; + a 
| b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where c = '2000-01-01'::datetime; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +select * from t1 where b = '2000-01-01'::date; + a | b | c +------------------+------------+--------------------- + test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 +(1 row) + +drop table t1; drop schema b_datatype_test cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index a6819c129..f25293a83 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -582,7 +582,9 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n /** * In order to make 'date ^ unknown' operate as date_text_xor(), we change unknown into text */ - if (GetSessionContext()->enableBCmptMode) { + char * oprname = strVal(linitial(opname)); + + if (GetSessionContext()->enableBCmptMode && strcmp("^", oprname) == 0) { if (ltypeId == UNKNOWNOID && rtypeId == DATEOID) { ltypeId = TEXTOID; } else if (ltypeId == DATEOID && rtypeId == UNKNOWNOID) { @@ -592,7 +594,7 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n /** * In order to make 'time ^ unknown' operate as time_text_xor(), we change unknown into text */ - if (GetSessionContext()->enableBCmptMode) { + if (GetSessionContext()->enableBCmptMode && strcmp("^", oprname) == 0) { if (ltypeId == UNKNOWNOID && rtypeId == TIMEOID) { ltypeId = TEXTOID; } else if (ltypeId == TIMEOID && rtypeId == UNKNOWNOID) { diff --git a/contrib/dolphin/sql/test_datatype.sql b/contrib/dolphin/sql/test_datatype.sql index 8b73eb38f..db0c1c430 100644 --- a/contrib/dolphin/sql/test_datatype.sql +++ b/contrib/dolphin/sql/test_datatype.sql @@ -130,5 +130,22 @@ select 
varlenatoset(c2,gettypeid('set_tab_c2_set')) from set_tab order by 1; drop function gettypeid; drop table set_tab; +---- +set dolphin.b_compatibility_mode = true; +drop table if exists t1; +create table t1 (a char(16), b date, c datetime); +insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01'; +select * from t1 where c = '2000-01-01'; +select * from t1 where b = '2000-01-01'; +select * from t1 where c = '2000-01-01'::datetime; +select * from t1 where b = '2000-01-01'::date; +set dolphin.b_compatibility_mode = on; +select * from t1 where c = '2000-01-01'; +select * from t1 where b = '2000-01-01'; +select * from t1 where c = '2000-01-01'::datetime; +select * from t1 where b = '2000-01-01'::date; + +drop table t1; + drop schema b_datatype_test cascade; reset current_schema; \ No newline at end of file -- Gitee From e0b3fb0b89175959a62d57a3a5cadb0031627985 Mon Sep 17 00:00:00 2001 From: = Date: Sun, 26 Nov 2023 20:01:18 +0800 Subject: [PATCH 077/434] change code to pass the code check --- contrib/dolphin/plugin_parser/parse_oper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index f25293a83..113ce42aa 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -582,7 +582,7 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n /** * In order to make 'date ^ unknown' operate as date_text_xor(), we change unknown into text */ - char * oprname = strVal(linitial(opname)); + char* oprname = strVal(linitial(opname)); if (GetSessionContext()->enableBCmptMode && strcmp("^", oprname) == 0) { if (ltypeId == UNKNOWNOID && rtypeId == DATEOID) { -- Gitee From 562cf4daee25758ee91e4e1743d0fd86daf6e0bf Mon Sep 17 00:00:00 2001 From: = Date: Mon, 27 Nov 2023 11:56:46 +0800 Subject: [PATCH 078/434] use DeconstructQualifiedName instead of linitial --- 
contrib/dolphin/expected/test_datatype.out | 2 +- contrib/dolphin/plugin_parser/parse_oper.cpp | 8 +++++--- contrib/dolphin/sql/test_datatype.sql | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/contrib/dolphin/expected/test_datatype.out b/contrib/dolphin/expected/test_datatype.out index c2bf447e5..119f5125e 100644 --- a/contrib/dolphin/expected/test_datatype.out +++ b/contrib/dolphin/expected/test_datatype.out @@ -392,7 +392,7 @@ select * from t1 where b = '2000-01-01'::date; test 2000-01-01 | 2000-01-01 | 2000-01-01 00:00:00 (1 row) -set dolphin.b_compatibility_mode = on; +set dolphin.b_compatibility_mode = off; select * from t1 where c = '2000-01-01'; a | b | c ------------------+------------+--------------------- diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index 113ce42aa..32012b218 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -582,9 +582,11 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n /** * In order to make 'date ^ unknown' operate as date_text_xor(), we change unknown into text */ - char* oprname = strVal(linitial(opname)); + char* schemaname = NULL; + char* opername = NULL; + DeconstructQualifiedName(opname, &schemaname, &opername); - if (GetSessionContext()->enableBCmptMode && strcmp("^", oprname) == 0) { + if (GetSessionContext()->enableBCmptMode && strcmp("^", opername) == 0) { if (ltypeId == UNKNOWNOID && rtypeId == DATEOID) { ltypeId = TEXTOID; } else if (ltypeId == DATEOID && rtypeId == UNKNOWNOID) { @@ -594,7 +596,7 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n /** * In order to make 'time ^ unknown' operate as time_text_xor(), we change unknown into text */ - if (GetSessionContext()->enableBCmptMode && strcmp("^", oprname) == 0) { + if (GetSessionContext()->enableBCmptMode && strcmp("^", opername) == 0) { if (ltypeId == 
UNKNOWNOID && rtypeId == TIMEOID) { ltypeId = TEXTOID; } else if (ltypeId == TIMEOID && rtypeId == UNKNOWNOID) { diff --git a/contrib/dolphin/sql/test_datatype.sql b/contrib/dolphin/sql/test_datatype.sql index db0c1c430..08b5e4203 100644 --- a/contrib/dolphin/sql/test_datatype.sql +++ b/contrib/dolphin/sql/test_datatype.sql @@ -139,7 +139,7 @@ select * from t1 where c = '2000-01-01'; select * from t1 where b = '2000-01-01'; select * from t1 where c = '2000-01-01'::datetime; select * from t1 where b = '2000-01-01'::date; -set dolphin.b_compatibility_mode = on; +set dolphin.b_compatibility_mode = off; select * from t1 where c = '2000-01-01'; select * from t1 where b = '2000-01-01'; select * from t1 where c = '2000-01-01'::datetime; -- Gitee From d58dbf20699bc6969b441e53c14d5f88569e1bf1 Mon Sep 17 00:00:00 2001 From: = Date: Mon, 27 Nov 2023 15:54:35 +0800 Subject: [PATCH 079/434] change code to reduce the complexity based on xiaobin comment --- contrib/dolphin/plugin_parser/parse_oper.cpp | 43 ++++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index 32012b218..329511bfa 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -579,28 +579,27 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n rtypeId = BLOBOID; } } - /** - * In order to make 'date ^ unknown' operate as date_text_xor(), we change unknown into text - */ - char* schemaname = NULL; - char* opername = NULL; - DeconstructQualifiedName(opname, &schemaname, &opername); - - if (GetSessionContext()->enableBCmptMode && strcmp("^", opername) == 0) { - if (ltypeId == UNKNOWNOID && rtypeId == DATEOID) { - ltypeId = TEXTOID; - } else if (ltypeId == DATEOID && rtypeId == UNKNOWNOID) { - rtypeId = TEXTOID; - } - } - /** - * In order to make 'time ^ unknown' operate as time_text_xor(), we change unknown into 
text - */ - if (GetSessionContext()->enableBCmptMode && strcmp("^", opername) == 0) { - if (ltypeId == UNKNOWNOID && rtypeId == TIMEOID) { - ltypeId = TEXTOID; - } else if (ltypeId == TIMEOID && rtypeId == UNKNOWNOID) { - rtypeId = TEXTOID; + if (GetSessionContext()->enableBCmptMode) { + char* schemaname = NULL; + char* opername = NULL; + DeconstructQualifiedName(opname, &schemaname, &opername); + if (strcmp("^", opername) == 0) { + /** + * In order to make 'date ^ unknown' operate as date_text_xor(), we change unknown into text + */ + if (ltypeId == UNKNOWNOID && rtypeId == DATEOID) { + ltypeId = TEXTOID; + } else if (ltypeId == DATEOID && rtypeId == UNKNOWNOID) { + rtypeId = TEXTOID; + } + /** + * In order to make 'time ^ unknown' operate as time_text_xor(), we change unknown into text + */ + if (ltypeId == UNKNOWNOID && rtypeId == TIMEOID) { + ltypeId = TEXTOID; + } else if (ltypeId == TIMEOID && rtypeId == UNKNOWNOID) { + rtypeId = TEXTOID; + } } } #endif -- Gitee From 7f588d07bb9c82c0202863d5d8083d6fa8211b17 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Tue, 28 Nov 2023 09:55:38 +0800 Subject: [PATCH 080/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=B7=B2=E7=9F=A5?= =?UTF-8?q?=E9=97=AE=E9=A2=98=E4=BB=A5=E5=8F=8A=E5=86=85=E9=83=A8=E6=B5=8B?= =?UTF-8?q?=E5=87=BA=E7=9A=84=E7=BC=BA=E9=99=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/src/dimension.cpp | 6 +++--- contrib/timescaledb/src/hypertable.cpp | 6 +++--- contrib/timescaledb/src/process_utility.cpp | 8 ++++++++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/contrib/timescaledb/src/dimension.cpp b/contrib/timescaledb/src/dimension.cpp index 862e71207..fb8e1e725 100644 --- a/contrib/timescaledb/src/dimension.cpp +++ b/contrib/timescaledb/src/dimension.cpp @@ -559,7 +559,7 @@ dimension_tuple_update(TupleInfo *ti, void *data) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("invalid interval: must be 
greater than 0"), + errmsg("invalid interval: Must be greater than 0"), errhint("Please change your chunk interval."))); } @@ -638,7 +638,7 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c if (!(num_slices > 0 && interval_length <= 0)){ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("invalid interval: must be greater than 0"), + errmsg("invalid interval: Must be greater than 0"), errhint("Please change your chunk interval."))); } values[AttrNumberGetAttrOffset(Anum_dimension_num_slices)] = Int16GetDatum(num_slices); @@ -651,7 +651,7 @@ dimension_insert_relation(Relation rel, int32 hypertable_id, Name colname, Oid c if (!(num_slices <= 0 && interval_length > 0)){ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("invalid interval: must be between 1 and 9223372036854775807"), + errmsg("invalid interval: Must be greater than 0"), errhint("Please change your chunk interval."))); } values[AttrNumberGetAttrOffset(Anum_dimension_interval_length)] = diff --git a/contrib/timescaledb/src/hypertable.cpp b/contrib/timescaledb/src/hypertable.cpp index 15bba7473..6f4e563c3 100644 --- a/contrib/timescaledb/src/hypertable.cpp +++ b/contrib/timescaledb/src/hypertable.cpp @@ -1311,10 +1311,10 @@ static void hypertable_create_schema(const char *schema_name) { CreateSchemaStmt stmt = { - .type = {}, - .missing_ok = false, + .type = T_CreateSchemaStmt, + .missing_ok = true, .schemaname = (char *) schema_name, - .authid = "", + .authid = NULL, .hasBlockChain = false, .schemaElts = NIL, .temptype = {}, diff --git a/contrib/timescaledb/src/process_utility.cpp b/contrib/timescaledb/src/process_utility.cpp index 3d875b494..73f2926e0 100644 --- a/contrib/timescaledb/src/process_utility.cpp +++ b/contrib/timescaledb/src/process_utility.cpp @@ -913,6 +913,14 @@ process_drop_hypertable(ProcessUtilityArgs *args, DropStmt *stmt) errhint("Please drop the corresponding uncompressed hypertable " "instead."))); + + if (stmt->behavior 
!= DROP_CASCADE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Dropping hypertables without CASCADE is not supported"), + errhint("Please use drop hypertable.... CASCADE " + "instead."))); + /* * We need to drop hypertable chunks before the hypertable to avoid the need * to CASCADE such drops; -- Gitee From ac33be310058a22c4eed2355046cd6ac78f4bb0b Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 28 Nov 2023 14:51:33 +0800 Subject: [PATCH 081/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9bit=E5=88=B0time?= =?UTF-8?q?=E7=9A=84=E6=98=BE=E5=BC=8F=E8=BD=AC=E6=8D=A2=E8=A7=84=E5=88=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 50 +++++++++++++++++++ contrib/dolphin/expected/timestamp_test.out | 38 +++++++------- .../dolphin/plugin_parser/parse_coerce.cpp | 18 +++++++ contrib/dolphin/plugin_utils/adt/date.cpp | 3 +- .../rollback_script/dolphin--3.0--2.0.sql | 5 ++ .../b_compatibility_time_funcs3.sql | 8 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 16 ++++++ 7 files changed, 116 insertions(+), 22 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index c68fb7198..a2e66a8b7 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -660,6 +660,56 @@ select time('23:55:56.1234'); 23:55:56.1234 (1 row) +select cast('5555555' as time); + time +----------- + 555:55:55 +(1 row) + +select cast('55555555' as time); +WARNING: invalid input syntax for type time: "55555555" +CONTEXT: referenced column: time + time +----------- + 838:59:59 +(1 row) + +select cast('555555555' as time); +WARNING: invalid input syntax for type time: "555555555" +CONTEXT: referenced column: time + time +----------- + 838:59:59 +(1 row) + +select cast('5555555555' as time); + time 
+------ + +(1 row) + +select cast('6666666' as time); +WARNING: invalid input syntax for type time: "6666666" +CONTEXT: referenced column: time + time +------ + +(1 row) + +select cast('66666666' as time); +WARNING: invalid input syntax for type time: "66666666" +CONTEXT: referenced column: time + time +------ + +(1 row) + +select cast('6666666666' as time); + time +------ + +(1 row) + set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test1 values('23:65:66'); ERROR: date/time field value out of range: "23:65:66" diff --git a/contrib/dolphin/expected/timestamp_test.out b/contrib/dolphin/expected/timestamp_test.out index 5ea046b48..5bb8950dd 100644 --- a/contrib/dolphin/expected/timestamp_test.out +++ b/contrib/dolphin/expected/timestamp_test.out @@ -36,39 +36,35 @@ set dolphin.b_compatibility_mode to on; select '2022-01-01'::bit(64)::datetime; WARNING: bit string length 80 does not match type bit(64) CONTEXT: referenced column: timestamp -WARNING: bigint out of range +WARNING: invalid input syntax for type timestamp: "ÿÿÿÿÿÿÿÿ" CONTEXT: referenced column: timestamp -SQL function "bittodatetime" statement 1 +SQL function "bit_cast_datetime" statement 1 referenced column: timestamp -WARNING: timestamp out of range -CONTEXT: referenced column: timestamp -SQL function "bittodatetime" statement 1 -referenced column: timestamp - timestamp ------------ - + timestamp +--------------------- + 0000-00-00 00:00:00 (1 row) select '2022-01-01'::bit(64)::timestamp; WARNING: bit string length 80 does not match type bit(64) CONTEXT: referenced column: timestamptz -WARNING: bigint out of range +WARNING: invalid input syntax for type timestamp: "ÿÿÿÿÿÿÿÿ" CONTEXT: referenced column: timestamptz -SQL function "bittotimestamp" statement 1 +SQL function "bit_cast_timestamp" statement 1 referenced column: timestamptz -WARNING: timestamp out of range -CONTEXT: referenced column: timestamptz -SQL function "bittotimestamp" 
statement 1 -referenced column: timestamptz - timestamptz -------------- - + timestamptz +------------------------ + 0000-00-00 00:00:00-08 (1 row) select 8385958.999999::bit(64)::time; - time ------------ - 838:59:59 +WARNING: invalid input syntax for type time: "õ§" +CONTEXT: referenced column: time +SQL function "bit_cast_time" statement 1 +referenced column: time + time +---------- + 00:00:00 (1 row) set dolphin.sql_mode=''; diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index 281b9d985..eb628c823 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -3400,6 +3400,22 @@ Oid findEnumCastFunction(Oid targetTypeId) return funcid; } +Oid findBitCastTimeFunction(Oid targetTypeId, Oid funcid) +{ + switch (targetTypeId) { + case DATEOID: + return get_func_oid("bit_cast_date", PG_CATALOG_NAMESPACE, NULL); + case TIMESTAMPOID: + return get_func_oid("bit_cast_datetime", PG_CATALOG_NAMESPACE, NULL); + case TIMESTAMPTZOID: + return get_func_oid("bit_cast_timestamp", PG_CATALOG_NAMESPACE, NULL); + case TIMEOID: + return get_func_oid("bit_cast_time", PG_CATALOG_NAMESPACE, NULL); + default: + return funcid; + } +} + bool IsEquivalentEnums(Oid enumOid1, Oid enumOid2) { if (enumOid1 == enumOid2) { @@ -3464,6 +3480,8 @@ void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId *funcId = get_func_oid("text_time_explicit", PG_CATALOG_NAMESPACE, NULL); } else if (ENABLE_B_CMPT_MODE && targetTypeId == INT8OID) { *funcId = findSignedExplicitCastFunction(sourceTypeId, defaultFuncId); + } else if (sourceTypeId == BITOID) { + *funcId = findBitCastTimeFunction(targetTypeId, defaultFuncId); } else { *funcId = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, defaultFuncId); } diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 98c8ebd27..73feea1b8 100644 --- 
a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -1816,7 +1816,7 @@ Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorTy if (time_cast_type == TEXT_TIME_EXPLICIT || time_cast_type == TIME_IN) { DateTimeParseError(dterr, str, "time", true); tm = &tt; // switch to M*'s parsing result - if (dterr != DTERR_TZDISP_OVERFLOW) { + if (dterr != DTERR_TZDISP_OVERFLOW && null_func_result) { *time_error_type = TIME_INCORRECT; } } else if (time_cast_type == TIME_CAST) { @@ -1834,6 +1834,7 @@ Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorTy /* * the following logic is unified for time parsing. */ + *time_error_type = null_func_result ? TIME_INCORRECT : *time_error_type; tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, typmod); result *= timeSign; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index a50e209d9..da442bcb0 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -1,3 +1,8 @@ +DROP FUNCTION IF EXISTS pg_catalog.bit_cast_time(bit); +DROP FUNCTION IF EXISTS pg_catalog.bit_cast_timestamp(bit); +DROP FUNCTION IF EXISTS pg_catalog.bit_cast_datetime(bit); +DROP FUNCTION IF EXISTS pg_catalog.bit_cast_date(bit); + DROP FUNCTION IF EXISTS pg_catalog.dolphin_invoke(); CREATE FUNCTION pg_catalog.dolphin_invoke() RETURNS VOID AS '$libdir/dolphin','dolphin_invoke' LANGUAGE C STRICT; diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index f8d89f692..be6458f73 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -250,6 +250,14 @@ select time('0:0:0'); select time('-1:-1:-1'); select time('23:55:56.1234'); +select cast('5555555' 
as time); +select cast('55555555' as time); +select cast('555555555' as time); +select cast('5555555555' as time); + +select cast('6666666' as time); +select cast('66666666' as time); +select cast('6666666666' as time); set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date'; insert into test1 values('23:65:66'); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 207b4e819..614eb844a 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -217,3 +217,19 @@ CREATE OR REPLACE FUNCTION pg_catalog.date_format (time without time zone, text) DROP FUNCTION IF EXISTS pg_catalog.to_char(time without time zone, text); CREATE OR REPLACE FUNCTION pg_catalog.to_char(time without time zone, text) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT pg_catalog.to_char($1::interval, $2) $$; + +CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_date(bit) +RETURNS date LANGUAGE SQL IMMUTABLE STRICT as +'select cast(cast($1 as text) as date)'; + +CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_datetime(bit) +RETURNS timestamp without time zone LANGUAGE SQL IMMUTABLE STRICT as +'select cast(cast($1 as text) as timestamp without time zone)'; + +CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_timestamp(bit) +RETURNS timestamptz LANGUAGE SQL IMMUTABLE STRICT as +'select cast(cast($1 as text) as timestamptz)'; + +CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_time(bit) +RETURNS time without time zone LANGUAGE SQL IMMUTABLE STRICT as +'select cast(cast($1 as text) as time without time zone)'; \ No newline at end of file -- Gitee From 9d20dd7b2e8fa32330a0d5101e32c742e5291868 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Tue, 28 Nov 2023 21:18:09 +0800 Subject: [PATCH 082/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dissue=20I8IXNB=E6=89=80=E7=A4=BA=E7=9A=84?= 
=?UTF-8?q?=E5=85=BC=E5=AE=B9B=E5=BA=93=E5=BB=BA=E8=A1=A8=E6=97=A0?= =?UTF-8?q?=E6=B3=95=E4=BD=BF=E7=94=A8default=E5=8A=A0=E6=8B=AC=E5=8F=B7?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8Dissue=20I8IXNB?= =?UTF-8?q?=E6=89=80=E7=A4=BA=E7=9A=84=E5=85=BC=E5=AE=B9B=E5=BA=93?= =?UTF-8?q?=E5=BB=BA=E8=A1=A8=E6=97=A0=E6=B3=95=E4=BD=BF=E7=94=A8default?= =?UTF-8?q?=E5=8A=A0=E6=8B=AC=E5=8F=B7=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82?= =?UTF-8?q?=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E4=B9=8B=E5=89=8D=E4=B8=BA=E4=BA=86=E5=85=BC=E5=AE=B9mysql?= =?UTF-8?q?=E7=9A=84=20default()=E5=87=BD=E6=95=B0=EF=BC=8C=E5=9B=A0?= =?UTF-8?q?=E6=AD=A4=E5=BB=BA=E8=A1=A8=E8=AF=AD=E5=8F=A5=E4=B8=AD=E4=B8=8D?= =?UTF-8?q?=E6=94=AF=E6=8C=81default=E5=B8=A6=E6=8B=AC=E5=8F=B7=E7=9A=84?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E3=80=82=E5=9B=A0=E4=B8=BAcreate=E8=AF=AD?= =?UTF-8?q?=E5=8F=A5=E4=B8=ADdefault=E5=B8=A6=E6=8B=AC=E5=8F=B7=E7=9A=84?= =?UTF-8?q?=E5=90=AB=E4=B9=89=E4=B8=BA=E4=BC=98=E5=85=88=E7=BA=A7=EF=BC=8C?= =?UTF-8?q?=E4=BD=86=E6=98=AFselect=E4=B8=AD=E7=9A=84default=E5=B8=A6?= =?UTF-8?q?=E6=8B=AC=E5=8F=B7=E4=B8=BA=E5=87=BD=E6=95=B0=E5=8F=82=E6=95=B0?= =?UTF-8?q?=EF=BC=8C=E6=97=A0=E6=B3=95=E5=8C=BA=E5=88=86=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20?= =?UTF-8?q?=E5=85=B6=E5=AE=9E=E4=B8=BB=E8=A6=81=E6=98=AF=E5=9B=9E=E5=90=88?= =?UTF-8?q?mr=20https://gitee.com/totaj/Plugin/commit/f034ef702f1abb074994?= =?UTF-8?q?86dfe6c576dd59364cf4=EF=BC=8C=E5=8F=A6=E5=A4=96=E5=9B=9E?= =?UTF-8?q?=E9=80=80https://gitee.com/opengauss/Plugin/pulls/925=E5=92=8Ch?= =?UTF-8?q?ttps://gitee.com/opengauss/Plugin/pulls/1033=20=E3=80=90?= =?UTF-8?q?=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20?= =?UTF-8?q?https://e.gitee.com/opengaussorg/dashboard=3Fissue=3DI8I9CH?= =?UTF-8?q?=EF=BC=8C=20=E5=A6=82=E6=9E=9C=E5=88=A4=E6=96=AD=E8=AF=AD?= 
=?UTF-8?q?=E5=8F=A5=E4=B8=AD=E6=9C=89create=E5=92=8Calter=E5=85=B3?= =?UTF-8?q?=E9=94=AE=E8=AF=8D=EF=BC=8C=E5=B0=B1=E8=B5=B0pg=E7=9A=84?= =?UTF-8?q?=E5=B0=86=E6=8B=AC=E5=8F=B7=E5=BD=93=E6=88=90=E4=BC=98=E5=85=88?= =?UTF-8?q?=E7=BA=A7=E8=A7=A3=E9=87=8A=EF=BC=8C=E5=90=A6=E5=88=99=E5=B0=B1?= =?UTF-8?q?=E5=BD=93=E6=88=90=E5=87=BD=E6=95=B0=E5=8F=82=E6=95=B0=E8=A7=A3?= =?UTF-8?q?=E9=87=8A=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../case_sensitive_test/schema_test.out | 4 +- contrib/dolphin/expected/default_function.out | 53 ++++++++++++++ contrib/dolphin/expected/test_default.out | 70 +++++++++---------- contrib/dolphin/include/plugin_postgres.h | 1 + contrib/dolphin/plugin_parser/gram.y | 1 + contrib/dolphin/plugin_parser/parse_expr.cpp | 2 +- contrib/dolphin/plugin_parser/parser.cpp | 13 ++++ contrib/dolphin/plugin_postgres.cpp | 1 + .../dolphin/plugin_utils/adt/ruleutils.cpp | 66 +---------------- contrib/dolphin/sql/default_function.sql | 29 ++++++++ 10 files changed, 137 insertions(+), 103 deletions(-) diff --git a/contrib/dolphin/expected/case_sensitive_test/schema_test.out b/contrib/dolphin/expected/case_sensitive_test/schema_test.out index cf5cc50af..e8483e0af 100644 --- a/contrib/dolphin/expected/case_sensitive_test/schema_test.out +++ b/contrib/dolphin/expected/case_sensitive_test/schema_test.out @@ -50,11 +50,11 @@ WARNING: TEST is not a valid encoding name. 
default value set ALTER SCHEMA test_1 WITHOUT BLOCKCHAIN; ALTER SCHEMA Test_1 WITHOUT BLOCKCHAIN; ALTER SCHEMA test_1 CHARSET DEFAULT; -ERROR: syntax error at or near "DEFAULT;" +ERROR: syntax error at or near "DEFAULT" LINE 1: ALTER SCHEMA test_1 CHARSET DEFAULT; ^ ALTER SCHEMA Test_1 CHARSET DEFAULT; -ERROR: syntax error at or near "DEFAULT;" +ERROR: syntax error at or near "DEFAULT" LINE 1: ALTER SCHEMA Test_1 CHARSET DEFAULT; ^ ALTER SCHEMA test_1 RENAME TO Test_1_BAK; diff --git a/contrib/dolphin/expected/default_function.out b/contrib/dolphin/expected/default_function.out index 271e0d6d7..3ed2c21f2 100644 --- a/contrib/dolphin/expected/default_function.out +++ b/contrib/dolphin/expected/default_function.out @@ -112,5 +112,58 @@ select default(b) from a; ERROR: Invalid default value. DETAIL: the 2th column of a doesn't have a default value CONTEXT: referenced column: mode_b_default +drop table t1; +create table t1(a int default(1+2)); +select default(a) from t1; + mode_b_default +---------------- +(0 rows) + +insert into t1 values(default); +select default(a) from t1; + mode_b_default +---------------- + 3 +(1 row) + +create table t3 as select default(a) from t1; +select * from t3; + mode_b_default +---------------- + 3 +(1 row) + +create table t4(a int default(abs(4+1) - abs(2-1))); +insert into t4 values (1); +select default(a) from t4; + mode_b_default +---------------- + 4 +(1 row) + +create table t5(id int default (2 + (2 *3)), name int default (2 * (2 + 3))); +insert into t5 values (default); +create table t6(id int default (8 + (1 *3)), name int default (8 * (1 + 3))) as select default(id) as id, default (name) as name from t5; +select * from t6; + id | name +----+------ + 8 | 10 +(1 row) + +create table t7(c1 varchar(20) default concat('hello', ' world'), c2 varchar(20) default concat('hello', ' world'), c3 timestamp(6) default now(), c4 text default repeat('hello',2), +c5 bytea default E'\\000'::bytea, c6 int default 1+(2*3), c7 timestamp default 
timeofday()::timestamp, c8 number(9,1) default 10+238/5*3, c9 date default current_date, c10 date); +insert into t7 values (default); +select * from t7; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 +-------------+-------------+------------------------+------------+------+----+------------------------+-------+------------+----- +--?.* +(1 row) + +drop table t1; +drop table t3; +drop table t4; +drop table t5; +drop table t6; +drop table t7; \c postgres drop database if exists db_default; diff --git a/contrib/dolphin/expected/test_default.out b/contrib/dolphin/expected/test_default.out index 1e220461c..bb0d7b88d 100644 --- a/contrib/dolphin/expected/test_default.out +++ b/contrib/dolphin/expected/test_default.out @@ -4,18 +4,18 @@ create schema test_default; set current_schema = test_default; create table t1(a timestamp default pg_systimestamp()::timestamp(0)); \d t1; - Table "test_default.t1" - Column | Type | Modifiers ---------+-----------------------------+-------------------------------------------------------- - a | timestamp(0) with time zone | default pg_systimestamp()::timestamp(0) with time zone + Table "test_default.t1" + Column | Type | Modifiers +--------+-----------------------------+---------------------------------------------------------- + a | timestamp(0) with time zone | default (pg_systimestamp())::timestamp(0) with time zone show create table t1; - Table | Create Table --------+------------------------------------------------------------------------------------------ - t1 | SET search_path = test_default; + - | CREATE TABLE t1 ( + - | a timestamp(0) with time zone DEFAULT pg_systimestamp()::timestamp(0) with time zone+ - | ) + + Table | Create Table +-------+-------------------------------------------------------------------------------------------- + t1 | SET search_path = test_default; + + | CREATE TABLE t1 ( + + | a timestamp(0) with time zone DEFAULT (pg_systimestamp())::timestamp(0) with time zone+ + | ) + | WITH (orientation=row, 
compression=no); (1 row) @@ -40,37 +40,37 @@ show create table t2; create table t3(a timestamp default pg_systimestamp()::timestamp(0), b int GENERATED ALWAYS AS (length((a)::text)) STORED); \d t3 - Table "test_default.t3" - Column | Type | Modifiers ---------+-----------------------------+-------------------------------------------------------- - a | timestamp(0) with time zone | default pg_systimestamp()::timestamp(0) with time zone + Table "test_default.t3" + Column | Type | Modifiers +--------+-----------------------------+---------------------------------------------------------- + a | timestamp(0) with time zone | default (pg_systimestamp())::timestamp(0) with time zone b | integer | generated always as (length((a)::text)) stored show create table t3; - Table | Create Table --------+------------------------------------------------------------------------------------------- - t3 | SET search_path = test_default; + - | CREATE TABLE t3 ( + - | a timestamp(0) with time zone DEFAULT pg_systimestamp()::timestamp(0) with time zone,+ - | b integer GENERATED ALWAYS AS (length((a)::text)) STORED + - | ) + + Table | Create Table +-------+--------------------------------------------------------------------------------------------- + t3 | SET search_path = test_default; + + | CREATE TABLE t3 ( + + | a timestamp(0) with time zone DEFAULT (pg_systimestamp())::timestamp(0) with time zone,+ + | b integer GENERATED ALWAYS AS (length((a)::text)) STORED + + | ) + | WITH (orientation=row, compression=no); (1 row) create table t4(a timestamp default pg_systimestamp()::timestamp(0) on update current_timestamp); \d t4 - Table "test_default.t4" - Column | Type | Modifiers ---------+-----------------------------+---------------------------------------------------------------------------------------------------- - a | timestamp(0) with time zone | default pg_systimestamp()::timestamp(0) with time zone on update b_db_statement_start_timestamp(0) + Table "test_default.t4" + Column | Type 
| Modifiers +--------+-----------------------------+------------------------------------------------------------------------------------------------------ + a | timestamp(0) with time zone | default (pg_systimestamp())::timestamp(0) with time zone on update b_db_statement_start_timestamp(0) show create table t4; - Table | Create Table --------+------------------------------------------------------------------------------------------------------------------------- - t4 | SET search_path = test_default; + - | CREATE TABLE t4 ( + - | a timestamp(0) with time zone DEFAULT pg_systimestamp()::timestamp(0) with time zone ON UPDATE CURRENT_TIMESTAMP(0)+ - | ) + + Table | Create Table +-------+--------------------------------------------------------------------------------------------------------------------------- + t4 | SET search_path = test_default; + + | CREATE TABLE t4 ( + + | a timestamp(0) with time zone DEFAULT (pg_systimestamp())::timestamp(0) with time zone ON UPDATE CURRENT_TIMESTAMP(0)+ + | ) + | WITH (orientation=row, compression=no); (1 row) @@ -107,11 +107,11 @@ show create table test; -------+---------------------------------------------------------- test | SET search_path = test_default; + | CREATE TABLE test ( + - | id bigint DEFAULT -1 NOT NULL, + - | name integer DEFAULT -1 NOT NULL, + - | col1 real DEFAULT -1.1, + + | id bigint DEFAULT (-1)::bigint NOT NULL, + + | name integer DEFAULT (-1) NOT NULL, + + | col1 real DEFAULT (-1.1)::real, + | col2 double precision DEFAULT 2.2::double precision,+ - | col3 real DEFAULT -1100 + + | col3 real DEFAULT (-1100)::real + | ) + | WITH (orientation=row, compression=no); (1 row) diff --git a/contrib/dolphin/include/plugin_postgres.h b/contrib/dolphin/include/plugin_postgres.h index 99e4757c2..019d13100 100644 --- a/contrib/dolphin/include/plugin_postgres.h +++ b/contrib/dolphin/include/plugin_postgres.h @@ -165,6 +165,7 @@ typedef struct BSqlPluginContext { bool is_binary_proto; bool is_ast_stmt; bool 
group_by_error; + bool is_create_alter_stmt; #endif } bSqlPluginContext; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index d6a428bc3..0297252ee 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -39321,6 +39321,7 @@ parser_init(base_yy_extra_type *yyext) yyext->core_yy_extra.paren_depth = 0; GetSessionContext()->isUpsert = false; GetSessionContext()->is_schema_name = false; + GetSessionContext()->is_create_alter_stmt = false; } static Expr * diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index bb168089b..a13ddc6ab 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -1970,7 +1970,7 @@ static Node* HandleDefaultFunction(ParseState* pstate, FuncCall* fn) tempFuncName = TextDatumGetCString(adsrcVal); char* firstLocation = strchr(tempFuncName, '('); bool temp_result = false; - if (firstLocation != NULL) { + if (firstLocation != NULL && firstLocation - tempFuncName > 0) { int funcNameLength = firstLocation - tempFuncName; char* funcName = (char*)palloc0(funcNameLength + 1); errno_t rc = memcpy_s(funcName, funcNameLength, tempFuncName, funcNameLength); diff --git a/contrib/dolphin/plugin_parser/parser.cpp b/contrib/dolphin/plugin_parser/parser.cpp index 18d26a9b5..b5d2c2424 100644 --- a/contrib/dolphin/plugin_parser/parser.cpp +++ b/contrib/dolphin/plugin_parser/parser.cpp @@ -771,6 +771,9 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) } break; case DEFAULT: + if (GetSessionContext()->is_create_alter_stmt) { + break; + } /* * DEFAULT must be reduced to one token, to allow START as table / column alias. 
*/ @@ -880,6 +883,16 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) break; } break; + case CREATE: + case ALTER: + if (pg_strncasecmp(yyextra->core_yy_extra.scanbuf, "alter", strlen("alter")) == 0 || + pg_strncasecmp(yyextra->core_yy_extra.scanbuf, "create", strlen("create")) == 0) { + GetSessionContext()->is_create_alter_stmt = true; + } + break; + case SELECT: + GetSessionContext()->is_create_alter_stmt = false; + break; #endif default: break; diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index e0a6ec9f9..9c0f4b575 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -920,6 +920,7 @@ void init_session_vars(void) cxt->is_binary_proto = false; cxt->is_ast_stmt = false; cxt->group_by_error = false; + cxt->is_create_alter_stmt = false; DefineCustomBoolVariable("dolphin.b_compatibility_mode", "Enable mysql behavior override opengauss's when collision happens.", diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index 8491cc6bf..e09b17cec 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -337,9 +337,6 @@ static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node* processIndirection(Node* node, deparse_context* context, bool printit); static void printSubscripts(ArrayRef* aref, deparse_context* context); static char* get_relation_name(Oid relid); -#ifdef DOLPHIN -static bool getHasDefault(char* exprstr, TupleDesc tupdesc, AttrDefault* defval); -#endif static char* generate_relation_name(Oid relid, List* namespaces); static char* generate_function_name( Oid funcid, int nargs, List* argnames, Oid* argtypes, bool was_variadic, bool* use_variadic_p); @@ -4277,46 +4274,9 @@ Datum pg_get_expr(PG_FUNCTION_ARGS) } else relname = NULL; -#ifdef DOLPHIN - char* exprstr = NULL; - Relation rel = NULL; - TupleDesc tupdesc 
= NULL; - AttrDefault* defval = NULL; - bool hasDefault = false; - - if (OidIsValid(relid)) { - exprstr = text_to_cstring(expr); - rel = heap_open(relid, AccessShareLock); - tupdesc = RelationGetDescr(rel); - if (tupdesc->constr != NULL && tupdesc->constr->defval != NULL) { - defval = tupdesc->constr->defval; - hasDefault = getHasDefault(exprstr, tupdesc, defval); - } - pfree_ext(exprstr); - heap_close(rel, AccessShareLock); - } - PG_RETURN_TEXT_P(pg_get_expr_worker(expr, relid, relname, hasDefault ? 1 : 0)); -#else PG_RETURN_TEXT_P(pg_get_expr_worker(expr, relid, relname, 0)); -#endif } -#ifdef DOLPHIN -static bool getHasDefault(char* exprstr, TupleDesc tupdesc, AttrDefault* defval) -{ - for (size_t i = 0; i < tupdesc->constr->num_defval; i++) { - /* Determine whether the column is a generated column */ - if (defval[i].adbin != NULL && defval[i].generatedCol != ATTRIBUTE_GENERATED_STORED && - /* Determine whether the default value expression of the table is the same as the expr */ - strcmp(defval[i].adbin, exprstr) == 0) { - return true; - } - } - - return false; -} -#endif - Datum pg_get_expr_ext(PG_FUNCTION_ARGS) { text* expr = PG_GETARG_TEXT_P(0); @@ -11190,11 +11150,6 @@ static void get_const_expr(Const* constval, deparse_context* context, int showty bool isfloat = false; bool needlabel = false; bool skip_collation = false; -#ifdef DOLPHIN - bool without_cast = false; - const char *left_bracket = PRETTY_PAREN(context) ? "" : "("; - const char *right_bracket = PRETTY_PAREN(context) ? 
"" : ")"; -#endif if (constval->constisnull || constval->ismaxvalue) { /* * Always label the type of a NULL/MAXVALUE constant to @@ -11270,23 +11225,13 @@ static void get_const_expr(Const* constval, deparse_context* context, int showty if (strspn(extval, "0123456789+-eE.") == strlen(extval)) { if (!iseq) { if (extval[0] == '+' || extval[0] == '-') { -#ifdef DOLPHIN - without_cast = PRETTY_PAREN(context); - appendStringInfo(buf, "%s%s%s", left_bracket, priStr, right_bracket); -#else appendStringInfo(buf, "(%s)", priStr); -#endif } else { appendStringInfoString(buf, priStr); } } else { if (extval[0] == '+' || extval[0] == '-') { -#ifdef DOLPHIN - without_cast = PRETTY_PAREN(context); - appendStringInfo(buf, "%s%s%s", left_bracket, extval, right_bracket); -#else appendStringInfo(buf, "(%s)", extval); -#endif } else { appendStringInfoString(buf, extval); } @@ -11324,12 +11269,7 @@ static void get_const_expr(Const* constval, deparse_context* context, int showty */ if (strspn(extval, "0123456789+-eE.") == strlen(extval)) { if (extval[0] == '+' || extval[0] == '-') { -#ifdef DOLPHIN - without_cast = PRETTY_PAREN(context); - appendStringInfo(buf, "%s%s%s", left_bracket, extval, right_bracket); -#else appendStringInfo(buf, "(%s)", extval); -#endif } else { appendStringInfoString(buf, extval); } @@ -11396,11 +11336,7 @@ static void get_const_expr(Const* constval, deparse_context* context, int showty needlabel = true; break; } - if ( -#ifdef DOLPHIN - !without_cast && -#endif - (needlabel || showtype > 0)) + if (needlabel || showtype > 0) { appendStringInfo(buf, "::%s", format_type_with_typemod(constval->consttype, constval->consttypmod)); } diff --git a/contrib/dolphin/sql/default_function.sql b/contrib/dolphin/sql/default_function.sql index 499c55627..feaf5f6e9 100644 --- a/contrib/dolphin/sql/default_function.sql +++ b/contrib/dolphin/sql/default_function.sql @@ -41,5 +41,34 @@ insert into a set a=1, b=1; update a set a = default(b); select default(b) from a; +drop table 
t1; +create table t1(a int default(1+2)); +select default(a) from t1; +insert into t1 values(default); +select default(a) from t1; +create table t3 as select default(a) from t1; +select * from t3; + +create table t4(a int default(abs(4+1) - abs(2-1))); +insert into t4 values (1); +select default(a) from t4; + +create table t5(id int default (2 + (2 *3)), name int default (2 * (2 + 3))); +insert into t5 values (default); +create table t6(id int default (8 + (1 *3)), name int default (8 * (1 + 3))) as select default(id) as id, default (name) as name from t5; +select * from t6; + +create table t7(c1 varchar(20) default concat('hello', ' world'), c2 varchar(20) default concat('hello', ' world'), c3 timestamp(6) default now(), c4 text default repeat('hello',2), +c5 bytea default E'\\000'::bytea, c6 int default 1+(2*3), c7 timestamp default timeofday()::timestamp, c8 number(9,1) default 10+238/5*3, c9 date default current_date, c10 date); +insert into t7 values (default); +select * from t7; + +drop table t1; +drop table t3; +drop table t4; +drop table t5; +drop table t6; +drop table t7; + \c postgres drop database if exists db_default; -- Gitee From 4c1696b5cde9f9dfe8b84bae05e1c35cf11fc645 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Tue, 28 Nov 2023 14:53:10 +0800 Subject: [PATCH 083/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E4=BF=AE=E5=A4=8DI8INTDm?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../translate/CTranslatorDXLToPlStmt.cpp | 33 +++++++++++-------- contrib/whale/include/plugin_postgres.h | 3 +- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index 006bcae0d..19428af97 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ 
b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -3511,7 +3511,7 @@ CTranslatorDXLToPlStmt::TranslateDXLAppend( //plan->nMotionNodes = 0; append->appendplans = NIL; - int max_num_exec_nodes = 0; + int max_num_exec_nodes = 0; // translate children CDXLTranslateContext child_context(m_mp, false, output_context->GetColIdToParamIdMap()); @@ -3524,19 +3524,17 @@ CTranslatorDXLToPlStmt::TranslateDXLAppend( SPQOS_ASSERT(NULL != child_plan && "child plan cannot be NULL"); - /* SPQ: add exec_nodes for plan */ - if (ul == EdxlappendIndexFirstChild) { - plan->exec_nodes = ng_get_dest_execnodes(child_plan); - max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); - } - - if (max_num_exec_nodes < list_length(plan->exec_nodes->nodeList)) { - plan->exec_nodes = ng_get_dest_execnodes(child_plan); - max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); - } - append->appendplans = spqdb::LAppend(append->appendplans, child_plan); - //plan->nMotionNodes += child_plan->nMotionNodes; + + /* SPQ: add exec_nodes for plan */ + if (child_plan->exec_nodes == NULL) { + SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, + SPQOS_WSZ_LIT("exec_nodes cannot be NULL")); + } + if (max_num_exec_nodes < list_length(plan->exec_nodes->nodeList)) { + plan->exec_nodes = ng_get_dest_execnodes(child_plan); + max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); + } } CDXLNode *project_list_dxlnode = (*append_dxlnode)[EdxlappendIndexProjList]; @@ -4042,6 +4040,10 @@ CTranslatorDXLToPlStmt::TranslateDXLSequence( /* SPQ: add exec_nodes for plan */ int max_num_exec_nodes = 0; plan->exec_nodes = ng_get_dest_execnodes(last_child_plan); + if (plan->exec_nodes == NULL) { + SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, + SPQOS_WSZ_LIT("exec_nodes cannot be NULL in last_child of Sequence")); + } max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); // translate the rest of the children for (ULONG ul = 1; ul < 
arity - 1; ul++) @@ -4052,7 +4054,10 @@ CTranslatorDXLToPlStmt::TranslateDXLSequence( child_dxlnode, &child_context, ctxt_translation_prev_siblings); psequence->subplans = spqdb::LAppend(psequence->subplans, child_plan); - //plan->nMotionNodes += child_plan->nMotionNodes; + if (child_plan->exec_nodes == NULL) { + SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, + SPQOS_WSZ_LIT("exec_nodes cannot be NULL in Sequence")); + } if (max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { plan->exec_nodes = ng_get_dest_execnodes(child_plan); max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); diff --git a/contrib/whale/include/plugin_postgres.h b/contrib/whale/include/plugin_postgres.h index 7b8584aed..6272e9148 100644 --- a/contrib/whale/include/plugin_postgres.h +++ b/contrib/whale/include/plugin_postgres.h @@ -73,7 +73,6 @@ typedef struct ASqlPluginContext { int pipe_sid; /* session id */ orafce_pipe* pipes = NULL; LWLockId shmem_lockid; -// LWLockTranche tranche; /* putline.c */ bool is_server_output = false; char *buffer = NULL; @@ -129,4 +128,4 @@ typedef struct ASqlPluginContext { ASqlPluginContext* GetSessionContext(); -#endif \ No newline at end of file +#endif -- Gitee From 409725e1433dbf35fefe6ae3096b4229968a10f7 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Mon, 27 Nov 2023 09:51:28 +0800 Subject: [PATCH 084/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=20SPQ=E5=A4=9A=E6=9C=BA=E5=B9=B6=E8=A1=8C?= =?UTF-8?q?=E6=94=AF=E6=8C=81=E7=B4=A2=E5=BC=95=E7=B1=BB=E6=89=AB=E6=8F=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../include/executor/spq_bitmapheapscan.h | 21 + .../include/executor/spq_indexonlyscan.h | 20 + .../include/executor/spq_indexscan.h | 21 + .../spq_plugin/include/executor/spq_seqscan.h | 1 + contrib/spq_plugin/include/spq/spq_mutate.h | 8 +- .../translate/CContextDXLToPlStmt.h | 18 + 
contrib/spq_plugin/src/executor/Makefile | 2 +- .../src/executor/spq_bitmapheapscan.cpp | 515 ++++++++++++++++++ .../src/executor/spq_indexonlyscan.cpp | 292 ++++++++++ .../spq_plugin/src/executor/spq_indexscan.cpp | 195 +++++++ contrib/spq_plugin/src/guc_spq.cpp | 32 ++ contrib/spq_plugin/src/spq/spq_mutate.cpp | 136 +++-- contrib/spq_plugin/src/spq/spq_plan.cpp | 29 +- .../include/naucrates/traceflags/traceflags.h | 7 +- .../CPhysicalLeftOuterIndexNLJoin.cpp | 11 + .../src/translate/CTranslatorExprToDXL.cpp | 15 +- .../config/CConfigParamMapping.cpp | 4 + .../src/spq_optimizer_util/spq_wrappers.cpp | 6 +- .../translate/CContextDXLToPlStmt.cpp | 58 +- .../translate/CTranslatorDXLToPlStmt.cpp | 172 +++--- .../translate/CTranslatorRelcacheToDXL.cpp | 18 +- contrib/spq_plugin/src/spqplugin.cpp | 9 + contrib/whale/include/plugin_postgres.h | 4 +- 23 files changed, 1470 insertions(+), 124 deletions(-) create mode 100644 contrib/spq_plugin/include/executor/spq_bitmapheapscan.h create mode 100644 contrib/spq_plugin/include/executor/spq_indexonlyscan.h create mode 100644 contrib/spq_plugin/include/executor/spq_indexscan.h create mode 100644 contrib/spq_plugin/src/executor/spq_bitmapheapscan.cpp create mode 100644 contrib/spq_plugin/src/executor/spq_indexonlyscan.cpp create mode 100644 contrib/spq_plugin/src/executor/spq_indexscan.cpp diff --git a/contrib/spq_plugin/include/executor/spq_bitmapheapscan.h b/contrib/spq_plugin/include/executor/spq_bitmapheapscan.h new file mode 100644 index 000000000..0d5e01955 --- /dev/null +++ b/contrib/spq_plugin/include/executor/spq_bitmapheapscan.h @@ -0,0 +1,21 @@ +/* ------------------------------------------------------------------------- +* spq_bitmap_heapscan.h +* +* Portions Copyright (c) 2023, Huawei Technologies Co.,Ltd. 
+* +* ------------------------------------------------------------------------- + */ +#ifndef SPQ_BITMAPHEAPSCAN_H +#define SPQ_BITMAPHEAPSCAN_H + +#include "nodes/execnodes.h" +#include "executor/node/nodeSpqBitmapHeapscan.h" + +IndexScanState* ExecSpqInitBitmapHeapScan(BitmapHeapScan* node, EState* estate, int eflags); +TupleTableSlot* ExecSpqBitmapHeapScan(PlanState* state); +TupleTableSlot* BitmapHeapTblNext(BitmapHeapScanState* node); + +void init_spqbitmapheapscan_hook(); +void restore_spqbitmapheapscan_hook(); + +#endif // SPQ_BITMAPHEAPSCAN_H diff --git a/contrib/spq_plugin/include/executor/spq_indexonlyscan.h b/contrib/spq_plugin/include/executor/spq_indexonlyscan.h new file mode 100644 index 000000000..12434be99 --- /dev/null +++ b/contrib/spq_plugin/include/executor/spq_indexonlyscan.h @@ -0,0 +1,20 @@ +/* ------------------------------------------------------------------------- +* spq_indexonlyscan.h +* +* Portions Copyright (c) 2023 Huawei Technologies Co.,Ltd. +* ------------------------------------------------------------------------- + */ +#ifndef SPQ_INDEXONLYSCAN_H +#define SPQ_INDEXONLYSCAN_H + +#include "nodes/execnodes.h" +#include "executor/node/nodeSpqIndexonlyscan.h" + +IndexScanState* ExecInitSpqIndexOnlyScan(SpqIndexScan* node, EState* estate, int eflags); +TupleTableSlot* ExecSpqIndexOnlyScan(PlanState* state); +TupleTableSlot* SpqIndexOnlyNext(IndexOnlyScanState* node); + +void init_spqindexonlyscan_hook(); +void restore_spqindexonlyscan_hook(); + +#endif // SPQ_INDEXONLYSCAN_H diff --git a/contrib/spq_plugin/include/executor/spq_indexscan.h b/contrib/spq_plugin/include/executor/spq_indexscan.h new file mode 100644 index 000000000..10006f905 --- /dev/null +++ b/contrib/spq_plugin/include/executor/spq_indexscan.h @@ -0,0 +1,21 @@ +/* ------------------------------------------------------------------------- +* spq_indexscan.h +* +* Portions Copyright (c) 2023 Huawei Technologies Co.,Ltd. 
+* +* ------------------------------------------------------------------------- + */ +#ifndef SPQ_INDEXSCAN_H +#define SPQ_INDEXSCAN_H + +#include "nodes/execnodes.h" +#include "executor/node/nodeSpqIndexscan.h" + +IndexScanState* ExecInitSpqIndexScan(SpqIndexScan* node, EState* estate, int eflags); +TupleTableSlot* ExecSpqIndexScan(PlanState* state); +TupleTableSlot* SpqIndexNext(IndexScanState* node); + +void init_spqindexscan_hook(); +void restore_spqindexscan_hook(); + +#endif // SPQ_INDEXSCAN_H diff --git a/contrib/spq_plugin/include/executor/spq_seqscan.h b/contrib/spq_plugin/include/executor/spq_seqscan.h index 11a96ccbf..1e1e2ed61 100644 --- a/contrib/spq_plugin/include/executor/spq_seqscan.h +++ b/contrib/spq_plugin/include/executor/spq_seqscan.h @@ -20,4 +20,5 @@ void ExecReScanSpqSeqScan(SpqSeqScanState* node); void init_spqseqscan_hook(); void restore_spqseqscan_hook(); +extern void GetInstanceIDAndSliceNumber(PlannedStmt* stmt, int totaldop, int &sliceNumber, int &instanceID); #endif // SPQ_SEQSCAN_H \ No newline at end of file diff --git a/contrib/spq_plugin/include/spq/spq_mutate.h b/contrib/spq_plugin/include/spq/spq_mutate.h index 32770c2a1..d96bc92fb 100644 --- a/contrib/spq_plugin/include/spq/spq_mutate.h +++ b/contrib/spq_plugin/include/spq/spq_mutate.h @@ -19,12 +19,18 @@ #include "nodes/nodes.h" #include "nodes/plannodes.h" + +typedef struct SpqSliceContext { + PlannedStmt *result; + int curentIndex; +} SpqSliceContext; + extern void collect_shareinput_producers(PlannerInfo *root, Plan *plan); extern Plan *replace_shareinput_targetlists(PlannerInfo *root, Plan *plan); extern Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root); extern void remove_subquery_in_RTEs(Node *node); extern bool is_plan_node(Node *node); extern void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal *glob); -extern Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top); +extern Plan 
*replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *result, Plan *plan, bool &top); #endif /* SPQ_MUTATE_H */ diff --git a/contrib/spq_plugin/include/spq_optimizer_util/translate/CContextDXLToPlStmt.h b/contrib/spq_plugin/include/spq_optimizer_util/translate/CContextDXLToPlStmt.h index e54281f7f..f2b34bb1e 100644 --- a/contrib/spq_plugin/include/spq_optimizer_util/translate/CContextDXLToPlStmt.h +++ b/contrib/spq_plugin/include/spq_optimizer_util/translate/CContextDXLToPlStmt.h @@ -117,6 +117,10 @@ private: // list of all subplan entries List **m_subplan_entries_list; + List *m_subplan_sliceids_list; + + List *m_slices_list; + PlanSlice *m_current_slice; // index of the target relation in the rtable or 0 if not a DML statement ULONG m_result_relation_index; @@ -199,6 +203,20 @@ public: void IncrementPartitionSelectors(ULONG scan_id); void AddSubplan(Plan *); + PlanSlice *GetSlices(int *numSlices_p); + // add a slice table entry + int AddSlice(PlanSlice *); + int *GetSubplanSliceIdArray(); + + PlanSlice *GetCurrentSlice() const + { + return m_current_slice; + } + + void SetCurrentSlice(PlanSlice *slice) + { + m_current_slice = slice; + } // add CTAS information void AddCtasInfo(IntoClause *into_clause, GpPolicy *distribution_policy); diff --git a/contrib/spq_plugin/src/executor/Makefile b/contrib/spq_plugin/src/executor/Makefile index ae9626435..48b0a717b 100644 --- a/contrib/spq_plugin/src/executor/Makefile +++ b/contrib/spq_plugin/src/executor/Makefile @@ -9,6 +9,6 @@ override CPPFLAGS := -I$(abs_top_srcdir)/contrib/ndpplugin/component/thread \ -I$(abs_top_srcdir)/src/include \ $(CPPFLAGS) -OBJS = spq_seqscan.o +OBJS = spq_seqscan.o spq_indexscan.o spq_indexonlyscan.o spq_bitmapheapscan.o include $(top_srcdir)/src/gausskernel/common.mk \ No newline at end of file diff --git a/contrib/spq_plugin/src/executor/spq_bitmapheapscan.cpp b/contrib/spq_plugin/src/executor/spq_bitmapheapscan.cpp new file mode 100644 index 000000000..0e3abe84a --- /dev/null 
+++ b/contrib/spq_plugin/src/executor/spq_bitmapheapscan.cpp @@ -0,0 +1,515 @@ +/* ------------------------------------------------------------------------- +* +* spq_bitmapheapscan.cpp +* Routines to support indexed scans of relations +* +* Portions Copyright (c) 2023 Huawei Technologies Co.,Ltd. +* +* +* IDENTIFICATION +* spq_bitmapheapscan.cpp +* +* ------------------------------------------------------------------------- +* +* INTERFACE ROUTINES +* ExecSpqBitmapHeapNext workhorse for above +* ExecSpqInitBitmapHeapScan creates and initializes state info. + */ +#include "postgres.h" +#include "knl/knl_variable.h" +#include "executor/executor.h" + +#include "access/relscan.h" +#include "executor/node/nodeBitmapHeapscan.h" +#include "executor/spq_bitmapheapscan.h" +#include "executor/spq_seqscan.h" +#include "access/tableam.h" +#include "commands/cluster.h" +#include "storage/tcap.h" + +THR_LOCAL init_spqbitmapheapscan_hook_type backup_init_spqbitmapheapscan_hook = NULL; +THR_LOCAL exec_spqbitmapheapscan_hook_type backup_exec_spqbitmapheapscan_hook = NULL; + +/* ---------------------------------------------------------------- + * SpqBitmapHeapTblNext + * + * Retrieve next tuple from the BitmapHeapScan node's currentRelation + * ---------------------------------------------------------------- + */ +static TupleTableSlot* SpqBitmapHeapTblNext(BitmapHeapScanState* node) +{ + ExprContext* econtext = NULL; + TableScanDesc scan = NULL; + TIDBitmap* tbm = NULL; + TBMHandler tbm_handler; + TBMIterator* tbmiterator = NULL; + TBMIterateResult* tbmres = NULL; + HBktTblScanDesc hpscan = NULL; + +#ifdef USE_PREFETCH + TBMIterator* prefetch_iterator = NULL; +#endif + TupleTableSlot* slot = NULL; + + /* + * extract necessary information from index scan node + */ + econtext = node->ss.ps.ps_ExprContext; + slot = node->ss.ss_ScanTupleSlot; + if (node->ss.ss_currentRelation != NULL && RelationIsPartitionedHashBucketTable(node->ss.ss_currentRelation)) { + 
Assert(node->ss.ss_currentScanDesc != NULL); + hpscan = (HBktTblScanDesc)node->ss.ss_currentScanDesc; + scan = (TableScanDesc)hpscan->currBktScan; + } else { + scan = GetTableScanDesc(node->ss.ss_currentScanDesc, node->ss.ss_currentRelation); + } + tbm = node->tbm; + tbmiterator = node->tbmiterator; + tbmres = node->tbmres; +#ifdef USE_PREFETCH + prefetch_iterator = node->prefetch_iterator; +#endif + + /* + * If we haven't yet performed the underlying index scan, do it, and begin + * the iteration over the bitmap. + * + * For prefetching, we use *two* iterators, one for the pages we are + * actually scanning and another that runs ahead of the first for + * prefetching. node->prefetch_pages tracks exactly how many pages ahead + * the prefetch iterator is. Also, node->prefetch_target tracks the + * desired prefetch distance, which starts small and increases up to the + * GUC-controlled maximum, target_prefetch_pages. This is to avoid doing + * a lot of prefetching in a scan that stops after a few tuples because of + * a LIMIT. + */ + if (tbm == NULL) { + tbm = (TIDBitmap*)MultiExecProcNode(outerPlanState(node)); + tbm_handler = tbm_get_handler(tbm); + + if (tbm == NULL || !IsA(tbm, TIDBitmap)) { + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmodule(MOD_EXECUTOR), + errmsg("unrecognized result from subplan for BitmapHeapScan."))); + } + + node->tbm = tbm; + node->tbmiterator = tbmiterator = tbm_handler._begin_iterate(tbm); + node->tbmres = tbmres = NULL; + +#ifdef USE_PREFETCH + if (u_sess->storage_cxt.target_prefetch_pages > 0) { + node->prefetch_iterator = prefetch_iterator = tbm_handler._begin_iterate(tbm); + node->prefetch_pages = 0; + node->prefetch_target = -1; + } +#endif + } + + /* + * Now tbm is not NULL, we have enough information to + * determine whether need to assign hpscan. Also need + * to make sure we are not scanning a virtual hashbucket + * table. 
+ */ + if (hpscan == NULL && tbm_is_crossbucket(tbm) && RELATION_OWN_BUCKET(node->ss.ss_currentScanDesc->rs_rd)) { + hpscan = (HBktTblScanDesc)node->ss.ss_currentScanDesc; + } + + for (;;) { + /* + * Get next page of results if needed + */ + if (tbmres == NULL) { + node->tbmres = tbmres = tbm_iterate(tbmiterator); + if (tbmres == NULL) { + /* no more entries in the bitmap */ + break; + } + +#ifdef USE_PREFETCH + if (node->prefetch_pages > 0) { + /* The main iterator has closed the distance by one page */ + node->prefetch_pages--; + } else if (prefetch_iterator != NULL) { + /* Do not let the prefetch iterator get behind the main one */ + TBMIterateResult* tbmpre = tbm_iterate(prefetch_iterator); + + if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno) { + ereport(ERROR, + (errcode(ERRCODE_DATA_EXCEPTION), + errmodule(MOD_EXECUTOR), + errmsg("prefetch and main iterators are out of sync for BitmapHeapScan."))); + } + } +#endif /* USE_PREFETCH */ + + int rc = TableScanBitmapNextTargetRel(scan, node); + if (rc != 0) { + /* + * If the current partition is invalid, + * the next page is directly processed. + */ + tbmres = NULL; +#ifdef USE_PREFETCH + if (rc == 1) { + BitmapHeapPrefetchNext(node, scan, tbm, &prefetch_iterator); + } +#endif /* USE_PREFETCH */ + continue; + } + + /* update bucket scan */ + if (hpscan != NULL && scan != hpscan->currBktScan) { + scan = hpscan->currBktScan; + } + + /* + * Fetch the current table page and identify candidate tuples. 
+ */ + + HeapScanDesc heap_scan = (HeapScanDesc) scan; + if (NULL == heap_scan->spq_scan) { + if (!TableScanBitmapNextBlock(scan, tbmres, &node->ss.ps.state->have_current_xact_date)) { + node->tbmres = tbmres = NULL; + continue; + } + } + else { + BlockNumber unitno = SPQSCAN_BlockNum2UnitNum(tbmres->blockno); + if ((unitno % heap_scan->spq_scan->slice_num) != heap_scan->spq_scan->instance_id) + { + heap_scan->rs_base.rs_ntuples = 0; + } + else{ + if (!TableScanBitmapNextBlock(scan, tbmres, &node->ss.ps.state->have_current_xact_date)) { + node->tbmres = tbmres = NULL; + continue; + } + } + } + + if (tbmres->ntuples >= 0) { + node->exact_pages++; + } else { + node->lossy_pages++; + } + +#ifdef USE_PREFETCH + + /* + * Increase prefetch target if it's not yet at the max. Note that + * we will increase it to zero after fetching the very first + * page/tuple, then to one after the second tuple is fetched, then + * it doubles as later pages are fetched. + */ + if (node->prefetch_target >= u_sess->storage_cxt.target_prefetch_pages) + /* don't increase any further */; + else if (node->prefetch_target >= u_sess->storage_cxt.target_prefetch_pages / 2) + node->prefetch_target = u_sess->storage_cxt.target_prefetch_pages; + else if (node->prefetch_target > 0) + node->prefetch_target *= 2; + else + node->prefetch_target++; +#endif /* USE_PREFETCH */ + } else { + /* + * Continuing in previously obtained page. + */ + +#ifdef USE_PREFETCH + + /* + * Try to prefetch at least a few pages even before we get to the + * second page if we don't stop reading after the first tuple. + */ + if (node->prefetch_target < u_sess->storage_cxt.target_prefetch_pages) + node->prefetch_target++; +#endif /* USE_PREFETCH */ + } + +#ifdef USE_PREFETCH + BitmapHeapPrefetchNext(node, scan, tbm, &prefetch_iterator); +#endif /* USE_PREFETCH */ + + /* + * Attempt to fetch tuple from AM. 
+ */ + if (!TableScanBitmapNextTuple(scan, tbmres, slot)) { + /* nothing more to look at on this page */ + node->tbmres = tbmres = NULL; + continue; + } + + /* + * If we are using lossy info, we have to recheck the qual conditions + * at every tuple. + */ + if (tbmres->recheck) { + econtext->ecxt_scantuple = slot; + ResetExprContext(econtext); + + if (!ExecQual(node->bitmapqualorig, econtext)) { + /* Fails recheck, so drop it and loop back for another */ + InstrCountFiltered2(node, 1); + (void)ExecClearTuple(slot); + continue; + } + } + + /* OK to return this tuple */ + return slot; + } + + /* + * if we get here it means we are at the end of the scan.. + */ + return ExecClearTuple(slot); +} + +TableScanDesc add_spq_scan(BitmapHeapScanState* node, TableScanDesc scan) { + EState* estate = node->ss.ps.state; + HeapScanDesc heap_scan = (HeapScanDesc) scan; + SPQScanDesc spq_scan = NULL; + int slice_num = 0; + int instance_id = 0; + + spq_scan = (SPQScanDescData*)palloc0(sizeof(SPQScanDescData)); + + GetInstanceIDAndSliceNumber(estate->es_plannedstmt, ((Plan*)node->ss.ps.plan)->dop, slice_num, instance_id); + + spq_scan->slice_num = slice_num; + spq_scan->instance_id = instance_id; + heap_scan->spq_scan = spq_scan; + + return (TableScanDesc) scan; +} + +static inline void InitSpqBitmapHeapScanNextMtd(BitmapHeapScanState* bmstate) +{ + if (RELATION_OWN_BUCKET(bmstate->ss.ss_currentRelation)) { + bmstate->ss.ScanNextMtd = (ExecScanAccessMtd)BitmapHbucketTblNext; + return; + } + bmstate->ss.ScanNextMtd = (ExecScanAccessMtd)SpqBitmapHeapTblNext; +} + +/* ---------------------------------------------------------------- + * ExecInitSpqBitmapHeapScan + * + * Initializes the scan's state information. 
+ * ---------------------------------------------------------------- + */ +BitmapHeapScanState* ExecInitSpqBitmapHeapScan(SpqBitmapHeapScan* spqnode, EState* estate, int eflags) +{ + BitmapHeapScan* node = (BitmapHeapScan*) spqnode; + BitmapHeapScanState* scanstate = NULL; + Relation currentRelation; + bool isUstoreRel = false; + Snapshot scanSnap; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); + + /* + * Assert caller didn't ask for an unsafe snapshot --- see comments at + * head of file. + */ + Assert(IsMVCCSnapshot(estate->es_snapshot)); + + /* + * create state structure + */ + scanstate = makeNode(BitmapHeapScanState); + scanstate->ss.ps.plan = (Plan*)node; + scanstate->ss.ps.state = estate; + + scanstate->tbm = NULL; + scanstate->tbmiterator = NULL; + scanstate->tbmres = NULL; + scanstate->exact_pages = 0.0; + scanstate->lossy_pages = 0.0; + scanstate->prefetch_iterator = NULL; + scanstate->prefetch_pages = 0; + scanstate->prefetch_target = 0; + scanstate->ss.isPartTbl = node->scan.isPartTbl; + scanstate->ss.currentSlot = 0; + scanstate->ss.partScanDirection = node->scan.partScanDirection; + scanstate->ss.ps.ExecProcNode = ExecBitmapHeapScan; + + /* initialize Global partition index scan information */ + GPIScanInit(&scanstate->gpi_scan); + + /* initialize cross-bucket index scan information */ + cbi_scan_init(&scanstate->cbi_scan); + + /* + * Miscellaneous initialization + * + * create expression context for node + */ + ExecAssignExprContext(estate, &scanstate->ss.ps); + + /* + * initialize child expressions + */ + if (estate->es_is_flt_frame) { + scanstate->ss.ps.qual = (List*)ExecInitQualByFlatten(node->scan.plan.qual, (PlanState*)scanstate); + scanstate->bitmapqualorig = (List*)ExecInitQualByFlatten(node->bitmapqualorig, (PlanState*)scanstate); + } else { + scanstate->ss.ps.targetlist = (List*)ExecInitExprByRecursion((Expr*)node->scan.plan.targetlist, (PlanState*)scanstate); + scanstate->ss.ps.qual = 
(List*)ExecInitExprByRecursion((Expr*)node->scan.plan.qual, (PlanState*)scanstate); + scanstate->bitmapqualorig = (List*)ExecInitExprByRecursion((Expr*)node->bitmapqualorig, (PlanState*)scanstate); + } + + /* + * open the base relation and acquire appropriate lock on it. + */ + currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid); + + scanstate->ss.ss_currentRelation = currentRelation; + scanstate->gpi_scan->parentRelation = currentRelation; + + isUstoreRel = RelationIsUstoreFormat(currentRelation); + + /* + * tuple table initialization + */ + ExecInitResultTupleSlot(estate, &scanstate->ss.ps, currentRelation->rd_tam_ops); + ExecInitScanTupleSlot(estate, &scanstate->ss, currentRelation->rd_tam_ops); + + InitSpqBitmapHeapScanNextMtd(scanstate); + + /* + * Choose user-specified snapshot if TimeCapsule clause exists, otherwise + * estate->es_snapshot instead. + */ + scanSnap = TvChooseScanSnap(currentRelation, &node->scan, &scanstate->ss); + + /* + * Even though we aren't going to do a conventional seqscan, it is useful + * to create a HeapScanDesc --- most of the fields in it are usable. + */ + if (scanstate->ss.isPartTbl) { + scanstate->ss.ss_currentScanDesc = NULL; + ExecInitPartitionForBitmapHeapScan(scanstate, estate); + + if (node->scan.itrs > 0) { + Partition partition = NULL; + Relation partitiontrel = NULL; + + if (scanstate->ss.partitions != NIL) { + /* construct a dummy table relation with the next table partition for scan */ + partition = (Partition)list_nth(scanstate->ss.partitions, 0); + partitiontrel = partitionGetRelation(currentRelation, partition); + scanstate->ss.ss_currentPartition = partitiontrel; + + /* + * Verify if a DDL operation that froze all tuples in the relation + * occured after taking the snapshot. Skip for explain only commands. 
+ */ + if (isUstoreRel && !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) { + TransactionId relfrozenxid64 = InvalidTransactionId; + getPartitionRelxids(partitiontrel, &relfrozenxid64); + if (TransactionIdPrecedes(FirstNormalTransactionId, scanSnap->xmax) && + !TransactionIdIsCurrentTransactionId(relfrozenxid64) && + TransactionIdPrecedes(scanSnap->xmax, relfrozenxid64)) { + ereport(ERROR, (errcode(ERRCODE_SNAPSHOT_INVALID), + (errmsg("Snapshot too old, BitmapHeapScan is PartTbl, the info: snapxmax is %lu, " + "snapxmin is %lu, csn is %lu, relfrozenxid64 is %lu, globalRecycleXid is %lu.", + scanSnap->xmax, scanSnap->xmin, scanSnap->snapshotcsn, relfrozenxid64, + g_instance.undo_cxt.globalRecycleXid)))); + } + } + + if (IS_SPQ_EXECUTOR && scanstate->ss.ps.plan->spq_scan_partial){ + scanstate->ss.ss_currentScanDesc = add_spq_scan(scanstate, scan_handler_tbl_beginscan_bm( + partitiontrel, scanSnap, 0, NULL, &scanstate->ss)); + } + else + scanstate->ss.ss_currentScanDesc = + scan_handler_tbl_beginscan_bm(partitiontrel, scanSnap, 0, NULL, &scanstate->ss); + + + } + } + } else { + if (!isUstoreRel) { + if (IS_SPQ_EXECUTOR && scanstate->ss.ps.plan->spq_scan_partial) { + scanstate->ss.ss_currentScanDesc = add_spq_scan(scanstate, scan_handler_tbl_beginscan_bm( + currentRelation, scanSnap, 0, NULL, &scanstate->ss)); + } + else + scanstate->ss.ss_currentScanDesc = + scan_handler_tbl_beginscan_bm(currentRelation, scanSnap, 0, NULL, &scanstate->ss); + } else { + /* + * Verify if a DDL operation that froze all tuples in the relation + * occured after taking the snapshot. Skip for explain only commands. 
+ */ + if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) { + TransactionId relfrozenxid64 = InvalidTransactionId; + getRelationRelxids(currentRelation, &relfrozenxid64); + if (TransactionIdPrecedes(FirstNormalTransactionId, scanSnap->xmax) && + !TransactionIdIsCurrentTransactionId(relfrozenxid64) && + TransactionIdPrecedes(scanSnap->xmax, relfrozenxid64)) { + ereport(ERROR, (errcode(ERRCODE_SNAPSHOT_INVALID), + (errmsg("Snapshot too old, BitmapHeapScan is not PartTbl, the info: snapxmax is %lu, " + "snapxmin is %lu, csn is %lu, relfrozenxid64 is %lu, globalRecycleXid is %lu.", + scanSnap->xmax, scanSnap->xmin, scanSnap->snapshotcsn, relfrozenxid64, + g_instance.undo_cxt.globalRecycleXid)))); + } + } + + scanstate->ss.ss_currentScanDesc = UHeapBeginScan(currentRelation, scanSnap, 0, NULL); + } + } + if (scanstate->ss.ss_currentScanDesc == NULL) { + scanstate->ss.ps.stubType = PST_Scan; + } + + /* + * get the scan type from the relation descriptor. + */ + ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation)); + + /* + * Initialize result tuple type and projection info. + */ + ExecAssignResultTypeFromTL( + &scanstate->ss.ps, + scanstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->td_tam_ops); + + ExecAssignScanProjectionInfo(&scanstate->ss); + + Assert(scanstate->ss.ps.ps_ResultTupleSlot->tts_tupleDescriptor->td_tam_ops); + + /* + * initialize child nodes + * + * We do this last because the child nodes will open indexscans on our + * relation's indexes, and we want to be sure we have acquired a lock on + * the relation first. + */ + outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); + + /* + * all done. 
+ */ + return scanstate; +} + +void init_spqbitmapheapscan_hook() +{ + backup_init_spqbitmapheapscan_hook = init_bitmapheapscan_hook; + backup_exec_spqbitmapheapscan_hook = exec_bitmapheapscan_hook; + init_bitmapheapscan_hook = ExecInitSpqBitmapHeapScan; + exec_bitmapheapscan_hook = ExecBitmapHeapScan; +} + +void restore_spqbitmapheapscan_hook() +{ + init_bitmapheapscan_hook = backup_init_spqbitmapheapscan_hook; + exec_bitmapheapscan_hook = backup_exec_spqbitmapheapscan_hook; +} diff --git a/contrib/spq_plugin/src/executor/spq_indexonlyscan.cpp b/contrib/spq_plugin/src/executor/spq_indexonlyscan.cpp new file mode 100644 index 000000000..0ded86a5d --- /dev/null +++ b/contrib/spq_plugin/src/executor/spq_indexonlyscan.cpp @@ -0,0 +1,292 @@ +/* ------------------------------------------------------------------------- + * + * spq_indexonlyscan.cpp + * Routines to support indexed scans of relations + * + * Portions Copyright (c) 2023 Huawei Technologies Co.,Ltd. + * + * + * IDENTIFICATION + * spq_indexonlyscan.cpp + * + * ------------------------------------------------------------------------- + * + * INTERFACE ROUTINES + * ExecSpqIndexOnlyScan scans a relation using an index + * SpqIndexOnlyNext retrieve next tuple using index + */ +#include "postgres.h" +#include "knl/knl_variable.h" +#include "executor/executor.h" +#include "executor/node/nodeIndexonlyscan.h" +#include "executor/spq_indexonlyscan.h" +#include "executor/spq_seqscan.h" +#include "access/tableam.h" +#include "access/visibilitymap.h" +#include "storage/predicate.h" + +THR_LOCAL init_spqindexonlyscan_hook_type backup_init_spqindexonlyscan_hook = NULL; +THR_LOCAL exec_spqindexonlyscan_hook_type backup_exec_spqindexonlyscan_hook = NULL; + +static inline void ReleaseNodeVMBuffer(IndexOnlyScanState* node) +{ + if (node != NULL && (node->ioss_VMBuffer != InvalidBuffer)) { + ReleaseBuffer(node->ioss_VMBuffer); + node->ioss_VMBuffer = InvalidBuffer; + } +} + +inline bool 
ExecGPIGetNextPartRelation(IndexOnlyScanState* node, IndexScanDesc indexScan) +{ + if (IndexScanNeedSwitchPartRel(indexScan)) { + /* Release VM buffer pin, if any. */ + ReleaseNodeVMBuffer(node); + /* Change the heapRelation in indexScanDesc to Partition Relation of current index */ + if (!GPIGetNextPartRelation(indexScan->xs_gpi_scan, CurrentMemoryContext, AccessShareLock)) { + return false; + } + indexScan->heapRelation = indexScan->xs_gpi_scan->fakePartRelation; + } + + return true; +} + +bool IndexOnlyRecheck(IndexOnlyScanState* node, TupleTableSlot* slot) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("EvalPlanQual recheck is not supported in index-only scans"))); + return false; /* keep compiler quiet */ +} + +/* ---------------------------------------------------------------- + * SpqIndexOnlyNext + * + * Retrieve a tuple from the IndexOnlyScan node's index. + * ---------------------------------------------------------------- + */ +TupleTableSlot* SpqIndexOnlyNext(IndexOnlyScanState* node) +{ + EState* estate = NULL; + ExprContext* econtext = NULL; + ScanDirection direction; + IndexScanDesc scandesc; + TupleTableSlot* slot = NULL; + TupleTableSlot* tmpslot = NULL; + ItemPointer tid; + bool isVersionScan = node->ss.isVersionScan; + bool isUHeap = false; + + /* + * extract necessary information from index scan node + */ + estate = node->ss.ps.state; + direction = estate->es_direction; + /* flip direction if this is an overall backward scan */ + if (ScanDirectionIsBackward(((IndexOnlyScan*)node->ss.ps.plan)->indexorderdir)) { + if (ScanDirectionIsForward(direction)) + direction = BackwardScanDirection; + else if (ScanDirectionIsBackward(direction)) + direction = ForwardScanDirection; + } + scandesc = node->ioss_ScanDesc; + econtext = node->ss.ps.ps_ExprContext; + slot = node->ss.ss_ScanTupleSlot; + + if (IS_SPQ_EXECUTOR && scandesc->spq_scan == NULL && node->ss.ps.plan->spq_scan_partial) { + SPQScanDesc spq_scan = NULL; + spq_scan = 
(SPQScanDescData*)palloc0(sizeof(SPQScanDescData)); + GetInstanceIDAndSliceNumber(estate->es_plannedstmt, ((Plan *)node->ss.ps.plan)->dop, + spq_scan->slice_num, + spq_scan->instance_id); + scandesc->spq_scan = spq_scan; + } + + isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation); + if (isUHeap) { + tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation), + false, scandesc->heapRelation->rd_tam_ops); + } + /* + * OK, now that we have what we need, fetch the next tuple. + */ + while ((tid = scan_handler_idx_getnext_tid(scandesc, direction)) != NULL) { + HeapTuple tuple = NULL; + IndexScanDesc indexScan = GetIndexScanDesc(scandesc); + + CHECK_FOR_INTERRUPTS(); + + /* + * We can skip the heap fetch if the TID references a heap page on + * which all tuples are known visible to everybody. In any case, + * we'll use the index tuple not the heap tuple as the data source. + * + * Note on Memory Ordering Effects: visibilitymap_test does not lock + * the visibility map buffer, and therefore the result we read here + * could be slightly stale. However, it can't be stale enough to + * matter. It suffices to show that (1) there is a read barrier + * between the time we read the index TID and the time we test the + * visibility map; and (2) there is a write barrier between the time + * some other concurrent process clears the visibility map bit and the + * time it inserts the index TID. Since acquiring or releasing a + * LWLock interposes a full barrier, this is easy to show: (1) is + * satisfied by the release of the index buffer content lock after + * reading the TID; and (2) is satisfied by the acquisition of the + * buffer content lock in order to insert the TID. 
+ */ + if (!ExecGPIGetNextPartRelation(node, indexScan)) { + continue; + } + if (!ExecCBIFixHBktRel(scandesc, &node->ioss_VMBuffer)) { + continue; + } + + if (isUHeap) { + /* ustore with multi-version ubtree only recheck IndexTuple when xs_recheck_itup is set */ + if (indexScan->xs_recheck_itup) { + node->ioss_HeapFetches++; + if (!IndexFetchUHeap(indexScan, tmpslot, &node->ss.ps.state->have_current_xact_date)) { + continue; /* this TID indicate no visible tuple */ + } + if (!RecheckIndexTuple(indexScan, tmpslot)) { + continue; /* the visible version not match the IndexTuple */ + } + } + } else if (isVersionScan || + !visibilitymap_test(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), &node->ioss_VMBuffer)) { + /* IMPORTANT: We ALWAYS visit the heap to check visibility in VERSION SCAN. */ + /* + * Rats, we have to visit the heap to check visibility. + */ + node->ioss_HeapFetches++; + if (!IndexFetchSlot(indexScan, slot, isUHeap, &node->ss.ps.state->have_current_xact_date)) { +#ifdef DEBUG_INPLACE + /* Now ustore does not support hash bucket table */ + Assert(indexScan == scandesc); + /* Record whether the invisible heap tuple is all dead or not */ + if (indexScan->kill_prior_tuple) + INPLACEHEAPSTAT_COUNT_INDEX_FETCH_TUPLE(INPLACEHEAP_TUPLE_INVISIBLE_ALL_DEAD); + else + INPLACEHEAPSTAT_COUNT_INDEX_FETCH_TUPLE(INPLACEHEAP_TUPLE_INVISIBLE_NOT_ALL_DEAD); +#endif + continue; /* no visible tuple, try next index entry */ + } + +#ifdef DEBUG_INPLACE + Assert(indexScan == scandesc); + Assert(!indexScan->kill_prior_tuple); + /* Record Heap Tuple is visible */ + INPLACEHEAPSTAT_COUNT_INDEX_FETCH_TUPLE(INPLACEHEAP_TUPLE_VISIBLE); +#endif + + + /* + * Only MVCC snapshots are supported here, so there should be no + * need to keep following the HOT chain once a visible entry has + * been found. If we did want to allow that, we'd need to keep + * more state to remember not to call index_getnext_tid next time. 
+ */ + if (indexScan->xs_continue_hot) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("non-MVCC snapshots are not supported in index-only scans"))); + + /* + * Note: at this point we are holding a pin on the heap page, as + * recorded in scandesc->xs_cbuf. We could release that pin now, + * but it's not clear whether it's a win to do so. The next index + * entry might require a visit to the same heap page. + */ + } + + /* + * Fill the scan tuple slot with data from the index. + */ + StoreIndexTuple(slot, indexScan->xs_itup, indexScan->xs_itupdesc); + + /* + * If the index was lossy, we have to recheck the index quals. + * (Currently, this can never happen, but we should support the case + * for possible future use, eg with GiST indexes.) + */ + if (indexScan->xs_recheck) { + econtext->ecxt_scantuple = slot; + ResetExprContext(econtext); + if (!ExecQual(node->indexqual, econtext, false)) { + /* Fails recheck, so drop it and loop back for another */ + InstrCountFiltered2(node, 1); + continue; + } + } + + /* + * Predicate locks for index-only scans must be acquired at the page + * level when the heap is not accessed, since tuple-level predicate + * locks need the tuple's xmin value. If we had to visit the tuple + * anyway, then we already have the tuple-level lock and can skip the + * page lock. + */ + if (tuple == NULL) + PredicateLockPage(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), estate->es_snapshot); + if (isUHeap) { + ExecDropSingleTupleTableSlot(tmpslot); + } + return slot; + } + + /* + * if we get here it means the index scan failed so we are at the end of + * the scan.. 
+ */ + if (isUHeap) { + ExecDropSingleTupleTableSlot(tmpslot); + } + return ExecClearTuple(slot); +} + +/* ---------------------------------------------------------------- + * ExecIndexScan(node) + * ---------------------------------------------------------------- + */ +TupleTableSlot* ExecSpqIndexOnlyScan(PlanState* state) +{ + IndexOnlyScanState* node = castNode(IndexOnlyScanState, state); + /* + * If we have runtime keys and they've not already been set up, do it now. + */ + if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady) { + if (node->ss.isPartTbl) { + if (PointerIsValid(node->ss.partitions)) { + node->ss.ss_ReScan = true; + + ExecReScan((PlanState*)node); + } + } else { + ExecReScan((PlanState*)node); + } + } + + return ExecScan(&node->ss, (ExecScanAccessMtd)SpqIndexOnlyNext, (ExecScanRecheckMtd)IndexOnlyRecheck); +} + + +IndexOnlyScanState* ExecInitSpqIndexOnlyScan(SpqIndexOnlyScan* node, EState* estate, int eflags) +{ + IndexOnlyScanState* indexScan = ExecInitIndexOnlyScan((IndexOnlyScan*)node, estate, eflags); + indexScan->ss.ps.type = T_IndexOnlyScanState; + indexScan->ss.ps.ExecProcNode = ExecSpqIndexOnlyScan; + return indexScan; +} + +void init_spqindexonlyscan_hook() +{ + backup_init_spqindexonlyscan_hook = init_indexonlyscan_hook; + backup_exec_spqindexonlyscan_hook = exec_indexonlyscan_hook; + init_indexonlyscan_hook = ExecInitSpqIndexOnlyScan; + exec_indexonlyscan_hook = ExecSpqIndexOnlyScan; +} + +void restore_spqindexonlyscan_hook() +{ + init_indexonlyscan_hook = backup_init_spqindexonlyscan_hook; + exec_indexonlyscan_hook = backup_exec_spqindexonlyscan_hook; +} diff --git a/contrib/spq_plugin/src/executor/spq_indexscan.cpp b/contrib/spq_plugin/src/executor/spq_indexscan.cpp new file mode 100644 index 000000000..a71aff31d --- /dev/null +++ b/contrib/spq_plugin/src/executor/spq_indexscan.cpp @@ -0,0 +1,195 @@ +/* ------------------------------------------------------------------------- + * + * spq_indexscan.cpp + * Routines to 
support indexed scans of relations + * + * Portions Copyright (c) 2023 Huawei Technologies Co.,Ltd. + * + * + * IDENTIFICATION + * spq_indexscan.cpp + * + * ------------------------------------------------------------------------- + * + * INTERFACE ROUTINES + * ExecSpqIndexScan scans a relation using an index + * SpqIndexNext retrieve next tuple using index + */ +#include "postgres.h" +#include "knl/knl_variable.h" +#include "executor/executor.h" +#include "executor/node/nodeIndexscan.h" +#include "executor/spq_indexscan.h" +#include "executor/spq_seqscan.h" +#include "access/tableam.h" + +THR_LOCAL init_spqindexscan_hook_type backup_init_spqindexscan_hook = NULL; +THR_LOCAL exec_spqindexscan_hook_type backup_exec_spqindexscan_hook = NULL; + +bool IndexRecheck(IndexScanState* node, TupleTableSlot* slot) +{ + ExprContext* econtext = NULL; + + /* + * extract necessary information from index scan node + */ + econtext = node->ss.ps.ps_ExprContext; + + /* Does the tuple meet the indexqual condition? */ + econtext->ecxt_scantuple = slot; + + ResetExprContext(econtext); + + return ExecQual(node->indexqualorig, econtext, false); +} + +/* ---------------------------------------------------------------- + * SpqIndexNext + * + * Retrieve a tuple from the IndexScan node's current_relation + * using the index specified in the IndexScanState information. 
+ * ---------------------------------------------------------------- + */ + +TupleTableSlot* SpqIndexNext(IndexScanState* node) +{ + EState* estate = NULL; + ExprContext* econtext = NULL; + ScanDirection direction; + IndexScanDesc scandesc; + HeapTuple tuple; + TupleTableSlot* slot = NULL; + bool isUstore = false; + + /* + * extract necessary information from index scan node + */ + estate = node->ss.ps.state; + direction = estate->es_direction; + /* flip direction if this is an overall backward scan */ + if (ScanDirectionIsBackward(((IndexScan*)node->ss.ps.plan)->indexorderdir)) { + if (ScanDirectionIsForward(direction)) + direction = BackwardScanDirection; + else if (ScanDirectionIsBackward(direction)) + direction = ForwardScanDirection; + } + scandesc = node->iss_ScanDesc; + econtext = node->ss.ps.ps_ExprContext; + slot = node->ss.ss_ScanTupleSlot; + + if (IS_SPQ_EXECUTOR && scandesc->spq_scan == NULL && node->ss.ps.plan->spq_scan_partial) { + SPQScanDesc spq_scan = NULL; + spq_scan = (SPQScanDescData*)palloc0(sizeof(SPQScanDescData)); + GetInstanceIDAndSliceNumber(estate->es_plannedstmt, ((Plan *)node->ss.ps.plan)->dop, + spq_scan->slice_num, + spq_scan->instance_id); + scandesc->spq_scan = spq_scan; + } + + isUstore = RelationIsUstoreFormat(node->ss.ss_currentRelation); + + /* + * ok, now that we have what we need, fetch the next tuple. 
+ */ + // we should change abs_idx_getnext to call IdxScanAm(scan)->idx_getnext and channge .idx_getnext in g_HeapIdxAm to + // IndexGetnextSlot + while (true) { + CHECK_FOR_INTERRUPTS(); + + IndexScanDesc indexScan = GetIndexScanDesc(scandesc); + if (isUstore) { + if (!IndexGetnextSlot(scandesc, direction, slot, &node->ss.ps.state->have_current_xact_date)) { + break; + } + } else { + if ((tuple = scan_handler_idx_getnext(scandesc, direction, InvalidOid, InvalidBktId, + &node->ss.ps.state->have_current_xact_date)) == NULL) { + break; + } + /* Update indexScan, because hashbucket may switch current index in scan_handler_idx_getnext */ + indexScan = GetIndexScanDesc(scandesc); + /* + * Store the scanned tuple in the scan tuple slot of the scan state. + * Note: we pass 'false' because tuples returned by amgetnext are + * pointers onto disk pages and must not be pfree_ext()'d. + */ + (void)ExecStoreTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + indexScan->xs_cbuf, /* buffer containing tuple */ + false); /* don't pfree */ + } + + /* + * If the index was lossy, we have to recheck the index quals using + * the fetched tuple. + */ + if (indexScan->xs_recheck) { + econtext->ecxt_scantuple = slot; + ResetExprContext(econtext); + if (!ExecQual(node->indexqualorig, econtext, false)) { + /* Fails recheck, so drop it and loop back for another */ + InstrCountFiltered2(node, 1); + continue; + } + } + + return slot; + } + + /* + * if we get here it means the index scan failed so we are at the end of + * the scan.. + */ + return ExecClearTuple(slot); +} + +/* ---------------------------------------------------------------- + * ExecIndexScan(node) + * ---------------------------------------------------------------- + */ +TupleTableSlot* ExecSpqIndexScan(PlanState* state) +{ + IndexScanState* node = castNode(IndexScanState, state); + /* + * If we have runtime keys and they've not already been set up, do it now. 
+ */ + if (node->iss_NumRuntimeKeys != 0 && (!node->iss_RuntimeKeysReady || (u_sess->parser_cxt.has_set_uservar && DB_IS_CMPT(B_FORMAT)))) { + /* + * set a flag for partitioned table, so we can deal with it specially + * when we rescan the partitioned table + */ + if (node->ss.isPartTbl) { + if (PointerIsValid(node->ss.partitions)) { + node->ss.ss_ReScan = true; + ExecReScan((PlanState*)node); + } + } else { + ExecReScan((PlanState*)node); + } + } + + return ExecScan(&node->ss, (ExecScanAccessMtd)SpqIndexNext, (ExecScanRecheckMtd)IndexRecheck); +} + + +IndexScanState* ExecInitSpqIndexScan(SpqIndexScan* node, EState* estate, int eflags) +{ + IndexScanState* indexScan = ExecInitIndexScan((IndexScan*)node, estate, eflags); + indexScan->ss.ps.type = T_IndexScanState; + indexScan->ss.ps.ExecProcNode = ExecSpqIndexScan; + return indexScan; +} + +void init_spqindexscan_hook() +{ + backup_init_spqindexscan_hook = init_indexscan_hook; + backup_exec_spqindexscan_hook = exec_indexscan_hook; + init_indexscan_hook = ExecInitSpqIndexScan; + exec_indexscan_hook = ExecSpqIndexScan; +} + +void restore_spqindexscan_hook() +{ + init_indexscan_hook = backup_init_spqindexscan_hook; + exec_indexscan_hook = backup_exec_spqindexscan_hook; +} diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index d8b63a429..6197a8c6a 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1060,6 +1060,16 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_debug_slice_print", + "Prints slice detail information.", + NULL, + &u_sess->attr.attr_spq.spq_debug_slice_print, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); DefineCustomBoolVariable("spqplugin.spq_print_direct_dispatch_info", "For testing purposes, print information about direct dispatch decisions.", NULL, @@ -1130,6 +1140,16 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + 
DefineCustomBoolVariable("spqplugin.spq_enable_left_index_nestloop_join", + "Enable left index nestloop join.", + NULL, + &u_sess->attr.attr_spq.spq_enable_left_index_nestloop_join, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesInt() @@ -1426,6 +1446,18 @@ static void InitSpqConfigureNamesReal() NULL, NULL, NULL); + DefineCustomRealVariable("spqplugin.spq_optimizer_share_indexscan_factor", + "Set the share indexscan cost factor in the optimizer, 1.0 means same as default, > 1.0 means more costly than default, < 1.0 means means less costly than default", + NULL, + &u_sess->attr.attr_spq.spq_optimizer_share_indexscan_factor, + 1.0, + 0.0, + DBL_MAX, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesString() diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index e8cbe9f30..4936e7371 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -28,6 +28,8 @@ #include "spq/spq_util.h" #include "parser/parsetree.h" +Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result); + /* * Is the node a "subclass" of Plan? */ @@ -634,9 +636,29 @@ int exec_by_multiple_dop(PlannerInfo* root, Plan *spqplan) heap_close(relation, NoLock); return spqplan->dop; } - -Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) +bool check_slice_dop(PlanSlice *slices, Plan *subplan, PlannedStmt *result) { + int producerDop = slices->numsegments == 1 ? 
1 : u_sess->opt_cxt.query_dop; + if (producerDop != subplan->dop) { + ereport(LOG,(errmsg("check_slice_dop fail, slice info:"))); + for (int i = 0; i < result->numSlices; i++) { + PlanSlice *slices = &(result->slices[i]); + ereport(LOG, (errmsg("Index[%d] pIndex[%d] type[%d] segments[%d] worker_idx[%d]", + slices->sliceIndex, slices->parentIndex, slices->gangType, slices->numsegments, slices->worker_idx))); + } + } + return producerDop == subplan->dop; +} +Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result) +{ + PlanSlice *slices = &(result->slices[motion->motionID]); + PlanSlice *parentSlices = &(result->slices[slices->parentIndex]); + if (check_slice_dop(slices, subplan, result) == false) { + ereport(ERROR, (errmsg("check_slice_dop in remote check fail motion[%d]", motion->motionID))); + } + if (parentSlices->numsegments == 1) { + return make_gather_stream(root, subplan, motion, result); + } Stream* stream = makeNode(Stream); Plan* plan = &stream->scan.plan; Distribution* distribution = ng_get_dest_distribution(subplan); @@ -654,7 +676,7 @@ Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) // set by redistribute_keys? stream->smpDesc.producerDop = exec_by_multiple_dop(root, subplan); - stream->smpDesc.consumerDop = u_sess->opt_cxt.query_dop; + stream->smpDesc.consumerDop = parentSlices->numsegments > 1 ? 
u_sess->opt_cxt.query_dop : 1;; plan->dop = stream->smpDesc.consumerDop; @@ -733,8 +755,13 @@ Plan* create_spq_local_gather(PlannerInfo* root, Plan* plan, Motion *motion) stream_node->streamID = motion->motionID; return stream_plan; } -Plan* make_gather_Remote(PlannerInfo* root, Plan *lefttree, Motion *motion) { +Plan* make_gather_Remote(PlannerInfo* root, Plan *lefttree, Motion *motion, PlannedStmt *result) { + + PlanSlice *slices = &(result->slices[motion->motionID]); + if (check_slice_dop(slices, lefttree, result) == false) { + ereport(ERROR, (errmsg("check_slice_dop in remote check fail motion[%d]", motion->motionID))); + } if (lefttree->dop > 1) { lefttree = create_spq_local_gather(root, lefttree, motion); } @@ -751,13 +778,13 @@ Plan* make_gather_Remote(PlannerInfo* root, Plan *lefttree, Motion *motion) { remote_query->scan.plan.exec_type = EXEC_ON_COORDS; remote_query->is_simple = true; remote_query->rq_need_proj = false; - //int num = 2; // QDsize - //double size = PLAN_LOCAL_ROWS(lefttree) * Max(lefttree->plan_width, 128) / 8192.0; copy_plan_costsize(&remote_query->scan.plan, &motion->plan); remote_query->scan.plan.plan_width = lefttree->plan_width; remote_query->sort = NULL; remote_query->streamID = motion->motionID; remote_query->scan.plan.dop = 1; + + remote_query->nodeCount = slices->numsegments > 1 ? t_thrd.spq_ctx.num_nodes : 1; if (motion->sendSorted) { return make_sort(motion, (Plan*)remote_query); } @@ -765,16 +792,20 @@ Plan* make_gather_Remote(PlannerInfo* root, Plan *lefttree, Motion *motion) { return (Plan*)remote_query; } -Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion) { - /* Set stream struct parameter. 
*/ - //double size = (PLAN_LOCAL_ROWS(subplan)) * (subplan->plan_width) / 8192.0; +Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result) +{ + PlanSlice *slices = &(result->slices[motion->motionID]); + PlanSlice *parentSlices = &(result->slices[slices->parentIndex]); + if (check_slice_dop(slices, subplan, result) == false) { + ereport(ERROR, (errmsg("check_slice_dop in remote check fail motion[%d]", motion->motionID))); + } Stream *stream_node = makeNode(Stream); stream_node->type = STREAM_GATHER; stream_node->consumer_nodes = (ExecNodes *)copyObject(subplan->exec_nodes); stream_node->is_sorted = false; stream_node->is_dummy = false; stream_node->sort = NULL; - stream_node->smpDesc.consumerDop = u_sess->opt_cxt.query_dop; + stream_node->smpDesc.consumerDop = parentSlices->numsegments > 1 ? u_sess->opt_cxt.query_dop : 1; stream_node->smpDesc.producerDop = exec_by_multiple_dop(root, subplan); /* plan->dop */ stream_node->smpDesc.distriType = REMOTE_DIRECT_DISTRIBUTE; stream_node->distribute_keys = NIL; @@ -796,9 +827,43 @@ Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion) { } return (Plan*)stream_node; } - +Plan* tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bool &top) +{ + bool backtop = top; + top = false; + Motion *motion = (Motion *)plan; + Assert(!motion->plan.righttree); + int backIndex = cxt->curentIndex; + if (motion->motionID >= cxt->result->numSlices) { + ereport(ERROR, (errmsg("MotionID check fail id[%d] numslice[%d]", motion->motionID, cxt->result->numSlices))); + } + cxt->curentIndex = motion->motionID; + Plan *subplan = replace_motion_stream_recurse(root, cxt, motion->plan.lefttree, top); + cxt->curentIndex = backIndex; + + if (u_sess->attr.attr_spq.spq_debug_slice_print && motion->motionID < cxt->result->numSlices) { + PlanSlice *slices = &(cxt->result->slices[motion->motionID]); + PlanSlice *parentSlices = &(cxt->result->slices[slices->parentIndex]); + 
ereport(LOG,(errmsg("[MotionInfo] motionID[%d] motiontype[%d] recvcout[%d] sendcount[%d]", + motion->motionID, motion->motionType, parentSlices->numsegments, slices->numsegments ))); + ereport(LOG,(errmsg("[SliceInfo] sliceIndex[%d] slicetype[%d] worker_idx[%d] parentIndex[%d]", + slices->sliceIndex, slices->gangType, slices->worker_idx, slices->parentIndex))); + } + // no need check motion->motionID again in below func; + if (motion->motionType == MOTIONTYPE_GATHER) { + if (backtop) { + top = backtop; + return make_gather_Remote(root, subplan, motion, cxt->result); + } + Plan *gather_stream = make_gather_stream(root, subplan, motion, cxt->result); + top = backtop; + return gather_stream; + } else { + return make_stream(root, subplan, motion, cxt->result); + } +} //TODO SPQ need fix: dops and multiple gather -Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) +Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Plan *plan, bool &top) { ListCell* lc = NULL; @@ -817,7 +882,7 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) if (IsA(node, SubPlan)) { subplan = (SubPlan*)lfirst(lc); initNode = (Plan*)list_nth(subplans, subplan->plan_id - 1); - lfirst(lc) = replace_motion_stream_recurse(root, initNode, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, initNode, top); } } list_free_ext(subplan_list); @@ -827,14 +892,14 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) List* initplans = plan->initPlan; foreach (lc, initplans) { Plan* initplan = (Plan*)lfirst(lc); - lfirst(lc) = replace_motion_stream_recurse(root, initplan, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, initplan, top); } if (IsA(plan, Append)) { Append* node = (Append*)plan; foreach(lc, node->appendplans) { Plan* initNode = (Plan*)lfirst(lc); - lfirst(lc) = replace_motion_stream_recurse(root, initNode, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, 
initNode, top); } } @@ -842,37 +907,26 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) Sequence* node = (Sequence*)plan; foreach(lc, node->subplans) { Plan* subplan = (Plan*)lfirst(lc); - lfirst(lc) = replace_motion_stream_recurse(root, subplan, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, subplan, top); } } if (IsA(plan, Motion)) { - bool backtop = top; - top = false; - Motion *motion = (Motion *)plan; - Assert(!motion->plan.righttree); - Plan *subplan = replace_motion_stream_recurse(root, motion->plan.lefttree, top); - if (motion->motionType == MOTIONTYPE_GATHER) { - if (backtop) { - top = backtop; - return make_gather_Remote(root, subplan, motion); - } - Plan *gather_stream = make_gather_stream(root, subplan, motion); - top = backtop; - return gather_stream; - } else { - return make_stream(root, subplan, motion); - } + return tran_motion_to_stream(root, cxt, plan, top); } else { if (plan->lefttree) { - plan->lefttree = replace_motion_stream_recurse(root, plan->lefttree, top); + plan->lefttree = replace_motion_stream_recurse(root, cxt, plan->lefttree, top); plan->dop = plan->lefttree->dop; } if (plan->righttree) { - plan->righttree = replace_motion_stream_recurse(root, plan->righttree, top); + plan->righttree = replace_motion_stream_recurse(root, cxt, plan->righttree, top); } if (plan->lefttree == nullptr && plan->righttree == nullptr) { - plan->dop = u_sess->opt_cxt.query_dop; + if (cxt->curentIndex >= cxt->result->numSlices) { + ereport(ERROR, (errmsg("curentIndex check fail curentIndex[%d] numslice[%d]", cxt->curentIndex, cxt->result->numSlices))); + } + PlanSlice *slices = &(cxt->result->slices[cxt->curentIndex]); + plan->dop = slices->numsegments > 1 ? 
u_sess->opt_cxt.query_dop : 1; } plan->parallel_enabled = (plan->dop > 1); return plan; @@ -896,7 +950,17 @@ static void InitRemoteNodeDefinition(PlannedStmt* planstmt) void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal *glob) { bool top = true; - result->planTree = replace_motion_stream_recurse(root, result->planTree, top); + if (u_sess->attr.attr_spq.spq_debug_slice_print) { + for (int i = 0; i < result->numSlices; i++) { + PlanSlice *slices = &(result->slices[i]); + ereport(LOG, (errmsg("Index[%d] pIndex[%d] type[%d] segments[%d] worker_idx[%d]", + slices->sliceIndex, slices->parentIndex, slices->gangType, slices->numsegments, slices->worker_idx))); + } + } + SpqSliceContext sliceCxt; + sliceCxt.result = result; + sliceCxt.curentIndex = 0; + result->planTree = replace_motion_stream_recurse(root, &sliceCxt, result->planTree, top); // should fix all? //result->planTree = set_plan_references(root, result->planTree); int parent_node_id = INITIAL_PARENT_NODE_ID; /* beginning with INITIAL_PARENT_NODE_ID */ diff --git a/contrib/spq_plugin/src/spq/spq_plan.cpp b/contrib/spq_plugin/src/spq/spq_plan.cpp index dbd3fec12..56fc581c1 100644 --- a/contrib/spq_plugin/src/spq/spq_plan.cpp +++ b/contrib/spq_plugin/src/spq/spq_plan.cpp @@ -330,7 +330,24 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont // return (Node *) newDynamicSeqScan; // } // break; + case T_SpqIndexScan: { + SpqIndexScan *spqindexscan = (SpqIndexScan *)node; + SpqIndexScan *newspqindexscan; + FLATCOPY(newspqindexscan, spqindexscan, SpqIndexScan); + SCANMUTATE(newspqindexscan, spqindexscan); + return (Node *)newspqindexscan; + break; + } + case T_SpqIndexOnlyScan: { + SpqIndexOnlyScan *spqindexonlyscan = (SpqIndexOnlyScan *)node; + SpqIndexOnlyScan *newspqindexonlyscan; + + FLATCOPY(newspqindexonlyscan, spqindexonlyscan, SpqIndexOnlyScan); + SCANMUTATE(newspqindexonlyscan, spqindexonlyscan); + return (Node *)newspqindexonlyscan; + break; + } case 
T_IndexScan: // case T_DynamicIndexScan: { @@ -402,8 +419,18 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont } break; + case T_SpqBitmapHeapScan: + { + SpqBitmapHeapScan *spqbitmapheapscan = (SpqBitmapHeapScan *)node; + SpqBitmapHeapScan *newspqbitmapheapscan; + + FLATCOPY(newspqbitmapheapscan, spqbitmapheapscan, SpqBitmapHeapScan); + SCANMUTATE(newspqbitmapheapscan, spqbitmapheapscan); + return (Node *)newspqbitmapheapscan; + } + break; + case T_BitmapHeapScan: - // case T_DynamicBitmapHeapScan: { BitmapHeapScan *bmheapscan = (BitmapHeapScan *)node; BitmapHeapScan *newbmheapscan; diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/traceflags/traceflags.h b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/traceflags/traceflags.h index c39790481..2644f016a 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/traceflags/traceflags.h +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/include/naucrates/traceflags/traceflags.h @@ -228,8 +228,11 @@ enum EOptTraceFlag // Discard HashJoin with RedistributeMotion nodes EopttraceDiscardRedistributeHashJoin = 103044, - // Do not keep an order-by - EopttraceRemoveSuperfluousOrder = 103045, + // Do not keep an order-by + EopttraceRemoveSuperfluousOrder = 103045, + + // enable left index nestloop join + EopttraceEnableLeftIndexNLJoin = 104040, /////////////////////////////////////////////////////// ///////////////////// statistics flags //////////////// diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalLeftOuterIndexNLJoin.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalLeftOuterIndexNLJoin.cpp index d135e1228..874a324b3 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalLeftOuterIndexNLJoin.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalLeftOuterIndexNLJoin.cpp @@ -101,6 +101,17 @@ 
CPhysicalLeftOuterIndexNLJoin::Ped(CMemoryPool *mp, CExpressionHandle &exprhdl, CEnfdDistribution::EDistributionMatching dmatch = Edm(prppInput, child_index, pdrspqdpCtxt, ulOptReq); + + if (SPQOS_FTRACE(EopttraceEnableLeftIndexNLJoin)) + { + if (1 == child_index) + return SPQOS_NEW(mp) CEnfdDistribution(SPQOS_NEW(mp) + CDistributionSpecReplicated(CDistributionSpec::EdtStrictReplicated), dmatch); + else + return SPQOS_NEW(mp) CEnfdDistribution(SPQOS_NEW(mp) + CDistributionSpecAny(this->Eopid(), true /*fAllowOuterRefs*/), dmatch); + } + if (1 == child_index) { // inner (index-scan side) is requested for Any distribution, diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/translate/CTranslatorExprToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/translate/CTranslatorExprToDXL.cpp index cc515c62f..c1c392699 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/translate/CTranslatorExprToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/translate/CTranslatorExprToDXL.cpp @@ -1623,19 +1623,20 @@ CTranslatorExprToDXL::PdxlnIndexScanWithInlinedCondition( CPhysicalIndexScan::PopConvert(pexprIndexScan->Pop()); isGist = (indexScan->Pindexdesc()->IndexType() == IMDIndex::EmdindGist); } + /* SPQ: for shareindexscan */ + else if (COperator::EopPhysicalShareIndexScan == op_id) + { + CPhysicalShareIndexScan *indexScan = + CPhysicalShareIndexScan::PopConvert(pexprIndexScan->Pop()); + isGist = (indexScan->Pindexdesc()->IndexType() == IMDIndex::EmdindGist); + } else if (COperator::EopPhysicalIndexOnlyScan != op_id) { CPhysicalDynamicIndexScan *indexScan = CPhysicalDynamicIndexScan::PopConvert(pexprIndexScan->Pop()); isGist = (indexScan->Pindexdesc()->IndexType() == IMDIndex::EmdindGist); } - /* SPQ: for shareindexscan */ - else if (COperator::EopPhysicalShareIndexScan == op_id) - { - CPhysicalShareIndexScan *indexScan = - CPhysicalShareIndexScan::PopConvert(pexprIndexScan->Pop()); - isGist = (indexScan->Pindexdesc()->IndexType() == 
IMDIndex::EmdindGist); - } + // inline scalar condition in index scan, if it is not the same as index lookup condition // Exception: most GiST indexes require a recheck condition since they are lossy: re-add the lookup diff --git a/contrib/spq_plugin/src/spq_optimizer_util/config/CConfigParamMapping.cpp b/contrib/spq_plugin/src/spq_optimizer_util/config/CConfigParamMapping.cpp index cb2edd933..d236a79b5 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/config/CConfigParamMapping.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/config/CConfigParamMapping.cpp @@ -295,6 +295,10 @@ void CConfigParamMapping::InitConfigParamElements(CBitSet *traceflag_bitset) true, // m_negate_param SPQOS_WSZ_LIT( "Penalize a hash join with a skewed redistribute as a child.")}, + {EopttraceEnableLeftIndexNLJoin, &u_sess->attr.attr_spq.spq_enable_left_index_nestloop_join, + false, // m_negate_param + SPQOS_WSZ_LIT( + "Enable left index nestloop join.")}, {EopttraceTranslateUnusedColrefs, &u_sess->attr.attr_spq.spq_optimizer_prune_unused_columns, true, // m_negate_param SPQOS_WSZ_LIT("Prune unused columns from the query.")}, diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index d5b38c398..62800e283 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -1936,11 +1936,11 @@ spqdb::RelPartIsNone(Oid relid) { SPQ_WRAP_START; { - //spq partition support - //return PART_STATUS_NONE == rel_part_status(relid); + // spq partition support + return spq_relation_not_partitioned(relid); } SPQ_WRAP_END; - return false; + return true; } bool diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CContextDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CContextDXLToPlStmt.cpp index 1c9ecd607..7a0861e0f 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CContextDXLToPlStmt.cpp +++ 
b/contrib/spq_plugin/src/spq_optimizer_util/translate/CContextDXLToPlStmt.cpp @@ -47,12 +47,14 @@ CContextDXLToPlStmt::CContextDXLToPlStmt( m_plan_id_counter(plan_id_counter), m_motion_id_counter(motion_id_counter), m_param_id_counter(param_id_counter), - m_param_types_list(NULL), /* SPQ: param list */ + m_param_types_list(NULL), /* SPQ: param list */ m_distribution_hashops(distribution_hashops), m_rtable_entries_list(rtable_entries_list), m_partitioned_tables_list(NULL), m_num_partition_selectors_array(NULL), m_subplan_entries_list(subplan_entries_list), + m_subplan_sliceids_list(NULL), + m_slices_list(NULL), m_result_relation_index(0), m_into_clause(NULL), m_distribution_policy(NULL) @@ -336,6 +338,7 @@ CContextDXLToPlStmt::AddSubplan(Plan *plan) { (*(m_subplan_entries_list)) = spqdb::LAppend((*(m_subplan_entries_list)), plan); + m_subplan_sliceids_list = spqdb::LAppendInt(m_subplan_sliceids_list, m_current_slice->sliceIndex); } //--------------------------------------------------------------------------- @@ -470,4 +473,57 @@ CContextDXLToPlStmt::GetDistributionHashFuncForType(Oid typid) return hashproc; } +int +CContextDXLToPlStmt::AddSlice(PlanSlice *slice) +{ + slice->sliceIndex = list_length(m_slices_list); + m_slices_list = spqdb::LAppend(m_slices_list, slice); + + return slice->sliceIndex; +} + +PlanSlice * +CContextDXLToPlStmt::GetSlices(int *numSlices_p) +{ + int numSlices = list_length(m_slices_list); + PlanSlice *sliceArray; + ListCell *lc; + int i; + + sliceArray = (PlanSlice *) spqdb::SPQDBAlloc(numSlices * sizeof(PlanSlice)); + + i = 0; + foreach(lc, m_slices_list) + { + PlanSlice *src = (PlanSlice *) lfirst(lc); + + memcpy(&sliceArray[i], src, sizeof(PlanSlice)); + + i++; + } + + m_current_slice = NULL; + spqdb::ListFreeDeep(m_slices_list); + + *numSlices_p = numSlices; + return sliceArray; +} +int * +CContextDXLToPlStmt::GetSubplanSliceIdArray() +{ + int numSubplans = list_length(*m_subplan_entries_list); + int *sliceIdArray; + ListCell *lc; + int 
i; + + sliceIdArray = (int *) spqdb::SPQDBAlloc(numSubplans * sizeof(int)); + + i = 0; + foreach(lc, m_subplan_sliceids_list) + { + sliceIdArray[i++] = lfirst_int(lc); + } + + return sliceIdArray; +} // EOF diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index 006bcae0d..82b9ddfbd 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -63,6 +63,7 @@ using namespace spqmd; #define SPQDXL_MOTION_ID_START 1 #define SPQDXL_PARAM_ID_START 0 +#define MASTER_CONTENT_ID (-1) ULONG CTranslatorDXLToPlStmt::m_external_scan_counter = 0; @@ -284,6 +285,22 @@ CTranslatorDXLToPlStmt::GetPlannedStmtFromDXL(const CDXLNode *dxlnode, CDXLTranslateContext dxl_translate_ctxt(m_mp, false); + PlanSlice *topslice; + + topslice = (PlanSlice *) spqdb::SPQDBAlloc(sizeof(PlanSlice)); + memset(topslice, 0, sizeof(PlanSlice)); + topslice->sliceIndex = 0; + topslice->parentIndex = -1; + topslice->gangType = GANGTYPE_UNALLOCATED; + topslice->numsegments = 1; + topslice->worker_idx = -1; + topslice->directDispatch.isDirectDispatch = false; + topslice->directDispatch.contentIds = NIL; + topslice->directDispatch.haveProcessedAnyCalculations = false; + + m_dxl_to_plstmt_context->AddSlice(topslice); + m_dxl_to_plstmt_context->SetCurrentSlice(topslice); + CDXLTranslationContextArray *ctxt_translation_prev_siblings = SPQOS_NEW(m_mp) CDXLTranslationContextArray(m_mp); Plan *plan = TranslateDXLOperatorToPlan(dxlnode, &dxl_translate_ctxt, @@ -331,6 +348,9 @@ CTranslatorDXLToPlStmt::GetPlannedStmtFromDXL(const CDXLNode *dxlnode, /* SPQ: for get param type list */ List* paramList = m_dxl_to_plstmt_context->GetParamTypes(); planned_stmt->nParamExec = spqdb::ListLength(paramList); + planned_stmt->slices = 
m_dxl_to_plstmt_context->GetSlices(&planned_stmt->numSlices); + planned_stmt->subplan_sliceIds = m_dxl_to_plstmt_context->GetSubplanSliceIdArray(); + /*SPQOS_ASSERT(plan->nMotionNodes >= 0); if (0 == plan->nMotionNodes && !m_is_tgt_tbl_distributed) @@ -751,9 +771,9 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexScan( rte->requiredPerms |= ACL_SELECT; m_dxl_to_plstmt_context->AddRTE(rte); - IndexScan *index_scan = NULL; - index_scan = MakeNode(IndexScan); - index_scan->scan.scanrelid = index; + SpqIndexScan *index_scan = MakeNode(SpqIndexScan); + index_scan->scan.scan.scanrelid = index; + index_scan->scan.scan.plan.spq_scan_partial = true; CMDIdSPQDB *mdid_index = CMDIdSPQDB::CastMdid( physical_idx_scan_dxlop->GetDXLIndexDescr()->MDId()); @@ -761,9 +781,9 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexScan( Oid index_oid = mdid_index->Oid(); SPQOS_ASSERT(InvalidOid != index_oid); - index_scan->indexid = index_oid; + index_scan->scan.indexid = index_oid; - Plan *plan = &(index_scan->scan.plan); + Plan *plan = &(index_scan->scan.scan.plan); plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); //plan->nMotionNodes = 0; @@ -793,7 +813,7 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexScan( &base_table_context, ctxt_translation_prev_siblings); - index_scan->indexorderdir = CTranslatorUtils::GetScanDirection( + index_scan->scan.indexorderdir = CTranslatorUtils::GetScanDirection( physical_idx_scan_dxlop->GetIndexScanDir()); // translate index condition list @@ -809,12 +829,14 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexScan( ctxt_translation_prev_siblings, &index_cond, &index_orig_cond, &index_strategy_list, &index_subtype_list); - index_scan->indexqual = index_cond; - index_scan->indexqualorig = index_orig_cond; + index_scan->scan.indexqual = index_cond; + index_scan->scan.indexqualorig = index_orig_cond; /* * As of 8.4, the indexstrategy and indexsubtype fields are no longer * available or needed in IndexScan. Ignore them. */ + plan->dop = is_execute_on_datanodes(plan) ? 
SET_DOP(u_sess->opt_cxt.query_dop) : 1; + add_distribute_info(plan, plan->qual, rte, orig_query, index); SetParamIds(plan); return (Plan *) index_scan; @@ -893,8 +915,9 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexOnlyScan( rte->requiredPerms |= ACL_SELECT; m_dxl_to_plstmt_context->AddRTE(rte); - IndexOnlyScan *index_scan = MakeNode(IndexOnlyScan); - index_scan->scan.scanrelid = index; + SpqIndexOnlyScan *index_scan = MakeNode(SpqIndexOnlyScan); + index_scan->scan.scan.scanrelid = index; + index_scan->scan.scan.plan.spq_scan_partial = true; CMDIdSPQDB *mdid_index = CMDIdSPQDB::CastMdid( physical_idx_scan_dxlop->GetDXLIndexDescr()->MDId()); @@ -902,9 +925,9 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexOnlyScan( Oid index_oid = mdid_index->Oid(); SPQOS_ASSERT(InvalidOid != index_oid); - index_scan->indexid = index_oid; + index_scan->scan.indexid = index_oid; - Plan *plan = &(index_scan->scan.plan); + Plan *plan = &(index_scan->scan.scan.plan); plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); // translate operator costs @@ -926,8 +949,7 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexOnlyScan( CDXLTranslateContextBaseTable index_context(m_mp); // translate index targetlist - index_scan->indextlist = TranslateDXLIndexTList(md_rel, md_index, index, - table_desc, &index_context); + index_scan->scan.indextlist = TranslateDXLIndexTList(md_rel, md_index, index, table_desc, &index_context); // translate target list plan->targetlist = @@ -939,7 +961,7 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexOnlyScan( TranslateDXLIndexFilter(filter_dxlnode, output_context, &index_context, ctxt_translation_prev_siblings); - index_scan->indexorderdir = CTranslatorUtils::GetScanDirection( + index_scan->scan.indexorderdir = CTranslatorUtils::GetScanDirection( physical_idx_scan_dxlop->GetIndexScanDir()); // translate index condition list @@ -955,8 +977,10 @@ CTranslatorDXLToPlStmt::TranslateDXLIndexOnlyScan( ctxt_translation_prev_siblings, &index_cond, &index_orig_cond, 
&index_strategy_list, &index_subtype_list); - index_scan->indexqual = index_cond; - index_scan->indexqualorig = index_orig_cond; + index_scan->scan.indexqual = index_cond; + index_scan->scan.indexqualorig = index_orig_cond; + plan->dop = is_execute_on_datanodes(plan) ? SET_DOP(u_sess->opt_cxt.query_dop) : 1; + add_distribute_info(plan, plan->qual, rte, orig_query, index); SetParamIds(plan); return (Plan *) index_scan; @@ -1833,7 +1857,8 @@ CTranslatorDXLToPlStmt::TranslateDXLNLJoin( // setting of prefetch_inner to true except for the case of index NLJ where we cannot prefetch inner // because inner child depends on variables coming from outer child join->prefetch_inner = !dxl_nlj->IsIndexNLJ(); - + join->prefetch_inner = (SPQOS_FTRACE(EopttraceEnableLeftIndexNLJoin) ? true : + !dxl_nlj->IsIndexNLJ()); CDXLTranslationContextArray *translation_context_arr_with_siblings = SPQOS_NEW(m_mp) CDXLTranslationContextArray(m_mp); Plan *left_plan = NULL; @@ -2179,7 +2204,7 @@ CTranslatorDXLToPlStmt::TranslateDXLDuplicateSensitiveMotion( { CDXLPhysicalMotion *motion_dxlop = CDXLPhysicalMotion::Cast(motion_dxlnode->GetOperator()); - if (CTranslatorUtils::IsDuplicateSensitiveMotion(motion_dxlop)) + if (CTranslatorUtils::IsDuplicateSensitiveMotion(motion_dxlop) && false) { return TranslateDXLRedistributeMotionToResultHashFilters( motion_dxlnode, output_context, ctxt_translation_prev_siblings); @@ -2210,7 +2235,6 @@ CTranslatorDXLToPlStmt::TranslateDXLMotion( Plan *plan = &(motion->plan); plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); - motion->motionID = m_dxl_to_plstmt_context->GetNextMotionId(); // translate operator costs TranslatePlanCosts( @@ -2222,6 +2246,54 @@ CTranslatorDXLToPlStmt::TranslateDXLMotion( CDXLNode *project_list_dxlnode = (*motion_dxlnode)[EdxlgmIndexProjList]; CDXLNode *filter_dxlnode = (*motion_dxlnode)[EdxlgmIndexFilter]; CDXLNode *sort_col_list_dxl = (*motion_dxlnode)[EdxlgmIndexSortColList]; + PlanSlice *recvslice = 
m_dxl_to_plstmt_context->GetCurrentSlice(); + PlanSlice *sendslice = (PlanSlice *) spqdb::SPQDBAlloc(sizeof(PlanSlice)); + memset(sendslice, 0, sizeof(PlanSlice)); + + sendslice->sliceIndex = m_dxl_to_plstmt_context->AddSlice(sendslice); + sendslice->parentIndex = recvslice->sliceIndex; + m_dxl_to_plstmt_context->SetCurrentSlice(sendslice); + + + const IntPtrArray *input_segids_array = motion_dxlop->GetInputSegIdsArray(); + + if (1 == input_segids_array->Size()) + { + int worker_idx = *((*input_segids_array)[0]); + + // only one segment in total + if (worker_idx == MASTER_CONTENT_ID) + { + // sender is on master, must be singleton gang + sendslice->gangType = GANGTYPE_ENTRYDB_READER; + } + else if (1 == spqdb::GetSPQSegmentCount()) + { + // sender is on segment, can not tell it's singleton or + // all-segment gang, so treat it as all-segment reader gang. + // It can be promoted to writer gang later if needed. + sendslice->gangType = GANGTYPE_PRIMARY_READER; + } + else + { + // multiple segments, must be singleton gang + sendslice->gangType = GANGTYPE_SINGLETON_READER; + } + sendslice->numsegments = 1; + sendslice->worker_idx = worker_idx; + } + else + { + // Mark it as reader for now. Will be overwritten into WRITER, if we + // encounter a DML node. 
+ sendslice->gangType = GANGTYPE_PRIMARY_READER; + sendslice->numsegments = m_num_of_segments; + sendslice->worker_idx = 0; + } + sendslice->directDispatch.isDirectDispatch = false; + sendslice->directDispatch.contentIds = NIL; + sendslice->directDispatch.haveProcessedAnyCalculations = false; + motion->motionID = sendslice->sliceIndex; // translate motion child // child node is in the same position in broadcast and gather motion nodes @@ -2334,43 +2406,8 @@ CTranslatorDXLToPlStmt::TranslateDXLMotion( // cleanup child_contexts->Release(); - // TODO SPQ undef FLOW - // create flow for child node to distinguish between singleton flows and all-segment flows - /*Flow *flow = MakeNode(Flow); - - const IntPtrArray *input_segids_array = motion_dxlop->GetInputSegIdsArray(); - - - // only one sender - if (1 == input_segids_array->Size()) - { - flow->segindex = *((*input_segids_array)[0]); - - // only one segment in total - if (1 == spqdb::GetSPQSegmentCount()) - { - if (flow->segindex == MASTER_CONTENT_ID) - // sender is on master, must be singleton flow - flow->flotype = FLOW_SINGLETON; - else - // sender is on segment, can not tell it's singleton or - // all-segment flow, just treat it as all-segment flow so - // it can be promoted to writer gang later if needed. 
- flow->flotype = FLOW_UNDEFINED; - } - else - { - // multiple segments, must be singleton flow - flow->flotype = FLOW_SINGLETON; - } - } - else - { - flow->flotype = FLOW_UNDEFINED; - } - - child_plan->flow = flow;*/ - + m_dxl_to_plstmt_context->SetCurrentSlice(recvslice); + plan->lefttree = child_plan; //plan->nMotionNodes = child_plan->nMotionNodes + 1; @@ -5863,7 +5900,7 @@ CTranslatorDXLToPlStmt::TranslateDXLBitmapTblScan( m_dxl_to_plstmt_context->AddRTE(rte); - BitmapHeapScan *bitmap_tbl_scan = NULL; + SpqBitmapHeapScan *bitmap_tbl_scan = NULL; if (is_dynamic) { @@ -5877,11 +5914,12 @@ CTranslatorDXLToPlStmt::TranslateDXLBitmapTblScan( } else { - bitmap_tbl_scan = MakeNode(BitmapHeapScan); + bitmap_tbl_scan = MakeNode(SpqBitmapHeapScan); } - bitmap_tbl_scan->scan.scanrelid = index; + bitmap_tbl_scan->scan.scan.scanrelid = index; + bitmap_tbl_scan->scan.scan.plan.spq_scan_partial = true; - Plan *plan = &(bitmap_tbl_scan->scan.plan); + Plan *plan = &(bitmap_tbl_scan->scan.scan.plan); plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); //plan->nMotionNodes = 0; @@ -5908,13 +5946,16 @@ CTranslatorDXLToPlStmt::TranslateDXLBitmapTblScan( output_context); plan->qual = quals_list; - bitmap_tbl_scan->bitmapqualorig = TranslateDXLFilterToQual( + bitmap_tbl_scan->scan.bitmapqualorig = TranslateDXLFilterToQual( recheck_cond_dxlnode, &base_table_context, ctxt_translation_prev_siblings, output_context); - bitmap_tbl_scan->scan.plan.lefttree = TranslateDXLBitmapAccessPath( + bitmap_tbl_scan->scan.scan.plan.lefttree = TranslateDXLBitmapAccessPath( bitmap_access_path_dxlnode, output_context, md_rel, table_descr, - &base_table_context, ctxt_translation_prev_siblings, bitmap_tbl_scan); + &base_table_context, ctxt_translation_prev_siblings, &bitmap_tbl_scan->scan); + + plan->dop = is_execute_on_datanodes(plan) ? 
SET_DOP(u_sess->opt_cxt.query_dop) : 1; + add_distribute_info(plan, plan->qual, rte, orig_query, index); SetParamIds(plan); return (Plan *) bitmap_tbl_scan; @@ -6057,6 +6098,7 @@ CTranslatorDXLToPlStmt::TranslateDXLBitmapIndexProbe( bitmap_idx_scan = MakeNode(BitmapIndexScan); } bitmap_idx_scan->scan.scanrelid = bitmap_tbl_scan->scan.scanrelid; + bitmap_idx_scan->scan.plan.spq_scan_partial = true; CMDIdSPQDB *mdid_index = CMDIdSPQDB::CastMdid( sc_bitmap_idx_probe_dxlop->GetDXLIndexDescr()->MDId()); @@ -6347,6 +6389,8 @@ CTranslatorDXLToPlStmt::TranslateDXLShareIndexScan( * As of 8.4, the indexstrategy and indexsubtype fields are no longer * available or needed in ShareIndexScan. Ignore them. */ + plan->dop = is_execute_on_datanodes(plan) ? SET_DOP(u_sess->opt_cxt.query_dop) : 1; + add_distribute_info(plan, plan->qual, rte, orig_query, index); SetParamIds(plan); return (Plan *) index_scan; diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp index 0930dafb4..dde3423c7 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorRelcacheToDXL.cpp @@ -84,21 +84,27 @@ static IMDIndex::EmdindexType GetIndexTypeFromOid(OID index_oid) { //LogicalIndexType indexType = spqdb::GetLogicalIndexType(index_oid); + IMDIndex::EmdindexType index_type = IMDIndex::EmdindSentinel; Relation index_rel = spqdb::GetRelation(index_oid); - // TODO SPQ switch (index_rel->rd_rel->relam) { case BTREE_AM_OID: - return IMDIndex::EmdindBtree; + index_type = IMDIndex::EmdindBtree; + break; //case INDTYPE_BITMAP: //return IMDIndex::EmdindBitmap; case GIST_AM_OID: - return IMDIndex::EmdindGist; + index_type = IMDIndex::EmdindGist; + break; case GIN_AM_OID: - return IMDIndex::EmdindGin; + index_type = IMDIndex::EmdindGin; + break; + default: + SPQOS_RAISE(spqdxl::ExmaMD, 
spqdxl::ExmiMDObjUnsupported, + SPQOS_WSZ_LIT("Query references unknown index type")); } - SPQOS_RAISE(spqdxl::ExmaMD, spqdxl::ExmiMDObjUnsupported, - SPQOS_WSZ_LIT("Query references unknown index type")); + spqdb::CloseRelation(index_rel); + return index_type; } //--------------------------------------------------------------------------- diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index c6dc6d936..3fe2fae90 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -17,6 +17,9 @@ #include "access/transam.h" #include "optimizer/planner.h" #include "executor/spq_seqscan.h" +#include "executor/spq_indexscan.h" +#include "executor/spq_indexonlyscan.h" +#include "executor/spq_bitmapheapscan.h" #include "spq_optimizer_util/SPQOptimizer.h" #include "spq_opt.h" #include "guc_spq.h" @@ -242,6 +245,9 @@ void _PG_init(void) backup_spq_planner_hook = spq_planner_hook; spq_planner_hook = spq_optimize_query; init_spqseqscan_hook(); + init_spqindexscan_hook(); + init_spqindexonlyscan_hook(); + init_spqbitmapheapscan_hook(); spq_guc_init(&u_sess->spq_cxt); } HOOK_INIT = true; @@ -252,6 +258,9 @@ void _PG_fini(void) spq_planner_hook = backup_spq_planner_hook; MemoryContextDelete(u_sess->spq_cxt.spq_worker_context); restore_spqseqscan_hook(); + restore_spqindexscan_hook(); + restore_spqindexonlyscan_hook(); + restore_spqbitmapheapscan_hook(); } void spqplugin_invoke(void) diff --git a/contrib/whale/include/plugin_postgres.h b/contrib/whale/include/plugin_postgres.h index 271f8c6c3..b9f05f005 100644 --- a/contrib/whale/include/plugin_postgres.h +++ b/contrib/whale/include/plugin_postgres.h @@ -109,7 +109,7 @@ typedef struct ASqlPluginContext { int scanbuflen; /* sqlscan.l */ int xcdepth = 0; /* depth of nesting in slash-star comments */ - char *dolqstart; /* current $foo$ quote start string */ + char *dolqstart; /* current $foo$ quote start string */ bool extended_string = false; /* plvsubst.c */ text 
*c_subst = NULL; @@ -128,4 +128,4 @@ typedef struct ASqlPluginContext { ASqlPluginContext* GetSessionContext(); -#endif \ No newline at end of file +#endif -- Gitee From 5f9f0e9ddaa08d14248f37c8ebb8a456718702ac Mon Sep 17 00:00:00 2001 From: Mijamind Date: Wed, 29 Nov 2023 21:05:29 +0800 Subject: [PATCH 085/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E4=BF=AE=E5=A4=8Dspq=20mdcache=E7=BC=93?= =?UTF-8?q?=E5=AD=98=E6=9C=AA=E5=A4=B1=E6=95=88=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/spq_optimizer_util/spq_wrappers.cpp | 14 ++++++-------- .../translate/CTranslatorDXLToPlStmt.cpp | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index d5b38c398..da9257f6e 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -46,6 +46,7 @@ #include "parser/parse_agg.h" #include "spqos/error/CAutoExceptionStack.h" #include "parser/parse_coerce.h" +#include "utils/inval.h" #define SPQ_WRAP_START \ sigjmp_buf local_sigjmp_buf; \ @@ -2591,8 +2592,7 @@ static bool mdcache_invalidation_counter_registered = false; static int64 mdcache_invalidation_counter = 0; static int64 last_mdcache_invalidation_counter = 0; -// TODO SPQ not used -/*static void +static void mdsyscache_invalidation_counter_callback(Datum arg, int cacheid, uint32 hashvalue) { @@ -2602,7 +2602,7 @@ static void mdrelcache_invalidation_counter_callback(Datum arg, Oid relid) { mdcache_invalidation_counter++; -}*/ +} static void register_mdcache_invalidation_callbacks(void) @@ -2662,14 +2662,12 @@ register_mdcache_invalidation_callbacks(void) for (i = 0; i < lengthof(metadata_caches); i++) { - //CacheRegisterSyscacheCallback(metadata_caches[i], - // &mdsyscache_invalidation_counter_callback, - 
// (Datum) 0); + CacheRegisterSessionSyscacheCallback(metadata_caches[i], + &mdsyscache_invalidation_counter_callback, (Datum) 0); } /* also register the relcache callback */ - //CacheRegisterRelcacheCallback(&mdrelcache_invalidation_counter_callback, - // (Datum) 0); + CacheRegisterSessionRelcacheCallback(&mdrelcache_invalidation_counter_callback, (Datum) 0); } // Has there been any catalog changes since last call? diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index 19428af97..a24fc78ef 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -3531,7 +3531,7 @@ CTranslatorDXLToPlStmt::TranslateDXLAppend( SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, SPQOS_WSZ_LIT("exec_nodes cannot be NULL")); } - if (max_num_exec_nodes < list_length(plan->exec_nodes->nodeList)) { + if (max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { plan->exec_nodes = ng_get_dest_execnodes(child_plan); max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); } -- Gitee From 6fb704874a83fde4af178ba553cea93158896e26 Mon Sep 17 00:00:00 2001 From: yaojun Date: Thu, 30 Nov 2023 17:07:41 +0800 Subject: [PATCH 086/434] sync server code and fix code check Signed-off-by: yaojun --- contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index 932d4c6bf..6ced2eabe 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -14760,10 +14760,13 @@ Datum dss_io_stat(PG_FUNCTION_ARGS) if (duration > MAX_DURATION_TIME) { ereport(ERROR, (errmsg("The duration is too long, and it must be less than 
60s."))); } + if (duration <= 0) { + ereport(ERROR, (errmsg("The duration must be greater than zero."))); + } init_dss_io_stat(); unsigned long long read_bytes = 0; unsigned long long write_bytes = 0; - int io_count = 0; + unsigned int io_count = 0; get_dss_io_stat(duration, &read_bytes, &write_bytes, &io_count); // tuple header int i = 1; @@ -14778,7 +14781,7 @@ Datum dss_io_stat(PG_FUNCTION_ARGS) i = 0; values[i++] = UInt64GetDatum(read_bytes); values[i++] = UInt64GetDatum(write_bytes); - values[i] = Int32GetDatum(io_count); + values[i] = UInt32GetDatum(io_count); HeapTuple heap_tuple = heap_form_tuple(tupdesc, values, nulls); result = HeapTupleGetDatum(heap_tuple); -- Gitee From 0107a74de5c4fdd8f20134287aa6fbb9a537de62 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 29 Nov 2023 14:38:22 +0800 Subject: [PATCH 087/434] Sync server code. c74f46399411051224eacd991f9c77293c6e2578 --- contrib/dolphin/include/builtin_funcs.ini | 23 +- .../plugin_optimizer/commands/copy.cpp | 23 +- .../plugin_optimizer/plan/pgxcplan_single.cpp | 6 + .../dolphin/plugin_optimizer/plan/planner.cpp | 30 + contrib/dolphin/plugin_parser/gram.y | 10 +- contrib/dolphin/plugin_parser/parse_expr.cpp | 1 + .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 1734 ++++++++++++----- .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 8 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 55 +- contrib/dolphin/tablecmds.cpp | 34 +- 10 files changed, 1404 insertions(+), 520 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index b09a63b67..71287de8a 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -4035,7 +4035,7 @@ ), AddFuncGroup( "gs_stat_walrecvwriter", 1, - AddBuiltinFunc(_0(2868), _1("gs_stat_walrecvwriter"), _2(1), _3(false), _4(true), _5(gs_stat_walrecvwriter), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), 
_15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 23), _21(16, 23, 16, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 1184, 1184), _22(16, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(16, "operation", "is_enable_stat", "total_write_bytes", "write_times", "total_write_time", "avg_write_time", "avg_write_bytes", "total_sync_bytes", "sync_times", "total_sync_time", "avg_sync_time", "avg_sync_bytes", "current_xlog_segno", "newest_xlog_segno", "last_reset_time", "cur_time"), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location -1 :constvalue 4 [ 2 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_stat_walrecvwriter"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(2868), _1("gs_stat_walrecvwriter"), _2(1), _3(false), _4(true), _5(gs_stat_walrecvwriter), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 23), _21(16, 23, 16, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 1184, 1184), _22(16, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(16, "operation", "is_enable_stat", "total_write_bytes", "write_times", "total_write_time", "avg_write_time", "avg_write_bytes", "total_sync_bytes", "sync_times", "total_sync_time", "avg_sync_time", "avg_sync_bytes", "current_xlog_segno", "inited_xlog_segno", "last_reset_time", "cur_time"), _24("({CONST :consttype 23 :consttypmod -1 :constcollid 0 :constlen 4 :constbyval true :constisnull false :ismaxvalue false :location -1 :constvalue 4 [ 2 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open 
false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_stat_walrecvwriter"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "gs_xlogdump_lsn", 1, @@ -6325,6 +6325,12 @@ "jsonb_contains", 1, AddBuiltinFunc(_0(3418), _1("jsonb_contains"), _2(2), _3(true), _4(false), _5(jsonb_contains), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 3802, 3802), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_contains"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "jsonb_delete", 3, + AddBuiltinFunc(_0(5612), _1("jsonb_delete"), _2(2), _3(true), _4(false), _5(jsonb_delete_idx), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 3802, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_delete_idx"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(5613), _1("jsonb_delete"), _2(2), _3(true), _4(false), _5(jsonb_delete), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 3802, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_delete"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(5614), 
_1("jsonb_delete"), _2(2), _3(true), _4(false), _5(jsonb_delete_array), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 3802, 1009), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_delete_array"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "jsonb_each", 1, AddBuiltinFunc(_0(3419), _1("jsonb_each"), _2(1), _3(true), _4(true), _5(jsonb_each), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 3802), _21(3, 3802, 25, 3802), _22(3, 'i', 'o', 'o'), _23(3, "from_json", "key", "value"), _24(NULL), _25("jsonb_each"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -6381,6 +6387,10 @@ "jsonb_in", 1, AddBuiltinFunc(_0(3806), _1("jsonb_in"), _2(1), _3(true), _4(false), _5(jsonb_in), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_in"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "jsonb_insert", 1, + AddBuiltinFunc(_0(5610), _1("jsonb_insert"), _2(4), _3(true), _4(false), _5(jsonb_insert), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(1), _20(4, 3802, 1009, 3802, 16), 
_21(NULL), _22(NULL), _23(NULL), _24("({CONST :consttype 16 :consttypmod -1 :constcollid 0 :constlen 1 :constbyval true :constisnull false :ismaxvalue false :location 72803 :constvalue 1 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno 0 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("jsonb_insert"), _26(NULL), _27(NULL), _28(NULL), _29(1, 3), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "jsonb_le", 1, AddBuiltinFunc(_0(3431), _1("jsonb_le"), _2(2), _3(true), _4(false), _5(jsonb_le), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 3802, 3802), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_le"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -6425,6 +6435,10 @@ "jsonb_send", 1, AddBuiltinFunc(_0(3444), _1("jsonb_send"), _2(1), _3(true), _4(false), _5(jsonb_send), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 3802), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_send"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "jsonb_set", 1, + AddBuiltinFunc(_0(5611), _1("jsonb_set"), _2(4), _3(true), _4(false), _5(jsonb_set), _6(3802), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(1), _20(4, 3802, 1009, 3802, 16), _21(NULL), _22(NULL), _23(NULL), _24("({CONST :consttype 16 
:consttypmod -1 :constcollid 0 :constlen 1 :constbyval true :constisnull false :ismaxvalue false :location 72803 :constvalue 1 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno 0 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("jsonb_set"), _26(NULL), _27(NULL), _28(NULL), _29(1, 3), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "jsonb_typeof", 1, AddBuiltinFunc(_0(3445), _1("jsonb_typeof"), _2(1), _3(true), _4(false), _5(jsonb_typeof), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 3802), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("jsonb_typeof"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -8517,9 +8531,10 @@ AddBuiltinFunc(_0(2560), _1("pg_postmaster_start_time"), _2(0), _3(true), _4(false), _5(pg_postmaster_start_time), _6(1184), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_postmaster_start_time"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("postmaster start time"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( - "pg_prepared_statement", 1, - AddBuiltinFunc(_0(2510), _1("pg_prepared_statement"), _2(0), _3(true), _4(true), _5(pg_prepared_statement), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(5, 25, 25, 1184, 2211, 16), _22(5, 'o', 'o', 'o', 'o', 
'o'), _23(5, "name", "statement", "prepare_time", "parameter_types", "from_sql"), _24(NULL), _25("pg_prepared_statement"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("get the prepared statements for this session"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) - ), + "pg_prepared_statement", 2, + AddBuiltinFunc(_0(2510), _1("pg_prepared_statement"), _2(0), _3(true), _4(true), _5(pg_prepared_statement), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(5, 25, 25, 1184, 2211, 16), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "name", "statement", "prepare_time", "parameter_types", "from_sql"), _24(NULL), _25("pg_prepared_statement"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(true), _33("get the prepared statements for this session"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), + AddBuiltinFunc(_0(3702), _1("pg_prepared_statement"), _2(1), _3(true), _4(true), _5(pg_prepared_statement_global), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(8, 20, 20, 25, 25, 25, 1184, 2211, 16), _22(8, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(8,"in_sessionid", "sessionid", "username", "name", "statement", "prepare_time", "parameter_types", "from_sql"), _24(NULL), _25("pg_prepared_statement_global"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(true), _33("get the prepared statements for specified session"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "pg_prepared_xact", 1, AddBuiltinFunc(_0(1065), _1("pg_prepared_xact"), _2(0), _3(true), _4(true), _5(pg_prepared_xact), _6(2249), _7(PG_CATALOG_NAMESPACE), 
_8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(5, 28, 25, 1184, 26, 26), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "transaction", "gid", "prepared", "ownerid", "dbid"), _24(NULL), _25("pg_prepared_xact"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("view two-phase transactions"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/plugin_optimizer/commands/copy.cpp b/contrib/dolphin/plugin_optimizer/commands/copy.cpp index 71ba492cd..b366555ab 100644 --- a/contrib/dolphin/plugin_optimizer/commands/copy.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/copy.cpp @@ -2654,8 +2654,21 @@ static CopyState BeginCopy(bool is_from, Relation rel, Node* raw_query, const ch Assert(query->commandType == CMD_SELECT); Assert(query->utilityStmt == NULL); - /* plan the query */ - plan = planner(query, 0, NULL); + bool old_smp_enabled = u_sess->opt_cxt.smp_enabled; + u_sess->opt_cxt.smp_enabled = false; + + PG_TRY(); + { + /* plan the query */ + plan = planner(query, 0, NULL); + } + PG_CATCH(); + { + u_sess->opt_cxt.smp_enabled = old_smp_enabled; + PG_RE_THROW(); + } + PG_END_TRY(); + u_sess->opt_cxt.smp_enabled = old_smp_enabled; /* * Use a snapshot with an updated command ID to ensure this query sees @@ -5859,9 +5872,6 @@ static int CopyFromCompressAndInsertBatch(PageCompress* pcState, EState* estate, // bool IsTypeAcceptEmptyStr(Oid typeOid) { - if (type_is_set(typeOid)) { - return true; - } switch (typeOid) { case VARCHAROID: case NVARCHAR2OID: @@ -5878,6 +5888,9 @@ bool IsTypeAcceptEmptyStr(Oid typeOid) case CHAROID: return true; default: + if (type_is_set(typeOid)) { + return true; + } return false; } } diff --git a/contrib/dolphin/plugin_optimizer/plan/pgxcplan_single.cpp b/contrib/dolphin/plugin_optimizer/plan/pgxcplan_single.cpp index 93de9b8b3..b15a922e7 100644 --- 
a/contrib/dolphin/plugin_optimizer/plan/pgxcplan_single.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/pgxcplan_single.cpp @@ -221,6 +221,12 @@ PlannedStmt* pgxc_planner(Query* query, int cursorOptions, ParamListInfo boundPa ecxt = MemoryContextSwitchTo(current_context); edata = CopyErrorData(); + if (SS_STANDBY_MODE_WITH_REMOTE_EXECUTE) { + LWLockReleaseAll(); + AbortBufferIO(); + UnlockBuffers(); + } + /* * refuse to recreate plan if * 1. no query copy: query have been polluted by rewrite diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index dd41a3a9a..e25d0c7e5 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -94,6 +94,14 @@ #include "catalog/gs_collation.h" #include "replication/libpqsw.h" +#ifndef DOLPHIN +/* Hook for plugins to get control in planner() */ +THR_LOCAL ndp_pushdown_hook_type ndp_pushdown_hook = NULL; +#ifdef USE_SPQ +THR_LOCAL spq_planner_hook_type spq_planner_hook = NULL; +#endif +#endif + #ifndef MIN #define MIN(A, B) ((B) < (A) ? (B) : (A)) #endif @@ -1269,6 +1277,24 @@ static inline bool contain_system_column(Node *var_list) return result; } +static inline bool contain_placeholdervar(Node *var_list) +{ + List* vars = pull_var_clause(var_list, PVC_RECURSE_AGGREGATES, PVC_INCLUDE_PLACEHOLDERS); + ListCell* lc = NULL; + bool result = false; + + foreach (lc, vars) { + Node* var = (Node*)lfirst(lc); + if (IsA(var, PlaceHolderVar)) { + result = true; + break; + } + } + + list_free_ext(vars); + return result; +} + /* -------------------- * subquery_planner * Invokes the planner on a subquery. 
We recurse to here for each @@ -1831,6 +1857,10 @@ Plan* subquery_planner(PlannerGlobal* glob, Query* parse, PlannerInfo* parent_ro support_rewrite = false; break; } + if (root->parse->jointree != NULL && contain_placeholdervar(root->parse->jointree->quals)) { + support_rewrite = false; + break; + } if (!fulljoin_2_left_union_right_anti_support(root->parse)) { support_rewrite = false; break; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index d6a428bc3..8478093ed 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -40684,11 +40684,15 @@ static void CheckPartitionExpr(Node* expr, int* colCount) if (expr == NULL) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("The expr can't be NULL"))); if (expr->type == T_A_Expr) { - char* name = strVal(linitial(((A_Expr*)expr)->name)); + A_Expr* a_expr = (A_Expr*)expr; + if (a_expr->name == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("The expr is not supported for Partition Expr"))); + } + char* name = strVal(linitial(a_expr->name)); if (strcmp(name, "+") != 0 && strcmp(name, "-") != 0 && strcmp(name, "*") != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("The %s operator is not supported for Partition Expr", name))); - CheckPartitionExpr(((A_Expr*)expr)->lexpr, colCount); - CheckPartitionExpr(((A_Expr*)expr)->rexpr, colCount); + CheckPartitionExpr(a_expr->lexpr, colCount); + CheckPartitionExpr(a_expr->rexpr, colCount); } else if (expr->type == T_FuncCall) { char* validFuncName[MAX_SUPPORTED_FUNC_FOR_PART_EXPR] = {"abs","ceiling","datediff","day","dayofmonth","dayofweek","dayofyear","extract","floor","hour", "microsecond","minute","mod","month","quarter","second","time_to_sec","to_days","to_seconds","unix_timestamp","weekday","year","yearweek","date_part","div"}; diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 
bb168089b..2cd6ef3d9 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -1461,6 +1461,7 @@ static Node* transformAExprOp(ParseState* pstate, A_Expr* a) static Node* transformAExprAnd(ParseState* pstate, A_Expr* a) { + a->rexpr = (Node *)copyObject(a->rexpr); Node* lexpr = transformExprRecurse(pstate, a->lexpr); Node* rexpr = transformExprRecurse(pstate, a->rexpr); #ifdef DOLPHIN diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 6a4718721..21585970d 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -30,6 +30,17 @@ #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/typcache.h" +#include "utils/array.h" + +/* Operations available for setPath */ +#define JB_PATH_CREATE 0x0001 +#define JB_PATH_DELETE 0x0002 +#define JB_PATH_REPLACE 0x0004 +#define JB_PATH_INSERT_BEFORE 0x0008 +#define JB_PATH_INSERT_AFTER 0x0010 +#define JB_PATH_CREATE_OR_INSERT (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER | JB_PATH_CREATE) +#define JB_PATH_FILL_GAPS 0x0020 +#define JB_PATH_CONSISTENT_POSITION 0x0040 #ifdef DOLPHIN #include "cjson/cJSON.h" @@ -299,115 +310,128 @@ static void populate_recordset_array_element_start(void *state, bool isnull); /* worker function for populate_recordset and to_recordset */ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg); /* Worker that takes care of common setup for us */ -static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader, uint32 flags, char *key, uint32 keylen); +static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader, uint32 flags, char *key, uint32 keylen); + +/* functions supporting jsonb_delete, jsonb_set and jsonb_concat */ +static void addJsonbToParseState(JsonbParseState **pstate, Jsonb *jb); +static JsonbValue *setPath(JsonbIterator **it, Datum 
*path_elems, bool *path_nulls, int path_len, + JsonbParseState **st, int level, Jsonb *newval, int op_type); +static void setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, int path_len, JsonbParseState **st, + int level, Jsonb *newval, uint32 npairs, int op_type); +static void setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, int path_len, JsonbParseState **st, + int level, Jsonb *newval, uint32 nelems, int op_type); /* search type classification for json_get* functions */ -typedef enum { JSON_SEARCH_OBJECT = 1, JSON_SEARCH_ARRAY, JSON_SEARCH_PATH } JsonSearch; +typedef enum { + JSON_SEARCH_OBJECT = 1, + JSON_SEARCH_ARRAY, + JSON_SEARCH_PATH +} JsonSearch; /* state for json_object_keys */ typedef struct OkeysState { JsonLexContext *lex; - char **result; - int result_size; - int result_count; - int sent_count; + char **result; + int result_size; + int result_count; + int sent_count; } OkeysState; /* state for json_get* functions */ typedef struct GetState { JsonLexContext *lex; - JsonSearch search_type; - int search_index; - int array_index; - char *search_term; - char *result_start; - text *tresult; - bool result_is_null; - bool normalize_results; - bool next_scalar; - char **path; - int npath; - char **current_path; - bool *pathok; - int *array_level_index; - int *path_level_index; + JsonSearch search_type; + int search_index; + int array_index; + char *search_term; + char *result_start; + text *tresult; + bool result_is_null; + bool normalize_results; + bool next_scalar; + char **path; + int npath; + char **current_path; + bool *pathok; + int *array_level_index; + int *path_level_index; } GetState; /* state for json_array_length */ typedef struct AlenState { JsonLexContext *lex; - int count; + int count; } AlenState; /* state for json_each */ typedef struct EachState { - JsonLexContext *lex; + JsonLexContext *lex; Tuplestorestate *tuple_store; - TupleDesc ret_tdesc; - MemoryContext tmp_cxt; - char *result_start; - bool 
normalize_results; - bool next_scalar; - char *normalized_scalar; + TupleDesc ret_tdesc; + MemoryContext tmp_cxt; + char *result_start; + bool normalize_results; + bool next_scalar; + char *normalized_scalar; } EachState; /* state for json_array_elements */ typedef struct ElementsState { - JsonLexContext *lex; + JsonLexContext *lex; Tuplestorestate *tuple_store; - TupleDesc ret_tdesc; - MemoryContext tmp_cxt; - char *result_start; - bool normalize_results; - bool next_scalar; - char *normalized_scalar; + TupleDesc ret_tdesc; + MemoryContext tmp_cxt; + char *result_start; + bool normalize_results; + bool next_scalar; + char *normalized_scalar; } ElementsState; /* state for get_json_object_as_hash */ typedef struct JhashState { JsonLexContext *lex; - HTAB *hash; - char *saved_scalar; - char *save_json_start; - bool use_json_as_text; - char *function_name; + HTAB *hash; + char *saved_scalar; + char *save_json_start; + bool use_json_as_text; + char *function_name; } JHashState; /* used to build the hashtable */ typedef struct JsonHashEntry { - char fname[NAMEDATALEN]; - char *val; - char *json; - bool isnull; + char fname[NAMEDATALEN]; + char *val; + char *json; + bool isnull; } JsonHashEntry; /* these two are stolen from hstore / record_out, used in populate_record* */ typedef struct ColumnIOData { - Oid column_type; - Oid typiofunc; - Oid typioparam; - FmgrInfo proc; + Oid column_type; + Oid typiofunc; + Oid typioparam; + FmgrInfo proc; } ColumnIOData; typedef struct RecordIOData { - Oid record_type; - int32 record_typmod; - int ncolumns; - ColumnIOData columns[1]; /* VARIABLE LENGTH ARRAY */ + Oid record_type; + int32 record_typmod; + int ncolumns; + ColumnIOData columns[1]; /* VARIABLE LENGTH ARRAY */ } RecordIOData; /* state for populate_recordset */ typedef struct PopulateRecordsetState { - JsonLexContext *lex; - HTAB *json_hash; - char *saved_scalar; - char *save_json_start; - bool use_json_as_text; + JsonLexContext *lex; + HTAB *json_hash; + char *saved_scalar; 
+ char *save_json_start; + bool use_json_as_text; Tuplestorestate *tuple_store; - TupleDesc ret_tdesc; - HeapTupleHeader rec; - RecordIOData *my_extra; - MemoryContext fn_mcxt; /* used to stash IO funcs */ + TupleDesc ret_tdesc; + HeapTupleHeader rec; + RecordIOData *my_extra; + MemoryContext fn_mcxt; /* used to stash IO funcs */ } PopulateRecordsetState; /* Turn a jsonb object into a record */ @@ -510,23 +534,25 @@ extern "C" DLL_PUBLIC Datum json_le(PG_FUNCTION_ARGS); Datum jsonb_object_keys(PG_FUNCTION_ARGS) { FuncCallContext *funcctx = NULL; - OkeysState *state = NULL; + OkeysState *state = NULL; int i; if (SRF_IS_FIRSTCALL()) { MemoryContext oldcontext; - Jsonb *jb = PG_GETARG_JSONB(0); - bool skipNested = false; + Jsonb *jb = PG_GETARG_JSONB(0); + bool skipNested = false; JsonbIterator *it = NULL; - JsonbValue v; - int r; + JsonbValue v; + int r; if (JB_ROOT_IS_SCALAR(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_keys on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_keys on a scalar"))); } else if (JB_ROOT_IS_ARRAY(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_keys on an array"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_keys on an array"))); } funcctx = SRF_FIRSTCALL_INIT(); @@ -555,13 +581,13 @@ Datum jsonb_object_keys(PG_FUNCTION_ARGS) } MemoryContextSwitchTo(oldcontext); - funcctx->user_fctx = (void *)state; + funcctx->user_fctx = (void *) state; } funcctx = SRF_PERCALL_SETUP(); - state = (OkeysState *)funcctx->user_fctx; + state = (OkeysState *) funcctx->user_fctx; if (state->sent_count < state->result_count) { - char *nxt = state->result[state->sent_count++]; + char *nxt = state->result[state->sent_count++]; SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(nxt)); } @@ -579,12 +605,12 @@ Datum json_object_keys(PG_FUNCTION_ARGS) { FuncCallContext *funcctx = NULL; 
OkeysState *state = NULL; - int i; + int i; if (SRF_IS_FIRSTCALL()) { - text *json = PG_GETARG_TEXT_P(0); + text *json = PG_GETARG_TEXT_P(0); JsonLexContext *lex = makeJsonLexContext(json, true); - JsonSemAction *sem = NULL; + JsonSemAction *sem = NULL; MemoryContext oldcontext; @@ -600,7 +626,7 @@ Datum json_object_keys(PG_FUNCTION_ARGS) state->sent_count = 0; state->result = (char **)palloc(256 * sizeof(char *)); - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->array_start = okeys_array_start; sem->scalar = okeys_scalar; sem->object_field_start = okeys_object_field_start; @@ -613,13 +639,13 @@ Datum json_object_keys(PG_FUNCTION_ARGS) pfree(sem); MemoryContextSwitchTo(oldcontext); - funcctx->user_fctx = (void *)state; + funcctx->user_fctx = (void *) state; } funcctx = SRF_PERCALL_SETUP(); - state = (OkeysState *)funcctx->user_fctx; + state = (OkeysState *) funcctx->user_fctx; if (state->sent_count < state->result_count) { - char *nxt = state->result[state->sent_count++]; + char *nxt = state->result[state->sent_count++]; SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(nxt)); } @@ -635,7 +661,7 @@ Datum json_object_keys(PG_FUNCTION_ARGS) static void okeys_object_field_start(void *state, char *fname, bool isnull) { - OkeysState *_state = (OkeysState *)state; + OkeysState *_state = (OkeysState *) state; /* only collecting keys for the top level object */ if (_state->lex->lex_level != 1) { @@ -654,21 +680,25 @@ static void okeys_object_field_start(void *state, char *fname, bool isnull) static void okeys_array_start(void *state) { - OkeysState *_state = (OkeysState *)state; + OkeysState *_state = (OkeysState *) state; /* top level must be a json object */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_object_keys on an array"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_object_keys on an array"))); } } static void okeys_scalar(void 
*state, char *token, JsonTokenType tokentype) { - OkeysState *_state = (OkeysState *)state; + OkeysState *_state = (OkeysState *) state; /* top level must be a json object */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_object_keys on a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_object_keys on a scalar"))); } } @@ -737,22 +767,22 @@ Datum json_object_field(PG_FUNCTION_ARGS) Datum jsonb_object_field(PG_FUNCTION_ARGS) { - Jsonb *jb = PG_GETARG_JSONB(0); - char *key = text_to_cstring(PG_GETARG_TEXT_P(1)); - int klen = strlen(key); + Jsonb *jb = PG_GETARG_JSONB(0); + char *key = text_to_cstring(PG_GETARG_TEXT_P(1)); + int klen = strlen(key); JsonbIterator *it = NULL; - JsonbValue v; - int r; - bool skipNested = false; + JsonbValue v; + int r; + bool skipNested = false; if (JB_ROOT_IS_SCALAR(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_field (jsonb -> text " - "operator) on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_field (jsonb -> text operator) on a scalar"))); } else if (JB_ROOT_IS_ARRAY(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_field (jsonb -> text " - "operator) on an array"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_field (jsonb -> text operator) on an array"))); } Assert(JB_ROOT_IS_OBJECT(jb)); @@ -766,7 +796,7 @@ Datum jsonb_object_field(PG_FUNCTION_ARGS) * The next thing the iterator fetches should be the value, no * matter what shape it is. 
*/ - (void)JsonbIteratorNext(&it, &v, skipNested); + (void) JsonbIteratorNext(&it, &v, skipNested); PG_RETURN_JSONB(JsonbValueToJsonb(&v)); } } @@ -900,22 +930,22 @@ Datum json_object_field_text(PG_FUNCTION_ARGS) Datum jsonb_object_field_text(PG_FUNCTION_ARGS) { - Jsonb *jb = PG_GETARG_JSONB(0); - char *key = text_to_cstring(PG_GETARG_TEXT_P(1)); - int klen = strlen(key); + Jsonb *jb = PG_GETARG_JSONB(0); + char *key = text_to_cstring(PG_GETARG_TEXT_P(1)); + int klen = strlen(key); JsonbIterator *it = NULL; - JsonbValue v; - int r; - bool skipNested = false; + JsonbValue v; + int r; + bool skipNested = false; if (JB_ROOT_IS_SCALAR(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_field_text (jsonb ->> " - "text operator) on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_field_text (jsonb ->> text operator) on a scalar"))); } else if (JB_ROOT_IS_ARRAY(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_object_field_text (jsonb ->> " - "text operator) on an array"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_object_field_text (jsonb ->> text operator) on an array"))); } Assert(JB_ROOT_IS_OBJECT(jb)); @@ -925,7 +955,7 @@ Datum jsonb_object_field_text(PG_FUNCTION_ARGS) if (r == WJB_KEY) { if (klen == v.string.len && strncmp(key, v.string.val, klen) == 0) { - text *result = NULL; + text *result = NULL; /* * The next thing the iterator fetches should be the value, no @@ -942,9 +972,9 @@ Datum jsonb_object_field_text(PG_FUNCTION_ARGS) } else if (v.type == jbvNull) { PG_RETURN_NULL(); } else { - StringInfo jtext = makeStringInfo(); - Jsonb *tjb = JsonbValueToJsonb(&v); - (void)JsonbToCString(jtext, VARDATA(tjb), -1); + StringInfo jtext = makeStringInfo(); + Jsonb *tjb = JsonbValueToJsonb(&v); + (void) JsonbToCString(jtext, VARDATA(tjb), -1); result = cstring_to_text_with_len(jtext->data, jtext->len); } 
PG_RETURN_TEXT_P(result); @@ -957,9 +987,9 @@ Datum jsonb_object_field_text(PG_FUNCTION_ARGS) Datum json_array_element(PG_FUNCTION_ARGS) { - text *json = PG_GETARG_TEXT_P(0); - text *result = NULL; - int element = PG_GETARG_INT32(1); + text *json = PG_GETARG_TEXT_P(0); + text *result = NULL; + int element = PG_GETARG_INT32(1); result = get_worker(json, NULL, element, NULL, NULL, -1, false); @@ -972,22 +1002,22 @@ Datum json_array_element(PG_FUNCTION_ARGS) Datum jsonb_array_element(PG_FUNCTION_ARGS) { - Jsonb *jb = PG_GETARG_JSONB(0); - int element = PG_GETARG_INT32(1); + Jsonb *jb = PG_GETARG_JSONB(0); + int element = PG_GETARG_INT32(1); JsonbIterator *it = NULL; - JsonbValue v; - int r; - bool skipNested = false; - int element_number = 0; + JsonbValue v; + int r; + bool skipNested = false; + int element_number = 0; if (JB_ROOT_IS_SCALAR(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_array_element (jsonb -> int " - "operator) on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_array_element (jsonb -> int operator) on a scalar"))); } else if (JB_ROOT_IS_OBJECT(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_array_element (jsonb -> int " - "operator) on an object"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_array_element (jsonb -> int operator) on an object"))); } Assert(JB_ROOT_IS_ARRAY(jb)); @@ -1009,9 +1039,9 @@ Datum jsonb_array_element(PG_FUNCTION_ARGS) Datum json_array_element_text(PG_FUNCTION_ARGS) { - text *json = PG_GETARG_TEXT_P(0); - text *result = NULL; - int element = PG_GETARG_INT32(1); + text *json = PG_GETARG_TEXT_P(0); + text *result = NULL; + int element = PG_GETARG_INT32(1); result = get_worker(json, NULL, element, NULL, NULL, -1, true); @@ -1024,20 +1054,22 @@ Datum json_array_element_text(PG_FUNCTION_ARGS) Datum jsonb_array_element_text(PG_FUNCTION_ARGS) { - Jsonb *jb = 
PG_GETARG_JSONB(0); - int element = PG_GETARG_INT32(1); + Jsonb *jb = PG_GETARG_JSONB(0); + int element = PG_GETARG_INT32(1); JsonbIterator *it = NULL; - JsonbValue v; - int r; - bool skipNested = false; - int element_number = 0; + JsonbValue v; + int r; + bool skipNested = false; + int element_number = 0; if (JB_ROOT_IS_SCALAR(jb)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call jsonb_array_element_text on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_array_element_text on a scalar"))); } else if (JB_ROOT_IS_OBJECT(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call jsonb_array_element_text on an object"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_array_element_text on an object"))); } Assert(JB_ROOT_IS_ARRAY(jb)); @@ -1050,15 +1082,15 @@ Datum jsonb_array_element_text(PG_FUNCTION_ARGS) * if it's a scalar string it needs to be de-escaped, * otherwise just return the text */ - text *result = NULL; + text *result = NULL; if (v.type == jbvString) { result = cstring_to_text_with_len(v.string.val, v.string.len); } else if (v.type == jbvNull) { PG_RETURN_NULL(); } else { - StringInfo jtext = makeStringInfo(); - Jsonb *tjb = JsonbValueToJsonb(&v); - (void)JsonbToCString(jtext, VARDATA(tjb), -1); + StringInfo jtext = makeStringInfo(); + Jsonb *tjb = JsonbValueToJsonb(&v); + (void) JsonbToCString(jtext, VARDATA(tjb), -1); result = cstring_to_text_with_len(jtext->data, jtext->len); } PG_RETURN_TEXT_P(result); @@ -1084,23 +1116,24 @@ Datum json_extract_path_text(PG_FUNCTION_ARGS) */ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) { - text *json = NULL; - ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); - text *result = NULL; - Datum *pathtext = NULL; - bool *pathnulls = NULL; - int npath; - char **tpath = NULL; - int *ipath = NULL; - int i; - long ind; - char *endptr = NULL; + text *json = NULL; + 
ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); + text *result = NULL; + Datum *pathtext = NULL; + bool *pathnulls = NULL; + int npath; + char **tpath = NULL; + int *ipath = NULL; + int i; + long ind; + char *endptr = NULL; json = PG_GETARG_TEXT_P(0); if (array_contains_nulls(path)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call function with null path elements"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call function with null path elements"))); } deconstruct_array(path, TEXTOID, -1, false, 'i', &pathtext, &pathnulls, &npath); @@ -1117,8 +1150,9 @@ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) for (i = 0; i < npath; i++) { tpath[i] = TextDatumGetCString(pathtext[i]); if (*tpath[i] == '\0') { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call function with empty path elements"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call function with empty path elements"))); } /* @@ -1128,7 +1162,7 @@ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) */ ind = strtol(tpath[i], &endptr, 10); if (*endptr == '\0' && ind <= INT_MAX && ind >= 0) { - ipath[i] = (int)ind; + ipath[i] = (int) ind; } else { ipath[i] = -1; } @@ -1149,12 +1183,12 @@ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) * * common worker for all the json getter functions */ -static inline text *get_worker(text *json, char *field, int elem_index, char **tpath, int *ipath, int npath, - bool normalize_results) +static inline text *get_worker(text *json, char *field, int elem_index, char **tpath, + int *ipath, int npath, bool normalize_results) { - GetState *state = NULL; + GetState *state = NULL; JsonLexContext *lex = makeJsonLexContext(json, true); - JsonSemAction *sem = NULL; + JsonSemAction *sem = NULL; /* only allowed to use one of these */ Assert(elem_index < 0 || (tpath == NULL && ipath == NULL && field == NULL)); @@ 
-1186,7 +1220,7 @@ static inline text *get_worker(text *json, char *field, int elem_index, char **t state->search_index = elem_index; state->array_index = -1; } - sem->semstate = (void *)state; + sem->semstate = (void *) state; /* * Not all variants need all the semantic routines. only set the ones @@ -1209,27 +1243,31 @@ static inline text *get_worker(text *json, char *field, int elem_index, char **t static void get_object_start(void *state) { - GetState *_state = (GetState *)state; + GetState *_state = (GetState *) state; /* json structure check */ if (_state->lex->lex_level == 0 && _state->search_type == JSON_SEARCH_ARRAY) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot extract array element from a non-array"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot extract array element from a non-array"))); } } static void get_object_field_start(void *state, char *fname, bool isnull) { - GetState *_state = (GetState *)state; - bool get_next = false; - int lex_level = _state->lex->lex_level; + GetState *_state = (GetState *) state; + bool get_next = false; + int lex_level = _state->lex->lex_level; - if (lex_level == 1 && _state->search_type == JSON_SEARCH_OBJECT && strcmp(fname, _state->search_term) == 0) { + if (lex_level == 1 && _state->search_type == JSON_SEARCH_OBJECT && + strcmp(fname, _state->search_term) == 0) { _state->tresult = NULL; _state->result_start = NULL; get_next = true; - } else if (_state->search_type == JSON_SEARCH_PATH && lex_level <= _state->npath && - _state->pathok[_state->lex->lex_level - 1] && strcmp(fname, _state->path[lex_level - 1]) == 0) { + } else if (_state->search_type == JSON_SEARCH_PATH && + lex_level <= _state->npath && + _state->pathok[_state->lex->lex_level - 1] && + strcmp(fname, _state->path[lex_level - 1]) == 0) { /* path search, path so far is ok, and we have a match */ /* this object overrides any previous matching object */ _state->tresult = NULL; @@ -1259,14 +1297,16 @@ static 
void get_object_field_start(void *state, char *fname, bool isnull) static void get_object_field_end(void *state, char *fname, bool isnull) { - GetState *_state = (GetState *)state; - bool get_last = false; - int lex_level = _state->lex->lex_level; + GetState *_state = (GetState *) state; + bool get_last = false; + int lex_level = _state->lex->lex_level; /* same tests as in get_object_field_start, mutatis mutandis */ if (lex_level == 1 && _state->search_type == JSON_SEARCH_OBJECT && strcmp(fname, _state->search_term) == 0) { get_last = true; - } else if (_state->search_type == JSON_SEARCH_PATH && lex_level <= _state->npath && _state->pathok[lex_level - 1] && + } else if (_state->search_type == JSON_SEARCH_PATH && + lex_level <= _state->npath && + _state->pathok[lex_level - 1] && strcmp(fname, _state->path[lex_level - 1]) == 0) { /* done with this field so reset pathok */ if (lex_level < _state->npath) { @@ -1288,7 +1328,7 @@ static void get_object_field_end(void *state, char *fname, bool isnull) int len = _state->lex->prev_token_terminator - _state->result_start; if (isnull && _state->normalize_results) { - _state->tresult = (text *)NULL; + _state->tresult = (text *) NULL; } else { _state->tresult = cstring_to_text_with_len(_state->result_start, len); } @@ -1303,12 +1343,14 @@ static void get_object_field_end(void *state, char *fname, bool isnull) static void get_array_start(void *state) { - GetState *_state = (GetState *)state; - int lex_level = _state->lex->lex_level; + GetState *_state = (GetState *) state; + int lex_level = _state->lex->lex_level; /* json structure check */ if (lex_level == 0 && _state->search_type == JSON_SEARCH_OBJECT) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot extract field from a non-object"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot extract field from a non-object"))); } /* @@ -1322,9 +1364,9 @@ static void get_array_start(void *state) static void 
get_array_element_start(void *state, bool isnull) { - GetState *_state = (GetState *)state; - bool get_next = false; - int lex_level = _state->lex->lex_level; + GetState *_state = (GetState *) state; + bool get_next = false; + int lex_level = _state->lex->lex_level; if (lex_level == 1 && _state->search_type == JSON_SEARCH_ARRAY) { /* single integer search */ @@ -1332,7 +1374,9 @@ static void get_array_element_start(void *state, bool isnull) if (_state->array_index == _state->search_index) { get_next = true; } - } else if (_state->search_type == JSON_SEARCH_PATH && lex_level <= _state->npath && _state->pathok[lex_level - 1]) { + } else if (_state->search_type == JSON_SEARCH_PATH && + lex_level <= _state->npath && + _state->pathok[lex_level - 1]) { /* * path search, path so far is ok * @@ -1350,11 +1394,13 @@ static void get_array_element_start(void *state, bool isnull) _state->pathok[lex_level] = true; } } + } /* same logic as for objects */ if (get_next) { - if (_state->normalize_results && _state->lex->token_type == JSON_TOKEN_STRING) { + if (_state->normalize_results && + _state->lex->token_type == JSON_TOKEN_STRING) { _state->next_scalar = true; } else { _state->result_start = _state->lex->token_start; @@ -1364,15 +1410,19 @@ static void get_array_element_start(void *state, bool isnull) static void get_array_element_end(void *state, bool isnull) { - GetState *_state = (GetState *)state; - bool get_last = false; - int lex_level = _state->lex->lex_level; + GetState *_state = (GetState *) state; + bool get_last = false; + int lex_level = _state->lex->lex_level; /* same logic as in get_object_end, modified for arrays */ - if (lex_level == 1 && _state->search_type == JSON_SEARCH_ARRAY && _state->array_index == _state->search_index) { + if (lex_level == 1 && _state->search_type == JSON_SEARCH_ARRAY && + _state->array_index == _state->search_index) { get_last = true; - } else if (_state->search_type == JSON_SEARCH_PATH && lex_level <= _state->npath && 
_state->pathok[lex_level - 1] && - _state->array_level_index[lex_level - 1] == _state->path_level_index[lex_level - 1]) { + } else if (_state->search_type == JSON_SEARCH_PATH && + lex_level <= _state->npath && + _state->pathok[lex_level - 1] && + _state->array_level_index[lex_level - 1] == + _state->path_level_index[lex_level - 1]) { /* done with this element so reset pathok */ if (lex_level < _state->npath) { _state->pathok[lex_level] = false; @@ -1386,7 +1436,7 @@ static void get_array_element_end(void *state, bool isnull) int len = _state->lex->prev_token_terminator - _state->result_start; if (isnull && _state->normalize_results) { - _state->tresult = (text *)NULL; + _state->tresult = (text *) NULL; } else { _state->tresult = cstring_to_text_with_len(_state->result_start, len); } @@ -1395,10 +1445,12 @@ static void get_array_element_end(void *state, bool isnull) static void get_scalar(void *state, char *token, JsonTokenType tokentype) { - GetState *_state = (GetState *)state; + GetState *_state = (GetState *) state; if (_state->lex->lex_level == 0 && _state->search_type != JSON_SEARCH_PATH) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot extract element from a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot extract element from a scalar"))); } if (_state->next_scalar) { /* a de-escaped text value is wanted, so supply it */ @@ -1406,6 +1458,7 @@ static void get_scalar(void *state, char *token, JsonTokenType tokentype) /* make sure the next call to get_scalar doesn't overwrite it */ _state->next_scalar = false; } + } Datum jsonb_extract_path(PG_FUNCTION_ARGS) @@ -1420,21 +1473,23 @@ Datum jsonb_extract_path_text(PG_FUNCTION_ARGS) static inline Datum get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) { - Jsonb *jb = PG_GETARG_JSONB(0); - ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); - Datum *pathtext = NULL; - bool *pathnulls = NULL; - int npath; - int i; - Jsonb *res = NULL; - bool 
have_object = false, have_array = false; + Jsonb *jb = PG_GETARG_JSONB(0); + ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); + Datum *pathtext = NULL; + bool *pathnulls = NULL; + int npath; + int i; + Jsonb *res = NULL; + bool have_object = false, + have_array = false; JsonbValue *jbvp = NULL; - JsonbValue tv; + JsonbValue tv; JsonbSuperHeader superHeader; if (array_contains_nulls(path)) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call function with null path elements"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call function with null path elements"))); } deconstruct_array(path, TEXTOID, -1, false, 'i', &pathtext, &pathnulls, &npath); @@ -1451,28 +1506,29 @@ static inline Datum get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) have_array = true; } - superHeader = (JsonbSuperHeader)VARDATA(jb); + superHeader = (JsonbSuperHeader) VARDATA(jb); for (i = 0; i < npath; i++) { if (have_object) { jbvp = findJsonbValueFromSuperHeaderLen(superHeader, JB_FOBJECT, VARDATA_ANY(pathtext[i]), VARSIZE_ANY_EXHDR(pathtext[i])); } else if (have_array) { - long lindex; - uint32 index; - char *indextext = TextDatumGetCString(pathtext[i]); - char *endptr = NULL; + long lindex; + uint32 index; + char *indextext = TextDatumGetCString(pathtext[i]); + char *endptr = NULL; lindex = strtol(indextext, &endptr, 10); if (*endptr != '\0' || lindex > INT_MAX || lindex < 0) { PG_RETURN_NULL(); } - index = (uint32)lindex; + index = (uint32) lindex; jbvp = getIthJsonbValueFromSuperHeader(superHeader, index); } else { if (i == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call extract path from a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call extract path from a scalar"))); } PG_RETURN_NULL(); } @@ -1484,11 +1540,11 @@ static inline Datum get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) } if (jbvp->type == jbvBinary) { - JsonbIterator *it = 
JsonbIteratorInit(jbvp->binary.data); - int r; + JsonbIterator *it = JsonbIteratorInit(jbvp->binary.data); + int r; r = JsonbIteratorNext(&it, &tv, true); - superHeader = (JsonbSuperHeader)jbvp->binary.data; + superHeader = (JsonbSuperHeader) jbvp->binary.data; have_object = r == WJB_BEGIN_OBJECT; have_array = r == WJB_BEGIN_ARRAY; } else { @@ -1520,9 +1576,9 @@ static inline Datum get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) */ Datum json_array_length(PG_FUNCTION_ARGS) { - text *json = NULL; + text *json = NULL; - AlenState *state = NULL; + AlenState *state = NULL; JsonLexContext *lex = NULL; JsonSemAction *sem = NULL; @@ -1533,7 +1589,7 @@ Datum json_array_length(PG_FUNCTION_ARGS) state->lex = lex; - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->object_start = alen_object_start; sem->scalar = alen_scalar; sem->array_element_start = alen_array_element_start; @@ -1548,9 +1604,13 @@ Datum jsonb_array_length(PG_FUNCTION_ARGS) Jsonb *jb = PG_GETARG_JSONB(0); if (JB_ROOT_IS_SCALAR(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot get array length of a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot get array length of a scalar"))); } else if (!JB_ROOT_IS_ARRAY(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot get array length of a non-array"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot get array length of a non-array"))); } PG_RETURN_INT32(JB_ROOT_COUNT(jb)); @@ -1563,27 +1623,31 @@ Datum jsonb_array_length(PG_FUNCTION_ARGS) static void alen_object_start(void *state) { - AlenState *_state = (AlenState *)state; + AlenState *_state = (AlenState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot get array length of a non-array"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + 
errmsg("cannot get array length of a non-array"))); } } static void alen_scalar(void *state, char *token, JsonTokenType tokentype) { - AlenState *_state = (AlenState *)state; + AlenState *_state = (AlenState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot get array length of a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot get array length of a scalar"))); } } static void alen_array_element_start(void *state, bool isnull) { - AlenState *_state = (AlenState *)state; + AlenState *_state = (AlenState *) state; /* just count up all the level 1 elements */ if (_state->lex->lex_level == 1) { @@ -1623,57 +1687,68 @@ Datum jsonb_each_text(PG_FUNCTION_ARGS) static inline Datum each_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) { - Jsonb *jb = PG_GETARG_JSONB(0); + Jsonb *jb = PG_GETARG_JSONB(0); ReturnSetInfo *rsi = NULL; Tuplestorestate *tuple_store = NULL; - TupleDesc tupdesc; - TupleDesc ret_tdesc; - MemoryContext old_cxt, tmp_cxt; - bool skipNested = false; + TupleDesc tupdesc; + TupleDesc ret_tdesc; + MemoryContext old_cxt, + tmp_cxt; + bool skipNested = false; JsonbIterator *it = NULL; - JsonbValue v; - int r; + JsonbValue v; + int r; if (!JB_ROOT_IS_OBJECT(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call jsonb_each%s on a non-object", as_text ? "_text" : ""))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_each%s on a non-object", + as_text ? 
"_text" : ""))); } - rsi = (ReturnSetInfo *)fcinfo->resultinfo; + rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || + if (!rsi || !IsA(rsi, ReturnSetInfo) || + ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || rsi->expectedDesc == NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " - "cannot accept a set"))); - } + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that " + "cannot accept a set"))); + } rsi->returnMode = SFRM_Materialize; if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("function returning record called in context " - "that cannot accept type record"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function returning record called in context " + "that cannot accept type record"))); } old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); ret_tdesc = CreateTupleDescCopy(tupdesc); BlessTupleDesc(ret_tdesc); - tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, false, - u_sess->attr.attr_memory.work_mem); + tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, + false, u_sess->attr.attr_memory.work_mem); MemoryContextSwitchTo(old_cxt); - tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_each temporary cxt", ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, + "jsonb_each temporary cxt", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); it = JsonbIteratorInit(VARDATA_ANY(jb)); while ((r = JsonbIteratorNext(&it, &v, skipNested)) != WJB_DONE) { skipNested = true; if (r == WJB_KEY) { - text *key = NULL; - 
HeapTuple tuple; - Datum values[2]; - bool nulls[2] = {false, false}; + text *key = NULL; + HeapTuple tuple; + Datum values[2]; + bool nulls[2] = {false, false}; /* Use the tmp context so we can clean up after each tuple is done */ old_cxt = MemoryContextSwitchTo(tmp_cxt); @@ -1689,7 +1764,7 @@ static inline Datum each_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) if (v.type == jbvNull) { /* a json null is an sql null in text mode */ nulls[1] = true; - values[1] = (Datum)NULL; + values[1] = (Datum) NULL; } else { text *sv = NULL; @@ -1698,16 +1773,16 @@ static inline Datum each_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) sv = cstring_to_text_with_len(v.string.val, v.string.len); } else { /* Turn anything else into a json string */ - StringInfo jtext = makeStringInfo(); - Jsonb *jb = JsonbValueToJsonb(&v); - (void)JsonbToCString(jtext, VARDATA(jb), 2 * v.estSize); + StringInfo jtext = makeStringInfo(); + Jsonb *jb = JsonbValueToJsonb(&v); + (void) JsonbToCString(jtext, VARDATA(jb), 2 * v.estSize); sv = cstring_to_text_with_len(jtext->data, jtext->len); } values[1] = PointerGetDatum(sv); } } else { /* Not in text mode, just return the Jsonb */ - Jsonb *val = JsonbValueToJsonb(&v); + Jsonb *val = JsonbValueToJsonb(&v); values[1] = PointerGetDatum(val); } tuple = heap_form_tuple(ret_tdesc, values, nulls); @@ -1724,46 +1799,52 @@ static inline Datum each_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) PG_RETURN_NULL(); } + static inline Datum each_worker(FunctionCallInfo fcinfo, bool as_text) { - text *json = NULL; + text *json = NULL; JsonLexContext *lex = NULL; - JsonSemAction *sem = NULL; - ReturnSetInfo *rsi = NULL; - MemoryContext old_cxt; - TupleDesc tupdesc; - EachState *state = NULL; + JsonSemAction *sem = NULL; + ReturnSetInfo *rsi = NULL; + MemoryContext old_cxt; + TupleDesc tupdesc; + EachState *state = NULL; json = PG_GETARG_TEXT_P(0); lex = makeJsonLexContext(json, true); state = (EachState *)palloc0(sizeof(EachState)); sem = 
(JsonSemAction *)palloc0(sizeof(JsonSemAction)); - rsi = (ReturnSetInfo *)fcinfo->resultinfo; + rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || + if (!rsi || !IsA(rsi, ReturnSetInfo) || + ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || rsi->expectedDesc == NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " - "cannot accept a set"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that " + "cannot accept a set"))); } rsi->returnMode = SFRM_Materialize; - (void)get_call_result_type(fcinfo, NULL, &tupdesc); + (void) get_call_result_type(fcinfo, NULL, &tupdesc); if (tupdesc == NULL) { ereport(ERROR, - (errmodule(MOD_OPT), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("tupdesc should not be NULL value"), - errdetail("N/A"), errcause("An error occurred when obtaining the value of tupdesc."), - erraction("Contact Huawei Engineer."))); + (errmodule(MOD_OPT), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("tupdesc should not be NULL value"), + errdetail("N/A"), + errcause("An error occurred when obtaining the value of tupdesc."), + erraction("Contact Huawei Engineer."))); } /* make these in a sufficiently long-lived memory context */ old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); state->ret_tdesc = CreateTupleDescCopy(tupdesc); BlessTupleDesc(state->ret_tdesc); - state->tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, false, - u_sess->attr.attr_memory.work_mem); + state->tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, + false, u_sess->attr.attr_memory.work_mem); MemoryContextSwitchTo(old_cxt); - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->array_start = each_array_start; sem->scalar = each_scalar; 
sem->object_field_start = each_object_field_start; @@ -1771,8 +1852,11 @@ static inline Datum each_worker(FunctionCallInfo fcinfo, bool as_text) state->normalize_results = as_text; state->next_scalar = false; state->lex = lex; - state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "json_each temporary cxt", ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, + "json_each temporary cxt", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); pg_parse_json(lex, sem); MemoryContextDelete(state->tmp_cxt); rsi->setResult = state->tuple_store; @@ -1782,7 +1866,7 @@ static inline Datum each_worker(FunctionCallInfo fcinfo, bool as_text) static void each_object_field_start(void *state, char *fname, bool isnull) { - EachState *_state = (EachState *)state; + EachState *_state = (EachState *) state; /* save a pointer to where the value starts */ if (_state->lex->lex_level == 1) { @@ -1801,13 +1885,13 @@ static void each_object_field_start(void *state, char *fname, bool isnull) static void each_object_field_end(void *state, char *fname, bool isnull) { - EachState *_state = (EachState *)state; + EachState *_state = (EachState *) state; MemoryContext old_cxt; - int len; - text *val = NULL; - HeapTuple tuple; - Datum values[2]; - bool nulls[2] = {false, false}; + int len; + text *val = NULL; + HeapTuple tuple; + Datum values[2]; + bool nulls[2] = {false, false}; /* skip over nested objects */ if (_state->lex->lex_level != 1) { @@ -1821,7 +1905,7 @@ static void each_object_field_end(void *state, char *fname, bool isnull) if (isnull && _state->normalize_results) { nulls[1] = true; - values[1] = (Datum)NULL; + values[1] = (Datum) NULL; } else if (_state->next_scalar) { values[1] = CStringGetTextDatum(_state->normalized_scalar); _state->next_scalar = false; @@ -1841,21 +1925,25 @@ static void each_object_field_end(void *state, char *fname, bool isnull) 
static void each_array_start(void *state) { - EachState *_state = (EachState *)state; + EachState *_state = (EachState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot deconstruct an array as an object"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot deconstruct an array as an object"))); } } static void each_scalar(void *state, char *token, JsonTokenType tokentype) { - EachState *_state = (EachState *)state; + EachState *_state = (EachState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot deconstruct a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot deconstruct a scalar"))); } /* supply de-escaped value if required */ @@ -1883,28 +1971,37 @@ Datum jsonb_array_elements_text(PG_FUNCTION_ARGS) static inline Datum elements_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) { - Jsonb *jb = PG_GETARG_JSONB(0); - ReturnSetInfo *rsi = NULL; + Jsonb *jb = PG_GETARG_JSONB(0); + ReturnSetInfo *rsi = NULL; Tuplestorestate *tuple_store = NULL; - TupleDesc tupdesc; - TupleDesc ret_tdesc; - MemoryContext old_cxt, tmp_cxt; - bool skipNested = false; - JsonbIterator *it = NULL; - JsonbValue v; - int r; + TupleDesc tupdesc; + TupleDesc ret_tdesc; + MemoryContext old_cxt, + tmp_cxt; + bool skipNested = false; + JsonbIterator *it = NULL; + JsonbValue v; + int r; if (JB_ROOT_IS_SCALAR(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot extract elements from a scalar"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot extract elements from a scalar"))); } else if (!JB_ROOT_IS_ARRAY(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot extract elements from an object"))); + ereport(ERROR, + 
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot extract elements from an object"))); } - rsi = (ReturnSetInfo *)fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || (rsi->allowedModes & SFRM_Materialize) == 0 || rsi->expectedDesc == NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " - "cannot accept a set"))); - } + rsi = (ReturnSetInfo *) fcinfo->resultinfo; + if (!rsi || !IsA(rsi, ReturnSetInfo) || + (rsi->allowedModes & SFRM_Materialize) == 0 || + rsi->expectedDesc == NULL) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that " + "cannot accept a set"))); + } rsi->returnMode = SFRM_Materialize; /* it's a simple type, so don't use get_call_result_type() */ @@ -1916,35 +2013,38 @@ static inline Datum elements_worker_jsonb(FunctionCallInfo fcinfo, bool as_text) tuplestore_begin_heap(rsi->allowedModes & SFRM_Materialize_Random, false, u_sess->attr.attr_memory.work_mem); MemoryContextSwitchTo(old_cxt); - tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_each temporary cxt", ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, + "jsonb_each temporary cxt", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); it = JsonbIteratorInit(VARDATA_ANY(jb)); while ((r = JsonbIteratorNext(&it, &v, skipNested)) != WJB_DONE) { skipNested = true; if (r == WJB_ELEM) { - HeapTuple tuple; - Datum values[1]; - bool nulls[1] = {false}; + HeapTuple tuple; + Datum values[1]; + bool nulls[1] = {false}; /* use the tmp context so we can clean up after each tuple is done */ old_cxt = MemoryContextSwitchTo(tmp_cxt); if (!as_text) { - Jsonb *val = JsonbValueToJsonb(&v); + Jsonb *val = JsonbValueToJsonb(&v); values[0] = PointerGetDatum(val); } else { if (v.type == jbvNull) { /* a json null is an sql null in text mode */ 
nulls[0] = true; - values[0] = (Datum)NULL; + values[0] = (Datum) NULL; } else { - text *sv = NULL; + text *sv = NULL; if (v.type == jbvString) { /* in text mode scalar strings should be dequoted */ sv = cstring_to_text_with_len(v.string.val, v.string.len); } else { /* turn anything else into a json string */ - StringInfo jtext = makeStringInfo(); - Jsonb *jb = JsonbValueToJsonb(&v); - (void)JsonbToCString(jtext, VARDATA(jb), 2 * v.estSize); + StringInfo jtext = makeStringInfo(); + Jsonb *jb = JsonbValueToJsonb(&v); + (void) JsonbToCString(jtext, VARDATA(jb), 2 * v.estSize); sv = cstring_to_text_with_len(jtext->data, jtext->len); } values[0] = PointerGetDatum(sv); @@ -1979,24 +2079,27 @@ Datum json_array_elements_text(PG_FUNCTION_ARGS) static inline Datum elements_worker(FunctionCallInfo fcinfo, bool as_text) { - text *json = PG_GETARG_TEXT_P(0); + text *json = PG_GETARG_TEXT_P(0); /* elements only needs escaped strings when as_text */ JsonLexContext *lex = makeJsonLexContext(json, as_text); JsonSemAction *sem = NULL; ReturnSetInfo *rsi = NULL; MemoryContext old_cxt; - TupleDesc tupdesc; + TupleDesc tupdesc; ElementsState *state = NULL; state = (ElementsState *)palloc0(sizeof(ElementsState)); sem = (JsonSemAction *)palloc0(sizeof(JsonSemAction)); - rsi = (ReturnSetInfo *)fcinfo->resultinfo; + rsi = (ReturnSetInfo *) fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || + if (!rsi || !IsA(rsi, ReturnSetInfo) || + ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || rsi->expectedDesc == NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " - "cannot accept a set"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that " + "cannot accept a set"))); } rsi->returnMode = SFRM_Materialize; @@ -2007,11 +2110,12 @@ static inline Datum elements_worker(FunctionCallInfo fcinfo, bool 
as_text) old_cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory); state->ret_tdesc = CreateTupleDescCopy(tupdesc); BlessTupleDesc(state->ret_tdesc); - state->tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, false, - u_sess->attr.attr_memory.work_mem); + state->tuple_store = + tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, + false, u_sess->attr.attr_memory.work_mem); MemoryContextSwitchTo(old_cxt); - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->object_start = elements_object_start; sem->scalar = elements_scalar; sem->array_element_start = elements_array_element_start; @@ -2019,9 +2123,11 @@ static inline Datum elements_worker(FunctionCallInfo fcinfo, bool as_text) state->normalize_results = as_text; state->next_scalar = false; state->lex = lex; - state->tmp_cxt = - AllocSetContextCreate(CurrentMemoryContext, "json_array_elements temporary cxt", ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, + "json_array_elements temporary cxt", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); pg_parse_json(lex, sem); MemoryContextDelete(state->tmp_cxt); @@ -2032,7 +2138,7 @@ static inline Datum elements_worker(FunctionCallInfo fcinfo, bool as_text) static void elements_array_element_start(void *state, bool isnull) { - ElementsState *_state = (ElementsState *)state; + ElementsState *_state = (ElementsState *) state; /* save a pointer to where the value starts */ if (_state->lex->lex_level == 1) { @@ -2051,12 +2157,12 @@ static void elements_array_element_start(void *state, bool isnull) static void elements_array_element_end(void *state, bool isnull) { - ElementsState *_state = (ElementsState *)state; + ElementsState *_state = (ElementsState *) state; MemoryContext old_cxt; - int len; - text *val = NULL; - HeapTuple tuple; - Datum values[1]; + 
int len; + text *val = NULL; + HeapTuple tuple; + Datum values[1]; bool nulls[1] = {false}; /* skip over nested objects */ @@ -2069,7 +2175,7 @@ static void elements_array_element_end(void *state, bool isnull) if (isnull && _state->normalize_results) { nulls[0] = true; - values[0] = (Datum)NULL; + values[0] = (Datum) NULL; } else if (_state->next_scalar) { values[0] = CStringGetTextDatum(_state->normalized_scalar); _state->next_scalar = false; @@ -2089,23 +2195,25 @@ static void elements_array_element_end(void *state, bool isnull) static void elements_object_start(void *state) { - ElementsState *_state = (ElementsState *)state; + ElementsState *_state = (ElementsState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_array_elements on a non-array"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_array_elements on a non-array"))); } } static void elements_scalar(void *state, char *token, JsonTokenType tokentype) { - ElementsState *_state = (ElementsState *)state; + ElementsState *_state = (ElementsState *) state; /* json structure check */ if (_state->lex->lex_level == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_array_elements on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_array_elements on a scalar"))); } /* supply de-escaped value if required */ @@ -2143,23 +2251,23 @@ Datum json_to_record(PG_FUNCTION_ARGS) static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg) { - Oid argtype; - Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, have_record_arg ? 1 : 0); - text *json = NULL; - Jsonb *jb = NULL; - bool use_json_as_text = false; - HTAB *json_hash = NULL; + Oid argtype; + Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, have_record_arg ? 
1 : 0); + text *json = NULL; + Jsonb *jb = NULL; + bool use_json_as_text = false; + HTAB *json_hash = NULL; HeapTupleHeader rec = NULL; - Oid tupType = InvalidOid; - int32 tupTypmod = -1; - TupleDesc tupdesc; + Oid tupType = InvalidOid; + int32 tupTypmod = -1; + TupleDesc tupdesc; HeapTupleData tuple; - HeapTuple rettuple; + HeapTuple rettuple; RecordIOData *my_extra = NULL; - int ncolumns; - int i; - Datum *values = NULL; - bool *nulls = NULL; + int ncolumns; + int i; + Datum *values = NULL; + bool *nulls = NULL; errno_t rc = 0; Assert(jtype == JSONOID || jtype == JSONBOID); @@ -2170,9 +2278,10 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); if (!type_is_rowtype(argtype)) { - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("first argument of json%s_populate_record must be a row type", - jtype == JSONBOID ? "b" : ""))); + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("first argument of json%s_populate_record must be a row type", + jtype == JSONBOID ? "b" : ""))); } if (PG_ARGISNULL(0)) { @@ -2196,17 +2305,18 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re tupTypmod = HeapTupleHeaderGetTypMod(rec); } tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); - } else { /* json{b}_to_record case */ + } else { /* json{b}_to_record case */ use_json_as_text = PG_ARGISNULL(1) ? 
false : PG_GETARG_BOOL(1); if (PG_ARGISNULL(0)) { PG_RETURN_NULL(); } if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("function returning record called in context " - "that cannot accept type record"), - errhint("Try calling the function in the FROM clause " - "using a column definition list."))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function returning record called in context " + "that cannot accept type record"), + errhint("Try calling the function in the FROM clause " + "using a column definition list."))); } } @@ -2249,33 +2359,34 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; } - if (have_record_arg && (my_extra->record_type != tupType || my_extra->record_typmod != tupTypmod)) { - rc = memset_s(my_extra, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData), 0, - sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); + if (have_record_arg && (my_extra->record_type != tupType || + my_extra->record_typmod != tupTypmod)) { + rc = memset_s(my_extra, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData), + 0, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); securec_check(rc, "\0", "\0"); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; } - values = (Datum *)palloc(ncolumns * sizeof(Datum)); - nulls = (bool *)palloc(ncolumns * sizeof(bool)); + values = (Datum *) palloc(ncolumns * sizeof(Datum)); + nulls = (bool *) palloc(ncolumns * sizeof(bool)); if (rec) { /* Break down the tuple into fields */ heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { for (i = 0; i < ncolumns; ++i) { - values[i] = (Datum)0; + values[i] = (Datum) 0; nulls[i] = true; } } for (i = 0; i < ncolumns; ++i) { ColumnIOData 
*column_info = &my_extra->columns[i]; - Oid column_type = tupdesc->attrs[i].atttypid; + Oid column_type = tupdesc->attrs[i].atttypid; JsonbValue *v = NULL; - char fname[NAMEDATALEN]; + char fname[NAMEDATALEN]; JsonHashEntry *hashentry = NULL; /* Ignore dropped columns in datatype */ @@ -2291,7 +2402,7 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re securec_check(rc, "\0", "\0"); hashentry = (JsonHashEntry *)hash_search(json_hash, fname, HASH_FIND, NULL); } else { - char *key = NameStr(tupdesc->attrs[i].attname); + char *key = NameStr(tupdesc->attrs[i].attname); v = findJsonbValueFromSuperHeaderLen(VARDATA(jb), JB_FOBJECT, key, strlen(key)); } @@ -2303,7 +2414,8 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re * then every field which we don't populate needs to be run through * the input function just in case it's a domain type. */ - if (((jtype == JSONOID && hashentry == NULL) || (jtype == JSONBOID && v == NULL)) && rec) { + if (((jtype == JSONOID && hashentry == NULL) || + (jtype == JSONBOID && v == NULL)) && rec) { continue; } @@ -2321,11 +2433,11 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re * need InputFunctionCall to happen even for nulls, so that domain * checks are done */ - values[i] = - InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, tupdesc->attrs[i].atttypmod); + values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, + tupdesc->attrs[i].atttypmod); nulls[i] = true; } else { - char *s = NULL; + char *s = NULL; if (jtype == JSONOID) { /* already done the hard work in the json case */ @@ -2338,9 +2450,9 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re } else if (v->type == jbvNumeric) { s = DatumGetCString(DirectFunctionCall1(numeric_out, PointerGetDatum(v->numeric))); } else if (!use_json_as_text) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - 
errmsg("cannot populate with a nested object unless " - "use_json_as_text is true"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot populate with a nested object unless use_json_as_text is true"))); } else if (v->type == jbvBinary) { s = JsonbToCString(NULL, v->binary.data, v->binary.len); } else { @@ -2348,7 +2460,8 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re } } - values[i] = InputFunctionCall(&column_info->proc, s, column_info->typioparam, tupdesc->attrs[i].atttypmod); + values[i] = InputFunctionCall(&column_info->proc, s, + column_info->typioparam, tupdesc->attrs[i].atttypmod); nulls[i] = false; } } @@ -2371,11 +2484,11 @@ static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_re */ static HTAB *get_json_object_as_hash(text *json, char *funcname, bool use_json_as_text) { - HASHCTL ctl; - HTAB *tab = NULL; - JHashState *state = NULL; + HASHCTL ctl; + HTAB *tab = NULL; + JHashState *state = NULL; JsonLexContext *lex = makeJsonLexContext(json, true); - JsonSemAction *sem = NULL; + JsonSemAction *sem = NULL; errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); securec_check(rc, "\0", "\0"); @@ -2392,7 +2505,7 @@ static HTAB *get_json_object_as_hash(text *json, char *funcname, bool use_json_a state->lex = lex; state->use_json_as_text = use_json_as_text; - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->array_start = hash_array_start; sem->scalar = hash_scalar; sem->object_field_start = hash_object_field_start; @@ -2405,7 +2518,7 @@ static HTAB *get_json_object_as_hash(text *json, char *funcname, bool use_json_a static void hash_object_field_start(void *state, char *fname, bool isnull) { - JHashState *_state = (JHashState *)state; + JHashState *_state = (JHashState *) state; if (_state->lex->lex_level > 1) { return; @@ -2413,8 +2526,10 @@ static void hash_object_field_start(void *state, char *fname, bool isnull) if (_state->lex->token_type == 
JSON_TOKEN_ARRAY_START || _state->lex->token_type == JSON_TOKEN_OBJECT_START) { if (!_state->use_json_as_text) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call %s on a nested object", _state->function_name))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call %s on a nested object", + _state->function_name))); } _state->save_json_start = _state->lex->token_start; } else { @@ -2425,10 +2540,10 @@ static void hash_object_field_start(void *state, char *fname, bool isnull) static void hash_object_field_end(void *state, char *fname, bool isnull) { - JHashState *_state = (JHashState *)state; + JHashState *_state = (JHashState *) state; JsonHashEntry *hashentry = NULL; - bool found = false; - char name[NAMEDATALEN]; + bool found = false; + char name[NAMEDATALEN]; /* * ignore field names >= NAMEDATALEN - they can't match a record field @@ -2452,7 +2567,7 @@ static void hash_object_field_end(void *state, char *fname, bool isnull) hashentry->isnull = isnull; if (_state->save_json_start != NULL) { - int len = _state->lex->prev_token_terminator - _state->save_json_start; + int len = _state->lex->prev_token_terminator - _state->save_json_start; char *val = (char *)palloc((len + 1) * sizeof(char)); rc = memcpy_s(val, (len + 1) * sizeof(char), _state->save_json_start, len); @@ -2467,21 +2582,23 @@ static void hash_object_field_end(void *state, char *fname, bool isnull) static void hash_array_start(void *state) { - JHashState *_state = (JHashState *)state; + JHashState *_state = (JHashState *) state; if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call %s on an array", _state->function_name))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call %s on an array", _state->function_name))); } } static void hash_scalar(void *state, char *token, JsonTokenType tokentype) { - JHashState *_state = (JHashState *)state; + 
JHashState *_state = (JHashState *) state; if (_state->lex->lex_level == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call %s on a scalar", _state->function_name))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call %s on a scalar", _state->function_name))); } if (_state->lex->lex_level == 1) { @@ -2489,6 +2606,7 @@ static void hash_scalar(void *state, char *token, JsonTokenType tokentype) } } + /* * SQL function json_populate_recordset * @@ -2506,17 +2624,17 @@ Datum jsonb_populate_recordset(PG_FUNCTION_ARGS) static void make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state) { - Datum *values = NULL; - bool *nulls = NULL; - int i; + Datum *values = NULL; + bool *nulls = NULL; + int i; RecordIOData *my_extra = state->my_extra; - int ncolumns = my_extra->ncolumns; - TupleDesc tupdesc = state->ret_tdesc; + int ncolumns = my_extra->ncolumns; + TupleDesc tupdesc = state->ret_tdesc; HeapTupleHeader rec = state->rec; - HeapTuple rettuple; + HeapTuple rettuple; - values = (Datum *)palloc(ncolumns * sizeof(Datum)); - nulls = (bool *)palloc(ncolumns * sizeof(bool)); + values = (Datum *) palloc(ncolumns * sizeof(Datum)); + nulls = (bool *) palloc(ncolumns * sizeof(bool)); if (state->rec) { HeapTupleData tuple; @@ -2530,16 +2648,16 @@ static void make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState * heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { for (i = 0; i < ncolumns; ++i) { - values[i] = (Datum)0; + values[i] = (Datum) 0; nulls[i] = true; } } for (i = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; - Oid column_type = tupdesc->attrs[i].atttypid; - JsonbValue *v = NULL; - char *key = NULL; + Oid column_type = tupdesc->attrs[i].atttypid; + JsonbValue *v = NULL; + char *key = NULL; /* Ignore dropped columns in datatype */ if (tupdesc->attrs[i].attisdropped) { @@ -2574,8 +2692,8 @@ static void make_row_from_rec_and_jsonb(Jsonb 
*element, PopulateRecordsetState * * Need InputFunctionCall to happen even for nulls, so that domain * checks are done */ - values[i] = - InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, tupdesc->attrs[i].atttypmod); + values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, + tupdesc->attrs[i].atttypmod); nulls[i] = true; } else { char *s = NULL; @@ -2588,8 +2706,8 @@ static void make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState * s = DatumGetCString(DirectFunctionCall1(numeric_out, PointerGetDatum(v->numeric))); } else if (!state->use_json_as_text) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot populate with a nested object unless " - "use_json_as_text is true"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot populate with a nested object unless use_json_as_text is true"))); } else if (v->type == jbvBinary) { s = JsonbToCString(NULL, v->binary.data, v->binary.len); } else { @@ -2620,37 +2738,41 @@ Datum json_to_recordset(PG_FUNCTION_ARGS) */ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg) { - Oid argtype; - Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, have_record_arg ? 1 : 0); - bool use_json_as_text = false; + Oid argtype; + Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, have_record_arg ? 1 : 0); + bool use_json_as_text = false; ReturnSetInfo *rsi = NULL; - MemoryContext old_cxt; - Oid tupType; - int32 tupTypmod; + MemoryContext old_cxt; + Oid tupType; + int32 tupTypmod; HeapTupleHeader rec; - TupleDesc tupdesc; - bool needforget = false; - RecordIOData *my_extra = NULL; - int ncolumns; + TupleDesc tupdesc; + bool needforget = false; + RecordIOData *my_extra = NULL; + int ncolumns; PopulateRecordsetState *state = NULL; if (have_record_arg) { argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); use_json_as_text = PG_ARGISNULL(2) ? 
false : PG_GETARG_BOOL(2); if (!type_is_rowtype(argtype)) { - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("first argument of json_populate_recordset must " - "be a row type"))); + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("first argument of json_populate_recordset must be a row type"))); } } else { argtype = InvalidOid; use_json_as_text = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1); } - rsi = (ReturnSetInfo *)fcinfo->resultinfo; - if (!rsi || !IsA(rsi, ReturnSetInfo) || ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || + rsi = (ReturnSetInfo *) fcinfo->resultinfo; + if (!rsi || !IsA(rsi, ReturnSetInfo) || + ((uint32)rsi->allowedModes & SFRM_Materialize) == 0 || rsi->expectedDesc == NULL) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " - "cannot accept a set"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that " + "cannot accept a set"))); } rsi->returnMode = SFRM_Materialize; /* @@ -2659,8 +2781,10 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have * to_record function which returns a setof record. */ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("function returning record called in context " - "that cannot accept type record"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function returning record called in context " + "that cannot accept type record"))); } /* if the json is null send back an empty set */ @@ -2671,8 +2795,7 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have if (PG_ARGISNULL(0)) { rec = NULL; } else { - /* using the arg tupdesc, because it may not be the same as the result - * tupdesc. */ + /* using the arg tupdesc, because it may not be the same as the result tupdesc. 
*/ rec = PG_GETARG_HEAPTUPLEHEADER(0); tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(rec), HeapTupleHeaderGetTypMod(rec)); needforget = true; @@ -2692,11 +2815,11 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have * We arrange to look up the needed I/O info just once per series of * calls, assuming the record type doesn't change underneath us. */ - my_extra = (RecordIOData *)fcinfo->flinfo->fn_extra; + my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; if (my_extra == NULL || my_extra->ncolumns != ncolumns) { - fcinfo->flinfo->fn_extra = MemoryContextAlloc( - fcinfo->flinfo->fn_mcxt, sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); - my_extra = (RecordIOData *)fcinfo->flinfo->fn_extra; + fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, + sizeof(RecordIOData) - sizeof(ColumnIOData) + ncolumns * sizeof(ColumnIOData)); + my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; } @@ -2717,8 +2840,8 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have DecrTupleDescRefCount(tupdesc); } BlessTupleDesc(state->ret_tdesc); - state->tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, false, - u_sess->attr.attr_memory.work_mem); + state->tuple_store = tuplestore_begin_heap((uint32)rsi->allowedModes & SFRM_Materialize_Random, + false, u_sess->attr.attr_memory.work_mem); MemoryContextSwitchTo(old_cxt); state->my_extra = my_extra; @@ -2727,13 +2850,13 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have state->fn_mcxt = fcinfo->flinfo->fn_mcxt; if (jtype == JSONOID) { - text *json = PG_GETARG_TEXT_P(have_record_arg ? 1 : 0); + text *json = PG_GETARG_TEXT_P(have_record_arg ? 
1 : 0); JsonLexContext *lex = NULL; - JsonSemAction *sem = NULL; + JsonSemAction *sem = NULL; sem = (JsonSemAction *)palloc0(sizeof(JsonSemAction)); lex = makeJsonLexContext(json, true); - sem->semstate = (void *)state; + sem->semstate = (void *) state; sem->array_start = populate_recordset_array_start; sem->array_element_start = populate_recordset_array_element_start; sem->scalar = populate_recordset_scalar; @@ -2745,27 +2868,28 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have pg_parse_json(lex, sem); } else { - Jsonb *jb = NULL; + Jsonb *jb = NULL; JsonbIterator *it = NULL; - JsonbValue v; - bool skipNested = false; - int r; + JsonbValue v; + bool skipNested = false; + int r; Assert(jtype == JSONBOID); jb = PG_GETARG_JSONB(have_record_arg ? 1 : 0); if (JB_ROOT_IS_SCALAR(jb) || !JB_ROOT_IS_ARRAY(jb)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call jsonb_populate_recordset on non-array"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call jsonb_populate_recordset on non-array"))); } it = JsonbIteratorInit(VARDATA_ANY(jb)); while ((r = JsonbIteratorNext(&it, &v, skipNested)) != WJB_DONE) { skipNested = true; if (r == WJB_ELEM) { - Jsonb *element = JsonbValueToJsonb(&v); + Jsonb *element = JsonbValueToJsonb(&v); if (!JB_ROOT_IS_OBJECT(element)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("jsonb_populate_recordset argument must be an " - "array of objects"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("jsonb_populate_recordset argument must be an array of objects"))); } make_row_from_rec_and_jsonb(element, state); } @@ -2778,16 +2902,18 @@ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have static void populate_recordset_object_start(void *state) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; - int lex_level = _state->lex->lex_level; - HASHCTL ctl; + 
PopulateRecordsetState *_state = (PopulateRecordsetState *) state; + int lex_level = _state->lex->lex_level; + HASHCTL ctl; if (lex_level == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_populate_recordset on an object"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_populate_recordset on an object"))); } else if (lex_level > 1 && !_state->use_json_as_text) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call json_populate_recordset with nested objects"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_populate_recordset with nested objects"))); } /* set up a new hash for this entry */ @@ -2801,24 +2927,24 @@ static void populate_recordset_object_start(void *state) static void populate_recordset_object_end(void *state) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; - HTAB *json_hash = _state->json_hash; - Datum *values = NULL; - bool *nulls = NULL; - char fname[NAMEDATALEN]; - int i; - RecordIOData *my_extra = _state->my_extra; - int ncolumns = my_extra->ncolumns; - TupleDesc tupdesc = _state->ret_tdesc; - JsonHashEntry *hashentry = NULL; - HeapTupleHeader rec = _state->rec; - HeapTuple rettuple; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; + HTAB *json_hash = _state->json_hash; + Datum *values = NULL; + bool *nulls = NULL; + char fname[NAMEDATALEN]; + int i; + RecordIOData *my_extra = _state->my_extra; + int ncolumns = my_extra->ncolumns; + TupleDesc tupdesc = _state->ret_tdesc; + JsonHashEntry *hashentry = NULL; + HeapTupleHeader rec = _state->rec; + HeapTuple rettuple; if (_state->lex->lex_level > 1) { return; } - values = (Datum *)palloc(ncolumns * sizeof(Datum)); - nulls = (bool *)palloc(ncolumns * sizeof(bool)); + values = (Datum *) palloc(ncolumns * sizeof(Datum)); + nulls = (bool *) palloc(ncolumns * sizeof(bool)); if (_state->rec) { HeapTupleData tuple; /* Build a 
temporary HeapTuple control structure */ @@ -2830,15 +2956,15 @@ static void populate_recordset_object_end(void *state) heap_deform_tuple(&tuple, tupdesc, values, nulls); } else { for (i = 0; i < ncolumns; ++i) { - values[i] = (Datum)0; + values[i] = (Datum) 0; nulls[i] = true; } } for (i = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; - Oid column_type = tupdesc->attrs[i].atttypid; - char *value = NULL; + Oid column_type = tupdesc->attrs[i].atttypid; + char *value = NULL; /* Ignore dropped columns in datatype */ if (tupdesc->attrs[i].attisdropped) { @@ -2877,13 +3003,13 @@ static void populate_recordset_object_end(void *state) * need InputFunctionCall to happen even for nulls, so that domain * checks are done */ - values[i] = - InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, tupdesc->attrs[i].atttypmod); + values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, + tupdesc->attrs[i].atttypmod); nulls[i] = true; } else { value = hashentry->val; - values[i] = - InputFunctionCall(&column_info->proc, value, column_info->typioparam, tupdesc->attrs[i].atttypmod); + values[i] = InputFunctionCall(&column_info->proc, value, column_info->typioparam, + tupdesc->attrs[i].atttypmod); nulls[i] = false; } } @@ -2895,30 +3021,34 @@ static void populate_recordset_object_end(void *state) static void populate_recordset_array_element_start(void *state, bool isnull) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; - if (_state->lex->lex_level == 1 && _state->lex->token_type != JSON_TOKEN_OBJECT_START) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("must call json_populate_recordset on an array of objects"))); + if (_state->lex->lex_level == 1 && + _state->lex->token_type != JSON_TOKEN_OBJECT_START) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("must call 
json_populate_recordset on an array of objects"))); } } static void populate_recordset_array_start(void *state) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; if (_state->lex->lex_level != 0 && !_state->use_json_as_text) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call json_populate_recordset with nested arrays"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_populate_recordset with nested arrays"))); } } static void populate_recordset_scalar(void *state, char *token, JsonTokenType tokentype) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; if (_state->lex->lex_level == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot call json_populate_recordset on a scalar"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_populate_recordset on a scalar"))); } if (_state->lex->lex_level == 2) { @@ -2928,7 +3058,7 @@ static void populate_recordset_scalar(void *state, char *token, JsonTokenType to static void populate_recordset_object_field_start(void *state, char *fname, bool isnull) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; if (_state->lex->lex_level > 2) { return; @@ -2936,8 +3066,9 @@ static void populate_recordset_object_field_start(void *state, char *fname, bool if (_state->lex->token_type == JSON_TOKEN_ARRAY_START || _state->lex->token_type == JSON_TOKEN_OBJECT_START) { if (!_state->use_json_as_text) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot call json_populate_recordset on a nested object"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot call json_populate_recordset on a nested object"))); } 
_state->save_json_start = _state->lex->token_start; } else { @@ -2947,10 +3078,10 @@ static void populate_recordset_object_field_start(void *state, char *fname, bool static void populate_recordset_object_field_end(void *state, char *fname, bool isnull) { - PopulateRecordsetState *_state = (PopulateRecordsetState *)state; - JsonHashEntry *hashentry = NULL; - bool found = false; - char name[NAMEDATALEN]; + PopulateRecordsetState *_state = (PopulateRecordsetState *) state; + JsonHashEntry *hashentry = NULL; + bool found = false; + char name[NAMEDATALEN]; /* * ignore field names >= NAMEDATALEN - they can't match a record field @@ -2972,7 +3103,7 @@ static void populate_recordset_object_field_end(void *state, char *fname, bool i */ hashentry->isnull = isnull; if (_state->save_json_start != NULL) { - int len = _state->lex->prev_token_terminator - _state->save_json_start; + int len = _state->lex->prev_token_terminator - _state->save_json_start; char *val = (char *)palloc((len + 1) * sizeof(char)); errno_t rc = memcpy_s(val, (len + 1) * sizeof(char), _state->save_json_start, len); securec_check(rc, "\0", "\0"); @@ -2989,7 +3120,7 @@ static void populate_recordset_object_field_end(void *state, char *fname, bool i */ static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader, uint32 flags, char *key, uint32 keylen) { - JsonbValue k; + JsonbValue k; k.type = jbvString; k.string.val = key; @@ -2998,6 +3129,637 @@ static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader, ui return findJsonbValueFromSuperHeader(sheader, flags, NULL, &k); } +static void +push_null_elements(JsonbParseState **ps, int num) +{ + JsonbValue null; + + null.type = jbvNull; + + while (num-- > 0) + pushJsonbValue(ps, WJB_ELEM, &null); +} + +static void +addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb) +{ + JsonbIterator *it; + JsonbValue *o = &(*jbps)->contVal; + int type; + JsonbValue v; + + it = JsonbIteratorInit(VARDATA(jb)); + + Assert(o->type == jbvArray 
|| o->type == jbvObject); + + if(JB_ROOT_IS_SCALAR(jb)) { + (void) JsonbIteratorNext(&it, &v, false); + (void) JsonbIteratorNext(&it, &v, false); + + switch(o->type) + { + case jbvArray: + (void)pushJsonbValue(jbps, WJB_ELEM, &v); + break; + case jbvObject: + (void)pushJsonbValue(jbps, WJB_VALUE, &v); + break; + default: + elog(ERROR, "unexpected parent of nested structure."); + } + } else { + while((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { + if (type == WJB_ELEM || type == WJB_KEY || type == WJB_VALUE) { + (void)pushJsonbValue(jbps, type, &v); + } else { + (void)pushJsonbValue(jbps, type, NULL); + } + } + } +} + +/* + * Do most of the heavy work for jsonb_set/jsonb_insert + * + * If JB_PATH_DELETE bit is set in op_type, the element is to be removed. + * + * If any bit mentioned in JB_PATH_CREATE_OR_INSERT is set in op_type, + * we create the new value if the key or array index does not exist. + * + * Bits JB_PATH_INSERT_BEFORE and JB_PATH_INSERT_AFTER in op_type + * behave as JB_PATH_CREATE if new value is inserted in JsonbObject. + * + * If JB_PATH_FILL_GAPS bit is set, this will change an assignment logic in + * case if target is an array. The assignment index will not be restricted by + * number of elements in the array, and if there are any empty slots between + * last element of the array and a new one they will be filled with nulls. If + * the index is negative, it still will be considered an index from the end + * of the array. If a part of the path is not present and this part is more + * than just one last element, this flag will instruct to create the whole + * chain of corresponding objects and insert the value. + * + * JB_PATH_CONSISTENT_POSITION for an array indicates that the caller wants to + * keep values with fixed indices.
Indices for existing elements could be + * changed (shifted forward) in case if the array is prepended with a new value + * and a negative index out of the range, so this behavior will be prevented + * and return an error. + * + * All path elements before the last must already exist + * whatever bits in op_type are set, or nothing is done. + */ +static JsonbValue* setPath(JsonbIterator **it, Datum *path_elems, bool *path_nulls, int path_len, + JsonbParseState **st, int level, Jsonb *newval, int op_type) +{ + JsonbValue v; + int r; + JsonbValue *res; + + check_stack_depth(); + + if (path_nulls[level]) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("path element at position %d is null", level + 1))); + + r = JsonbIteratorNext(it, &v, false); + + switch (r) { + case WJB_BEGIN_ARRAY: + + /* + * If instructed complain about attempts to replace within a raw + * scalar value. This happens even when current level is equal to + * path_len, because the last path key should also correspond to + * an object or an array, not raw scalar. 
+ */ + if ((op_type & JB_PATH_FILL_GAPS) && (level <= path_len - 1) && v.array.rawScalar) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot replace existing key"), + errdetail("The path assumes key is a composite object, " + "but it is a scalar value."))); + + (void) pushJsonbValue(st, r, NULL); + setPathArray(it, path_elems, path_nulls, path_len, st, level, newval, v.array.nElems, op_type); + r = JsonbIteratorNext(it, &v, false); + Assert(r == WJB_END_ARRAY); + res = pushJsonbValue(st, r, NULL); + break; + case WJB_BEGIN_OBJECT: + (void) pushJsonbValue(st, r, NULL); + setPathObject(it, path_elems, path_nulls, path_len, st, level, newval, v.object.nPairs, op_type); + r = JsonbIteratorNext(it, &v, true); + Assert(r == WJB_END_OBJECT); + res = pushJsonbValue(st, r, NULL); + break; + case WJB_ELEM: + case WJB_VALUE: + + /* + * If instructed complain about attempts to replace within a + * scalar value. This happens even when current level is equal to + * path_len, because the last path key should also correspond to + * an object or an array, not an element or value. 
+ */ + if ((op_type & JB_PATH_FILL_GAPS) && (level <= path_len - 1)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot replace existing key"), + errdetail("The path assumes key is a composite object, but it is a scalar value."))); + + res = pushJsonbValue(st, r, &v); + break; + default: + elog(ERROR, "unrecognized iterator result: %d", (int) r); + res = NULL; /* keep compiler quiet */ + break; + } + + return res; +} + +/* + * Object walker for setPath + */ +static void setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, + int path_len, JsonbParseState **st, int level, Jsonb *newval, uint32 npairs, int op_type) +{ + text *pathelem = NULL; + int i; + JsonbValue k, v; + bool done = false; + + if (level >= path_len || path_nulls[level]) { + done = true; + } else { + /* The path Datum could be toasted, in which case we must detoast it */ + pathelem = DatumGetTextPP(path_elems[level]); + } + + /* empty object is a special case for create */ + if ((npairs == 0) && (op_type & JB_PATH_CREATE_OR_INSERT) && (level == path_len - 1)) { + JsonbValue newkey; + + newkey.type = jbvString; + newkey.string.val = VARDATA_ANY(pathelem); + newkey.string.len = VARSIZE_ANY_EXHDR(pathelem); + + (void) pushJsonbValue(st, WJB_KEY, &newkey); + addJsonbToParseState(st, newval); + } + + for (i = 0; i < npairs; i++) { + int r = JsonbIteratorNext(it, &k, true); + + Assert(r == WJB_KEY); + + if (!done && k.string.len == VARSIZE_ANY_EXHDR(pathelem) && + memcmp(k.string.val, VARDATA_ANY(pathelem), k.string.len) == 0) { + done = true; + + if (level == path_len - 1) { + /* + * called from jsonb_insert(), it forbids redefining an + * existing value + */ + if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot replace existing key"), + errhint("Try using the function jsonb_set " + "to replace key value."))); + + r = JsonbIteratorNext(it, &v, true); /* skip value */ + if 
(!(op_type & JB_PATH_DELETE)) { + (void) pushJsonbValue(st, WJB_KEY, &k); + addJsonbToParseState(st, newval); + } + } else { + (void) pushJsonbValue(st, r, &k); + setPath(it, path_elems, path_nulls, path_len, + st, level + 1, newval, op_type); + } + } else { + if ((op_type & JB_PATH_CREATE_OR_INSERT) && !done && level == path_len - 1 && i == npairs - 1) { + JsonbValue newkey; + + newkey.type = jbvString; + newkey.string.val = VARDATA_ANY(pathelem); + newkey.string.len = VARSIZE_ANY_EXHDR(pathelem); + + (void) pushJsonbValue(st, WJB_KEY, &newkey); + addJsonbToParseState(st, newval); + } + + (void) pushJsonbValue(st, r, &k); + r = JsonbIteratorNext(it, &v, false); + (void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT) { + int walking_level = 1; + + while (walking_level != 0) { + r = JsonbIteratorNext(it, &v, false); + + if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT) + ++walking_level; + if (r == WJB_END_ARRAY || r == WJB_END_OBJECT) + --walking_level; + + (void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? 
&v : NULL); + } + } + } + } +} + +/* +* Array walker for setPath +*/ +static void setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, + int path_len, JsonbParseState **st, int level, Jsonb *newval, uint32 nelems, int op_type) +{ + JsonbValue v; + int idx, + i; + bool done = false; + + /* pick correct index */ + if (level < path_len && !path_nulls[level]) { + char *c = TextDatumGetCString(path_elems[level]); + char *badp; + long val; + + errno = 0; + val = strtol(c, &badp, 10); + if (errno != 0 || badp == c || badp[0] != '\0' || val > INT_MAX || + val < INT_MIN) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("path element at position %d is not an integer: \"%s\"", + level + 1, c))); + idx = val; + } else + idx = nelems; + + if (idx < 0) { + if (-idx > nelems) { + /* + * If asked to keep elements position consistent, it's not allowed + * to prepend the array. + */ + if (op_type & JB_PATH_CONSISTENT_POSITION) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("path element at position %d is out of range: %d", + level + 1, idx))); + else + idx = INT_MIN; + } else + idx = nelems + idx; + } + + /* + * Filling the gaps means there are no limits on the positive index are + * imposed, we can set any element. Otherwise limit the index by nelems. 
+ */ + if (!(op_type & JB_PATH_FILL_GAPS)) { + if (idx > 0 && idx > nelems) + idx = nelems; + } + + /* + * if we're creating, and idx == INT_MIN, we prepend the new value to the + * array also if the array is empty - in which case we don't really care + * what the idx value is + */ + if ((idx == INT_MIN || nelems == 0) && (level == path_len - 1) && (op_type & JB_PATH_CREATE_OR_INSERT)) { + Assert(newval != NULL); + addJsonbToParseState(st, newval); + done = true; + } + + /* iterate over the array elements */ + for (i = 0; i < nelems; i++) { + int r; + + if (i == idx && level < path_len) { + done = true; + + if (level == path_len - 1) { + r = JsonbIteratorNext(it, &v, true); /* skip */ + + if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_CREATE)) + addJsonbToParseState(st, newval); + + /* + * We should keep current value only in case of + * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because + * otherwise it should be deleted or replaced + */ + if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE)) + (void) pushJsonbValue(st, r, &v); + + if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_REPLACE)) + addJsonbToParseState(st, newval); + } else + (void) setPath(it, path_elems, path_nulls, path_len, st, level + 1, newval, op_type); + } else { + r = JsonbIteratorNext(it, &v, false); + + (void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + + if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT) { + int walking_level = 1; + + while (walking_level != 0) { + r = JsonbIteratorNext(it, &v, false); + + if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT) + ++walking_level; + if (r == WJB_END_ARRAY || r == WJB_END_OBJECT) + --walking_level; + + (void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + } + } + } + } + + if ((op_type & JB_PATH_CREATE_OR_INSERT) && !done && level == path_len - 1) { + /* + * If asked to fill the gaps, idx could be bigger than nelems, so + * prepend the new element with nulls if that's the case. 
+ */ + if (op_type & JB_PATH_FILL_GAPS && idx > nelems) + push_null_elements(st, idx - nelems); + + addJsonbToParseState(st, newval); + done = true; + } +} + +/* +* SQL function jsonb_insert(jsonb, text[], jsonb, boolean) +*/ +Datum jsonb_insert(PG_FUNCTION_ARGS) +{ + Jsonb *in = PG_GETARG_JSONB(0); + ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); + Jsonb *newjsonb = PG_GETARG_JSONB(2); + bool after = PG_GETARG_BOOL(3); + JsonbValue *res = NULL; + Datum *path_elems; + bool *path_nulls; + int path_len; + JsonbIterator *it; + JsonbParseState *st = NULL; + + if (ARR_NDIM(path) > 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); + + if (JB_ROOT_IS_SCALAR(in)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot set path in scalar"))); + + deconstruct_array(path, TEXTOID, -1, false, 'i', &path_elems, &path_nulls, &path_len); + + if (path_len == 0) + PG_RETURN_JSONB(in); + + it = JsonbIteratorInit(VARDATA(in)); + + res = setPath(&it, path_elems, path_nulls, path_len, &st, 0, newjsonb, + after ? JB_PATH_INSERT_AFTER : JB_PATH_INSERT_BEFORE); + + Assert(res != NULL); + + PG_RETURN_JSONB(JsonbValueToJsonb(res)); +} + +/* +* SQL function jsonb_delete (jsonb, text) +* +* return a copy of the jsonb with the indicated item +* removed. 
+*/ +Datum jsonb_delete(PG_FUNCTION_ARGS) +{ + Jsonb *in = PG_GETARG_JSONB(0); + text *key = PG_GETARG_TEXT_PP(1); + char *keyptr = VARDATA_ANY(key); + int keylen = VARSIZE_ANY_EXHDR(key); + JsonbParseState *state = NULL; + JsonbIterator *it; + JsonbValue v, *res = NULL; + bool skipNested = false; + int r; + + if (JB_ROOT_IS_SCALAR(in)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot delete from scalar"))); + + if (JB_ROOT_COUNT(in) == 0) + PG_RETURN_JSONB(in); + + it = JsonbIteratorInit(VARDATA(in)); + + while ((r = JsonbIteratorNext(&it, &v, skipNested)) != WJB_DONE) { + skipNested = true; + + if ((r == WJB_ELEM || r == WJB_KEY) && (v.type == jbvString && keylen == v.string.len && + memcmp(keyptr, v.string.val, keylen) == 0)) { + /* skip corresponding value as well */ + if (r == WJB_KEY) + (void) JsonbIteratorNext(&it, &v, true); + + continue; + } + + res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + } + + Assert(res != NULL); + + PG_RETURN_JSONB(JsonbValueToJsonb(res)); +} + +/* +* SQL function jsonb_delete (jsonb, variadic text[]) +* +* return a copy of the jsonb with the indicated items +* removed. 
+*/ +Datum jsonb_delete_array(PG_FUNCTION_ARGS) +{ + Jsonb *in = PG_GETARG_JSONB(0); + ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1); + Datum *keys_elems; + bool *keys_nulls; + int keys_len; + JsonbParseState *state = NULL; + JsonbIterator *it; + JsonbValue v, *res = NULL; + bool skipNested = false; + int r; + + if (ARR_NDIM(keys) > 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); + + if (JB_ROOT_IS_SCALAR(in)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot delete from scalar"))); + + if (JB_ROOT_COUNT(in) == 0) + PG_RETURN_JSONB(in); + + deconstruct_array(keys, TEXTOID, -1, false, 'i', &keys_elems, &keys_nulls, &keys_len); + + if (keys_len == 0) + PG_RETURN_JSONB(in); + + it = JsonbIteratorInit(VARDATA(in)); + + while ((r = JsonbIteratorNext(&it, &v, skipNested)) != WJB_DONE) { + skipNested = true; + + if ((r == WJB_ELEM || r == WJB_KEY) && v.type == jbvString) { + int i; + bool found = false; + + for (i = 0; i < keys_len; i++) { + char *keyptr; + int keylen; + + if (keys_nulls[i]) + continue; + + /* We rely on the array elements not being toasted */ + keyptr = VARDATA_ANY(keys_elems[i]); + keylen = VARSIZE_ANY_EXHDR(keys_elems[i]); + if (keylen == v.string.len && + memcmp(keyptr, v.string.val, keylen) == 0) + { + found = true; + break; + } + } + if (found) { + /* skip corresponding value as well */ + if (r == WJB_KEY) + (void) JsonbIteratorNext(&it, &v, true); + + continue; + } + } + + res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + } + + Assert(res != NULL); + + PG_RETURN_JSONB(JsonbValueToJsonb(res)); +} + +/* +* SQL function jsonb_delete (jsonb, int) +* +* return a copy of the jsonb with the indicated item +* removed. Negative int means count back from the +* end of the items. 
+*/ +Datum jsonb_delete_idx(PG_FUNCTION_ARGS) +{ + Jsonb *in = PG_GETARG_JSONB(0); + int idx = PG_GETARG_INT32(1); + JsonbParseState *state = NULL; + JsonbIterator *it; + uint32 i = 0, + n; + JsonbValue v, + *res = NULL; + int r; + + if (JB_ROOT_IS_SCALAR(in)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot delete from scalar"))); + + if (JB_ROOT_IS_OBJECT(in)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot delete from object using integer index"))); + + if (JB_ROOT_COUNT(in) == 0) + PG_RETURN_JSONB(in); + + it = JsonbIteratorInit(VARDATA(in)); + + r = JsonbIteratorNext(&it, &v, false); + Assert(r == WJB_BEGIN_ARRAY); + n = v.array.nElems; + + if (idx < 0) { + if (-idx > n) + idx = n; + else + idx = n + idx; + } + + if (idx >= n) + PG_RETURN_JSONB(in); + + pushJsonbValue(&state, r, NULL); + + while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) + { + if (r == WJB_ELEM) { + if (i++ == idx) + continue; + } + + res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? 
&v : NULL); + } + + Assert(res != NULL); + + PG_RETURN_JSONB(JsonbValueToJsonb(res)); +} + +/* +* SQL function jsonb_set(jsonb, text[], jsonb, boolean) +*/ +Datum jsonb_set(PG_FUNCTION_ARGS) +{ + Jsonb *in = PG_GETARG_JSONB(0); + ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); + Jsonb *newjsonb = PG_GETARG_JSONB(2); + bool create = PG_GETARG_BOOL(3); + JsonbValue *res = NULL; + Datum *path_elems; + bool *path_nulls; + int path_len; + JsonbIterator *it; + JsonbParseState *st = NULL; + + if (ARR_NDIM(path) > 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("wrong number of array subscripts"))); + + if (JB_ROOT_IS_SCALAR(in)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot set path in scalar"))); + + if (JB_ROOT_COUNT(in) == 0 && !create) + PG_RETURN_JSONB(in); + + deconstruct_array(path, TEXTOID, -1, false, 'i', &path_elems, &path_nulls, &path_len); + + if (path_len == 0) + PG_RETURN_JSONB(in); + + it = JsonbIteratorInit(VARDATA(in)); + + res = setPath(&it, path_elems, path_nulls, path_len, &st, 0, newjsonb, create ? 
JB_PATH_CREATE : JB_PATH_REPLACE); + + Assert(res != NULL); + + PG_RETURN_JSONB(JsonbValueToJsonb(res)); +} + #ifdef DOLPHIN static cJSON *input_to_cjson(Oid valtype, const char *funcName, int pos, Datum arg) { diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index 932d4c6bf..ffadfad3e 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -14760,10 +14760,13 @@ Datum dss_io_stat(PG_FUNCTION_ARGS) if (duration > MAX_DURATION_TIME) { ereport(ERROR, (errmsg("The duration is too long, and it must be less than 60s."))); } + if (duration <= 0) { + ereport(ERROR, (errmsg("The duration must be greater than zero."))); + } init_dss_io_stat(); unsigned long long read_bytes = 0; unsigned long long write_bytes = 0; - int io_count = 0; + unsigned int io_count = 0; get_dss_io_stat(duration, &read_bytes, &write_bytes, &io_count); // tuple header int i = 1; @@ -14778,7 +14781,7 @@ Datum dss_io_stat(PG_FUNCTION_ARGS) i = 0; values[i++] = UInt64GetDatum(read_bytes); values[i++] = UInt64GetDatum(write_bytes); - values[i] = Int32GetDatum(io_count); + values[i] = UInt32GetDatum(io_count); HeapTuple heap_tuple = heap_form_tuple(tupdesc, values, nulls); result = HeapTupleGetDatum(heap_tuple); @@ -14914,7 +14917,6 @@ Datum gs_get_index_status(PG_FUNCTION_ARGS) } SRF_RETURN_DONE(funcctx); } - #endif TupleDesc create_query_node_reform_info_tupdesc() diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index 8491cc6bf..06bc25719 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -5442,6 +5442,10 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) /* index_tlist is set only if it's an IndexOnlyScan */ if (IsA(ps->plan, IndexOnlyScan)) dpns->index_tlist = ((IndexOnlyScan*)ps->plan)->indextlist; +#ifdef USE_SPQ + else if 
IsA(ps->plan, SpqIndexOnlyScan) + dpns->index_tlist = ((IndexOnlyScan*)ps->plan)->indextlist; +#endif else if (IsA(ps->plan, ForeignScan)) dpns->index_tlist = ((ForeignScan *)ps->plan)->fdw_scan_tlist; else if (IsA(ps->plan, ExtensiblePlan)) @@ -6843,7 +6847,23 @@ static void get_setop_query(Node* setOp, Query* query, deparse_context* context, if (context->qrw_phase) get_setop_query(subquery->setOperations, subquery, context, resultDesc); else - Assert(false); + get_query_def(subquery, + buf, + context->namespaces, + resultDesc, + context->prettyFlags, + context->wrapColumn, + context->indentLevel +#ifdef PGXC + , + context->finalise_aggs, + context->sortgroup_colno, + context->parser_arg +#endif /* PGXC */ + , + context->qrw_phase, + context->viewdef, + context->is_fqs); } if (need_paren) @@ -7052,6 +7072,24 @@ static void get_rule_groupingset(GroupingSet* gset, List* targetlist, deparse_co appendStringInfoString(buf, ")"); } +static void get_rule_separator(Const* con, StringInfo buf) +{ + Oid typoutput; + char* extval = NULL; + bool typIsVarlena = false; + + appendStringInfoString(buf, "\'"); + if (u_sess->exec_cxt.under_auto_explain) { + appendStringInfoString(buf, "***"); + } else if (!con->constisnull) { + getTypeOutputInfo(con->consttype, &typoutput, &typIsVarlena); + extval = OidOutputFunctionCall(typoutput, con->constvalue); + appendStringInfoString(buf, extval); + pfree_ext(extval); + } + appendStringInfoChar(buf, '\''); +} + /* * Display an ORDER BY list. 
*/ @@ -10891,18 +10929,9 @@ static void get_agg_expr(Aggref* aggref, deparse_context* context) } if (pg_strcasecmp(funcname, "group_concat") == 0) { - Oid typoutput; - char* extval = NULL; - bool typIsVarlena = false; - /* parse back the first argument as separator */ - TargetEntry* tle = (TargetEntry*)lfirst(list_head(aggref->args)); - getTypeOutputInfo(((Const*)tle->expr)->consttype, &typoutput, &typIsVarlena); - extval = OidOutputFunctionCall(typoutput, ((Const*)tle->expr)->constvalue); - - appendStringInfoString(buf, " SEPARATOR '"); - appendStringInfoString(buf, extval); - appendStringInfoChar(buf, '\''); - pfree_ext(extval); + appendStringInfoString(buf, " SEPARATOR "); + Const* con = (Const*)(((TargetEntry*)lfirst(list_head(aggref->args)))->expr); + get_rule_separator(con, buf); } } diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 79e6f8cc2..dcca1bd63 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -2919,8 +2919,12 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object if (!IsInitdb && (relkind == RELKIND_RELATION) && !IsSystemNamespace(namespaceId) && !IsCStoreNamespace(namespaceId) && (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) && (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT) && !u_sess->attr.attr_storage.enable_recyclebin) { - if (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL) { + bool isSegmentType = (storage_type == SEGMENT_PAGE); + if (!isSegmentType && (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL)) { storage_type = SEGMENT_PAGE; + DefElem *storage_def = makeDefElem("segment", (Node *)makeString("on")); + stmt->options = lappend(stmt->options, storage_def); + reloptions = transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false); } } else if (storage_type == SEGMENT_PAGE) { if (u_sess->attr.attr_storage.enable_recyclebin) { @@ -2963,6 +2967,12 @@ ObjectAddress DefineRelation(CreateStmt* 
stmt, char relkind, Oid ownerId, Object } } + if (!IsInitdb && u_sess->attr.attr_storage.enable_segment && storage_type == SEGMENT_PAGE && + !CheckSegmentStorageOption(stmt->options)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Only support segment storage type while parameter enable_segment is ON."))); + } + /* * Create the relation. Inherited defaults and constraints are passed in * for immediate handling --- since they don't need parsing, they can be @@ -10038,12 +10048,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_ops); /* Preallocate values/isnull arrays */ - i = Max(newTupDesc->natts, oldTupDesc->natts); - values = (Datum*)palloc(i * sizeof(Datum)); - isnull = (bool*)palloc(i * sizeof(bool)); - rc = memset_s(values, i * sizeof(Datum), 0, i * sizeof(Datum)); + int n = Max(newTupDesc->natts, oldTupDesc->natts); + values = (Datum*)palloc(n * sizeof(Datum)); + isnull = (bool*)palloc(n * sizeof(bool)); + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); securec_check(rc, "\0", "\0"); - rc = memset_s(isnull, i * sizeof(bool), true, i * sizeof(bool)); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); securec_check(rc, "\0", "\0"); /* @@ -10257,6 +10267,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat } CHECK_FOR_INTERRUPTS(); + if (tab->is_first_after) { + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); + securec_check(rc, "\0", "\0"); + } } } else { ((HeapScanDesc) scan)->rs_tupdesc = oldTupDesc; @@ -10406,6 +10422,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat ResetExprContext(econtext); CHECK_FOR_INTERRUPTS(); + if (tab->is_first_after) { + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); + 
securec_check(rc, "\0", "\0"); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); + securec_check(rc, "\0", "\0"); + } } } -- Gitee From 968d7f96d27bbca92529c2624725464942faec03 Mon Sep 17 00:00:00 2001 From: gbzhangkai Date: Fri, 1 Dec 2023 08:56:25 +0800 Subject: [PATCH 088/434] =?UTF-8?q?=E4=BB=A3=E7=A0=81=E4=B8=ADPG=5FTRY?= =?UTF-8?q?=E5=9D=97=E4=B8=AD=E6=9C=89return=E8=AF=AD=E5=8F=A5=EF=BC=8C?= =?UTF-8?q?=E4=BC=9A=E5=AF=BC=E8=87=B4stack-buffer-underflow=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_utils/adt/timestamp.cpp | 12 ++++++------ contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 339afcd80..73bc9f496 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -9068,30 +9068,30 @@ static inline bool convert_tz_internal(Timestamp raw_datetime, text *expr2, text { if (from_ok == 0) { // if expr2 is zone if (!calc_timestamp_internal(expr2, datetime, &datetime)) { - return false; + PG_TRY_RETURN(false); } } else { // if expr2 is izone if (!is_izone_in_range(str2, &interval1)) { - return false; + PG_TRY_RETURN(false); } datetime = (Timestamp)DirectFunctionCall2(timestamp_izone, PointerGetDatum(interval1), TimestampGetDatum(datetime)); } if (!datetime_in_unixtimestmap(datetime)) { *result = raw_datetime; - return true; + PG_TRY_RETURN(true); } if (to_ok == 0) { // if expr3 is zone if (!calc_timestamptz_internal(expr3, datetime, &datetime)) { - return false; + PG_TRY_RETURN(false); } } else { // if expr3 is izone if (!is_izone_in_range(str3, &interval2)) { - return false; + PG_TRY_RETURN(false); } datetime = (Timestamp)DirectFunctionCall2(timestamptz_izone, PointerGetDatum(interval2), TimestampGetDatum(datetime)); } 
*result = datetime; - return true; + PG_TRY_RETURN(true); } PG_CATCH(); { diff --git a/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp index ccbe17579..8d3c5de29 100644 --- a/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/whale/plugin_pl/plpgsql/src/pl_comp.cpp @@ -5202,7 +5202,7 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyP } expr->func = NULL; list_free_deep(parsetreeList); - return NULL; + PG_TRY_RETURN(NULL); } } queryList = pg_analyze_and_rewrite_params(parsetree, expr->query, -- Gitee From 85f8efc49c2d06d330d7fe0c754a2d34e955bed9 Mon Sep 17 00:00:00 2001 From: chenbd Date: Tue, 28 Nov 2023 19:36:38 +0800 Subject: [PATCH 089/434] fix-select-proc --- .../dolphin/expected/multi_select_in_proc.out | 290 +++++++++++++++++- contrib/dolphin/plugin_postgres.cpp | 2 + contrib/dolphin/sql/multi_select_in_proc.sql | 41 +++ 3 files changed, 332 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/multi_select_in_proc.out b/contrib/dolphin/expected/multi_select_in_proc.out index 208a807bb..101b543ba 100644 --- a/contrib/dolphin/expected/multi_select_in_proc.out +++ b/contrib/dolphin/expected/multi_select_in_proc.out @@ -364,9 +364,295 @@ call proc_def_2(@out,11); 11 | 1245 (1 row) +set b_format_behavior_compat_options=enable_set_variables; +drop table if exists t_tinyint0009 cascade; +NOTICE: table "t_tinyint0009" does not exist, skipping +create table t_tinyint0009 ( +c1 tinyint auto_increment primary key, +c2 tinyint(1) default '0', +c3 tinyint(10) not null default '0', +c4 int default '0', +c5 text +); +NOTICE: CREATE TABLE will create implicit sequence "t_tinyint0009_c1_seq" for serial column "t_tinyint0009.c1" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_tinyint0009_pkey" for table "t_tinyint0009" +drop procedure if exists insertdata; +NOTICE: function insertdata() does not exist, skipping +create procedure insertdata(num int) as 
+begin + set @x = 1; + truncate t_tinyint0009; + repeat + set @c1 = @x; + set @c2 = floor(0.1*(127-18+1))+18; + set @c3 = floor(0.1*(127-100+1))+100; + set @c4 = floor(0.1*(10000-127+1))+127; + set @c5 = concat('amy', @x); + select @c1; + select @c2; + select @c3; + select @c4; + select @c5; + insert into t_tinyint0009 values (@c1, @c2, @c3, @c4, @c5); + set @x = @x + 1; + select @x; + until @x > num end repeat; +end; +/ +call insertdata(4); + @c1 +----- + 1 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy1 +(1 row) + + @x +---- + 2 +(1 row) + + @c1 +----- + 2 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy2 +(1 row) + + @x +---- + 3 +(1 row) + + @c1 +----- + 3 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy3 +(1 row) + + @x +---- + 4 +(1 row) + + @c1 +----- + 4 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy4 +(1 row) + + @x +---- + 5 +(1 row) + +call insertdata(4); + @c1 +----- + 1 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy1 +(1 row) + + @x +---- + 2 +(1 row) + + @c1 +----- + 2 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy2 +(1 row) + + @x +---- + 3 +(1 row) + + @c1 +----- + 3 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy3 +(1 row) + + @x +---- + 4 +(1 row) + + @c1 +----- + 4 +(1 row) + + @c2 +----- + 29 +(1 row) + + @c3 +----- + 102 +(1 row) + + @c4 +------ + 1114 +(1 row) + + @c5 +------ + amy4 +(1 row) + + @x +---- + 5 +(1 row) + +select * from t_tinyint0009 order by c2; + c1 | c2 | c3 | c4 | c5 +----+----+-----+------+------ + 1 | 29 | 102 | 
1114 | amy1 + 2 | 29 | 102 | 1114 | amy2 + 3 | 29 | 102 | 1114 | amy3 + 4 | 29 | 102 | 1114 | amy4 +(4 rows) + set dolphin.sql_mode=default; drop schema multi_select_proc cascade; -NOTICE: drop cascades to 24 other objects +NOTICE: drop cascades to 26 other objects DETAIL: drop cascades to table test_1 drop cascades to table t drop cascades to function proc_a_1() @@ -391,4 +677,6 @@ drop cascades to function pro1_1145533(integer) drop cascades to function pro2_1145533(integer) drop cascades to function proc_def_1(integer,integer) drop cascades to function proc_def_2(integer) +drop cascades to table t_tinyint0009 +drop cascades to function insertdata(integer) reset current_schema; diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index e0a6ec9f9..8d17ba46d 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -422,6 +422,7 @@ static int SpiIsExecMultiSelect(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, P bool outPutSelRes = false; Port* MyProcPort = u_sess->proc_cxt.MyProcPort; int tmpPos = t_thrd.libpq_cxt.PqSendPointer; + CommandDest origDest = u_sess->SPI_cxt._current->dest; int rc; if (SQL_MODE_AllOW_PROCEDURE_WITH_SELECT() && GetSessionContext()->is_dolphin_call_stmt) { CachedPlan* cplan = SPI_plan_get_cached_plan(expr->plan); @@ -458,6 +459,7 @@ static int SpiIsExecMultiSelect(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, P } } *multi_res = outPutSelRes; + u_sess->SPI_cxt._current->dest = origDest; return rc; } diff --git a/contrib/dolphin/sql/multi_select_in_proc.sql b/contrib/dolphin/sql/multi_select_in_proc.sql index 588149d18..a2eb65e68 100644 --- a/contrib/dolphin/sql/multi_select_in_proc.sql +++ b/contrib/dolphin/sql/multi_select_in_proc.sql @@ -238,6 +238,47 @@ call proc_def_2(@out); call proc_def_2(@out,11); + +set b_format_behavior_compat_options=enable_set_variables; + +drop table if exists t_tinyint0009 cascade; +create table t_tinyint0009 ( +c1 tinyint auto_increment 
primary key, +c2 tinyint(1) default '0', +c3 tinyint(10) not null default '0', +c4 int default '0', +c5 text +); + +drop procedure if exists insertdata; +create procedure insertdata(num int) as +begin + set @x = 1; + truncate t_tinyint0009; + repeat + set @c1 = @x; + set @c2 = floor(0.1*(127-18+1))+18; + set @c3 = floor(0.1*(127-100+1))+100; + set @c4 = floor(0.1*(10000-127+1))+127; + set @c5 = concat('amy', @x); + select @c1; + select @c2; + select @c3; + select @c4; + select @c5; + insert into t_tinyint0009 values (@c1, @c2, @c3, @c4, @c5); + set @x = @x + 1; + select @x; + until @x > num end repeat; +end; +/ + +call insertdata(4); + +call insertdata(4); + +select * from t_tinyint0009 order by c2; + set dolphin.sql_mode=default; drop schema multi_select_proc cascade; -- Gitee From c3348454af1980f764dd4cd4602316f8badd6f3a Mon Sep 17 00:00:00 2001 From: he-shaoyu Date: Fri, 17 Nov 2023 16:17:03 +0800 Subject: [PATCH 090/434] =?UTF-8?q?json=5Fset=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/json_set.out | 9 + .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 158 ++++++++++-------- contrib/dolphin/sql/json_set.sql | 5 + 3 files changed, 101 insertions(+), 71 deletions(-) diff --git a/contrib/dolphin/expected/json_set.out b/contrib/dolphin/expected/json_set.out index 32f7d8f90..d581326eb 100644 --- a/contrib/dolphin/expected/json_set.out +++ b/contrib/dolphin/expected/json_set.out @@ -112,5 +112,14 @@ select name from dataa; (1 row) drop table dataa; +create table t1_z (jdoc JSON); +insert into t1_z select ('{' || string_agg('"key' || i || '": "value' || i || '"', ',') || '}')::json from generate_series(1,10) as i; +select json_set(jdoc,'$.key09','test09','$.key1','test1') from t1_z; + json_set 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"key1": "test1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5", "key6": "value6", "key7": "value7", "key8": "value8", "key9": "value9", "key09": "test09", "key10": "value10"} +(1 row) + +drop table t1_z; drop schema test_json_set cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 6a4718721..9f632f705 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -168,12 +168,11 @@ static void cJSON_DeleteJsonPath(cJSON_JsonPath *jp); static void cJSON_DeleteResultWrapper(cJSON_ResultWrapper *res); /* functions for the cJson operation*/ -static void quicksort(cJSON *item1, cJSON *item2); +static cJSON* mergesort(cJSON *start, cJSON *end); static text *formatJsondoc(char *str); static inline cJSON_JsonPath *jp_pop(cJSON_JsonPath *jp); static void cJSON_SortObject(cJSON *object); static cJSON *cJSON_ResultWrapperToArray(cJSON_ResultWrapper *res); -static void cJSON_SwapItemValue(cJSON *item1, cJSON *item2); static cJSON_bool cJSON_ArrayAppend(cJSON *root, cJSON_JsonPath *jp, cJSON *value); static cJSON_bool cJSON_JsonInsert(cJSON *root, cJSON_JsonPath *jp, cJSON *value); static cJSON *get_json_value(Oid valtype, Datum arg, bool typIsVarlena, Oid typOutput); @@ -204,7 +203,7 @@ static text *remove_duplicate_path(search_LinkStack &stk); /* functions for json_merge */ static int put_object_keys_into_set(char **keys, cJSON *json1, cJSON *json2); static int put_object_keys_into_set(char **keys, cJSON *json); -static void appendObject(StringInfo result, cJSON *json, int cnt, char **keys, int *pos); +static void appendObject(StringInfo result, cJSON *json); static void appendStringInfoObject(StringInfo 
result, cJSON *json); static void appendStringInfoArray(StringInfo result, cJSON *json); static void appendStringInfoArrayEle(StringInfo result, cJSON *json); @@ -3417,24 +3416,83 @@ static void cJSON_DeleteResultWrapper(cJSON_ResultWrapper *res) pfree(res); } -static void quicksort(cJSON *start, cJSON *end) +static cJSON* mergesort(cJSON *start, cJSON *end) { - if (start == NULL || start == end) - return; - cJSON *q = end; + if (start == NULL || start == end) { + if (start != NULL) { + start->prev = start; + start->next = NULL; + } + return start; + } cJSON *p = start; - while (q != p) { - if ((strlen(start->string) > strlen(q->string) || - (strcmp(start->string, q->string) > 0 && strlen(q->string) == strlen(start->string)))) { + cJSON *q = end; + bool cur = false; + while (p != q) { + if (cur) { p = p->next; - cJSON_SwapItemValue(p, q); } else { q = q->prev; } + cur = !cur; + } + cJSON *mid = p; + cJSON *mid2 = p->next; + p = mergesort(start, mid); + q = mergesort(mid2, end); + if (p == NULL) { + return q; + } + if (q == NULL) { + return p; + } + mid = p->prev; + end = q->prev; + cJSON *head = NULL; + cJSON *tail = NULL; + while (p != NULL && q != NULL) { + Size p_len = strlen(p->string); + Size q_len = strlen(q->string); + if ((p_len < q_len) || (p_len == q_len && strcmp(p->string, q->string) < 0)) { + if (head == NULL) { + head = p; + tail = p; + } else { + tail->next = p; + p->prev = tail; + tail = p; + } + p = p->next; + tail->next = NULL; + } else { + if (head == NULL) { + head = q; + tail = q; + } else { + tail->next = q; + q->prev = tail; + tail = q; + } + q = q->next; + tail->next = NULL; + } + } + if (p != NULL) { + tail->next = p; + p->prev = tail; + tail = mid; } - cJSON_SwapItemValue(p, start); - quicksort(start, p); - quicksort(p->next, end); + if (q != NULL) { + tail->next = q; + q->prev = tail; + tail = end; + } + head->prev = tail; + tail->next = NULL; + + + + return head; } static text *formatJsondoc(char *str) @@ -3484,13 +3542,13 @@ static void 
cJSON_SortObject(cJSON *object) cJSON *start = NULL; cJSON *end = NULL; cJSON *child = NULL; - if (object->type == cJSON_Object) { + if ((object->type & 0XFF) == cJSON_Object) { start = object->child; if (!start) { return; } end = object->child->prev; - quicksort(start, end); + object->child = mergesort(start, end); } cJSON_ArrayForEach(child, object) { @@ -3510,37 +3568,6 @@ static cJSON *cJSON_ResultWrapperToArray(cJSON_ResultWrapper *res) return array; } -static void cJSON_SwapItemValue(cJSON *item1, cJSON *item2) -{ - if (item1 == NULL || item2 == NULL) { - return; - } - - cJSON tmp; - errno_t rc = memset_s(&tmp, sizeof(tmp), 0, sizeof(tmp)); - securec_check(rc, "\0", "\0"); - tmp.child = item1->child; - tmp.type = item1->type; - tmp.valuestring = item1->valuestring; - tmp.valueint = item1->valueint; - tmp.valuedouble = item1->valuedouble; - tmp.string = item1->string; - - item1->child = item2->child; - item1->type = item2->type; - item1->valuestring = item2->valuestring; - item1->valueint = item2->valueint; - item1->valuedouble = item2->valuedouble; - item1->string = item2->string; - - item2->child = tmp.child; - item2->type = tmp.type; - item2->valuestring = tmp.valuestring; - item2->valueint = tmp.valueint; - item2->valuedouble = tmp.valuedouble; - item2->string = tmp.string; -} - static cJSON_bool cJSON_ArrayAppend(cJSON *root, cJSON_JsonPath *jp, cJSON *value) { if (!root || !jp || !value) { @@ -4982,17 +5009,18 @@ static int put_object_keys_into_set(char **keys, cJSON *json) return cnt; } -static void appendObject(StringInfo result, cJSON *json, int cnt, char **keys, int *pos) +static void appendObject(StringInfo result, cJSON *json) { + int cnt = cJSON_GetArraySize(json); + cJSON *p = json->child; appendStringInfoChar(result, '{'); for (int i = 0; i < cnt; i++) { if (i != 0) appendStringInfoString(result, ", "); - escape_json(result, keys[pos[i]]); + escape_json(result, p->string); appendStringInfoString(result, ": "); - // 
appendStringInfo(result,"\"%s\": ", keys[pos[i]]); - cJSON *tResult = cJSON_GetObjectItem(json, keys[pos[i]]); - json_regular_format(result, tResult); + json_regular_format(result, p); + p = p->next; } appendStringInfoChar(result, '}'); } @@ -5003,19 +5031,8 @@ static void appendStringInfoObject(StringInfo result, cJSON *json) appendStringInfoString(result, "{}"); return; } - - char **keys = (char **)palloc(cJSON_GetArraySize(json) * sizeof(char *)); - int cnt = put_object_keys_into_set(keys, json); - - int pos[cnt]; - for (int i = 0; i < cnt; i++) - pos[i] = i; - - get_keys_order(keys, 0, cnt - 1, pos); - - appendObject(result, json, cnt, keys, pos); - - pfree(keys); + cJSON_SortObject(json); + appendObject(result, json); } static void appendStringInfoArray(StringInfo result, cJSON *json) @@ -5027,15 +5044,14 @@ static void appendStringInfoArray(StringInfo result, cJSON *json) static void appendStringInfoArrayEle(StringInfo result, cJSON *json) { - int arr_iter = 0; + cJSON *p = json->child; while (true) { - cJSON *val = cJSON_GetArrayItem(json, arr_iter); - if (val == NULL) + if (p == NULL) break; - if (arr_iter != 0) + if (p != json->child) appendStringInfoString(result, ", "); - json_regular_format(result, val); - arr_iter++; + json_regular_format(result, p); + p = p->next; } } diff --git a/contrib/dolphin/sql/json_set.sql b/contrib/dolphin/sql/json_set.sql index 49738aa77..4062d0d9f 100644 --- a/contrib/dolphin/sql/json_set.sql +++ b/contrib/dolphin/sql/json_set.sql @@ -27,5 +27,10 @@ insert into dataa (name) value(json_set('{"s":1}','$.s',3,'$.w',5)); select name from dataa; drop table dataa; +create table t1_z (jdoc JSON); +insert into t1_z select ('{' || string_agg('"key' || i || '": "value' || i || '"', ',') || '}')::json from generate_series(1,10) as i; +select json_set(jdoc,'$.key09','test09','$.key1','test1') from t1_z; +drop table t1_z; + drop schema test_json_set cascade; reset current_schema; \ No newline at end of file -- Gitee From 
ff7abfd0b6a840ac0368915699997f10743e9f6d Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 1 Dec 2023 15:53:33 +0800 Subject: [PATCH 091/434] Sync server code. 9bc3bab77bd0828dcef6d833f70f0a56cfa4e286 --- .../include/plugin_nodes/parsenodes_common.h | 1 + contrib/dolphin/output/view_definer_test.source | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 8593f78aa..6e41d464c 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -462,6 +462,7 @@ typedef struct HintState { typedef struct UpsertClause { NodeTag type; List *targetList; + Alias *aliasName; Node *whereClause; int location; } UpsertClause; diff --git a/contrib/dolphin/output/view_definer_test.source b/contrib/dolphin/output/view_definer_test.source index 6a8c581dc..62f04dd9c 100644 --- a/contrib/dolphin/output/view_definer_test.source +++ b/contrib/dolphin/output/view_definer_test.source @@ -62,6 +62,10 @@ SELECT c.relname as view_name, u.usename as rolname FROM pg_class c, pg_user u W -- dump all views \! @abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 +Begin scanning database. +Finish scanning database. +Start dumping objects +Finish dumping objects \! cat @abs_bindir@/definer_view_dump.sql CREATE SCHEMA "Test_User"; @@ -96,15 +100,17 @@ COPY tab_1107262 (id, c1) FROM stdin; \. ; \! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp +Begin scanning database. +Finish scanning database. 
--?gs_dump[port='@portstring@'][test_db].* +Start dumping objects +Finish dumping objects --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* CREATE DATABASE target DBCOMPATIBILITY 'B'; \! @abs_bindir@/gs_restore -d target -p @portstring@ @abs_bindir@/definer_view_dump.dmp start restore operation ... -table tab_1107262 complete data imported ! -Finish reading 12 SQL statements! end restore operation ... restore operation successful --?.* @@ -140,7 +146,11 @@ create database test_1; create table startwith_t(id int, level int, connect_by_isleaf int, connect_by_iscycle int); create view startwith_v as select id, connect_by_isleaf as level, level as connect_by_isleaf from startwith_t; \! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql +Begin scanning database. +Finish scanning database. --?gs_dump[port='@portstring@'][test_1].* +Start dumping objects +Finish dumping objects --?gs_dump[port='@portstring@'][test_1].* --?gs_dump[port='@portstring@'][test_1].* --?gs_dump[port='@portstring@'][test_1].* -- Gitee From 104c85b35f3e940c8dfe36c014563599e3121b75 Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 1 Dec 2023 16:55:02 +0800 Subject: [PATCH 092/434] Optimize testcase. 
--- .../dolphin/expected/test_show_open_tables_lock_waiting_1.out | 2 +- contrib/dolphin/sql/test_show_open_tables_lock_waiting_1.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/test_show_open_tables_lock_waiting_1.out b/contrib/dolphin/expected/test_show_open_tables_lock_waiting_1.out index f3592d25f..35697acab 100644 --- a/contrib/dolphin/expected/test_show_open_tables_lock_waiting_1.out +++ b/contrib/dolphin/expected/test_show_open_tables_lock_waiting_1.out @@ -10,7 +10,7 @@ SHOW OPEN TABLES IN show_open_tables_scm; show_open_tables_scm | show_open_tables_test_2 | 0 | 0 (2 rows) -select pg_sleep(2); +select pg_sleep(3); pg_sleep ---------- diff --git a/contrib/dolphin/sql/test_show_open_tables_lock_waiting_1.sql b/contrib/dolphin/sql/test_show_open_tables_lock_waiting_1.sql index 37ea42306..73ed5fb92 100644 --- a/contrib/dolphin/sql/test_show_open_tables_lock_waiting_1.sql +++ b/contrib/dolphin/sql/test_show_open_tables_lock_waiting_1.sql @@ -4,7 +4,7 @@ set role user1 password 'Show@123'; -- 1 and 1+ locks waiting on table lock tables show_open_tables_test_1 read; SHOW OPEN TABLES IN show_open_tables_scm; -select pg_sleep(2); +select pg_sleep(3); SHOW OPEN TABLES IN show_open_tables_scm; unlock tables; SHOW OPEN TABLES IN show_open_tables_scm; -- Gitee From 54ed0a0357e07f0205556c6342886c05b0b78d61 Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 1 Dec 2023 17:53:10 +0800 Subject: [PATCH 093/434] Support 0b as bconst. 
--- contrib/dolphin/expected/db_b_parser4.out | 67 +++++++++++++++++++++++ contrib/dolphin/plugin_parser/scan.l | 23 ++++++-- contrib/dolphin/sql/db_b_parser4.sql | 16 ++++++ 3 files changed, 102 insertions(+), 4 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser4.out b/contrib/dolphin/expected/db_b_parser4.out index af8153daa..cdf1728c6 100644 --- a/contrib/dolphin/expected/db_b_parser4.out +++ b/contrib/dolphin/expected/db_b_parser4.out @@ -52,6 +52,73 @@ drop table if exists tb_db_b_parser_0002; drop table if exists tb_default_float; drop table if exists tb_default_double; drop table if exists tb_real_float; +--bconst +select 0b01; + ?column? +---------- + 01 +(1 row) + +select 0b01+1; + ?column? +---------- + 2 +(1 row) + +select 1+0b01; + ?column? +---------- + 2 +(1 row) + +select 0b01 = b'01'; + ?column? +---------- + t +(1 row) + +select 0b01+1 = b'01'+1; + ?column? +---------- + t +(1 row) + +select 1+0b01 = 1+b'01'; + ?column? +---------- + t +(1 row) + +--wrong usage +select 0b02; +ERROR: syntax error at or near "2" +LINE 1: select 0b02; + ^ +select 0b2; + b2 +---- + 0 +(1 row) + +--not report error, but not bconst actually +select 0b; -- same as select 0 b; + b +--- + 0 +(1 row) + +select 0bf; -- same as select 0 bf; + bf +---- + 0 +(1 row) + +select 0b0f; -- same as select 0b0 f; + f +--- + 0 +(1 row) + drop schema db_b_parser4 cascade; NOTICE: drop cascades to table tb_db_b_parser_0001 reset current_schema; diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index c58d6b773..ef51aa42e 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -107,7 +107,7 @@ static char *litbuf_udeescape(unsigned char escape, core_yyscan_t yyscanner); static unsigned char unescape_single_char(unsigned char c, core_yyscan_t yyscanner); static int process_integer_literal(const char *token, YYSTYPE *lval); #ifdef DOLPHIN -static int process_hex_number(char *token, int len, 
core_yyscan_t yyscanner, YYSTYPE *lval); +static int process_special_number(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval); #endif static bool is_utf16_surrogate_first(pg_wchar c); static bool is_utf16_surrogate_second(pg_wchar c); @@ -251,6 +251,7 @@ quotefail {quote}{whitespace}*"-" */ xbstart [bB]{quote} xbinside [^']* +xbstring 0b[0-1]+ /* Hexadecimal number */ xhstart [xX]{quote} @@ -503,6 +504,18 @@ other . <> { yyerror("unterminated /* comment"); return 0;} +{xbstring} { + /* Binary bit type. + * At some point we should simply pass the string + * forward to the parser and label it there. + * In the meantime, place a leading "b" on the string + * to mark it for the input routine as a binary string. + */ + SET_YYLLOC(); + yyextra->is_hint_str = false; + /* skip first '0', remain 'b' and other bit number */ + return process_special_number(yytext + 1, yyleng - 1, yyscanner, yylval); + } {xbstart} { /* Binary bit type. * At some point we should simply pass the string @@ -1210,7 +1223,7 @@ other . SET_YYLLOC(); yyextra->is_hint_str = false; /* skip first '0', remain 'x' and other hex number */ - return process_hex_number(yytext + 1, yyleng - 1, yyscanner, yylval); + return process_special_number(yytext + 1, yyleng - 1, yyscanner, yylval); } {integer} { SET_YYLLOC(); @@ -1753,12 +1766,14 @@ litbufdup(core_yyscan_t yyscanner) } #ifdef DOLPHIN -static int process_hex_number(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval) +static int process_special_number(char *token, int len, core_yyscan_t yyscanner, YYSTYPE *lval) { + /* first char is b means BCONST(don't support upper case 'B'), otherwise, it must be 'x' or 'X', which is XCONST */ + int ret_type = *token == 'b' ? 
BCONST : XCONST; startlit(); addlit(token, len, yyscanner); lval->str = litbufdup(yyscanner); - return XCONST; + return ret_type; } #endif diff --git a/contrib/dolphin/sql/db_b_parser4.sql b/contrib/dolphin/sql/db_b_parser4.sql index ded92180b..45d9dff26 100644 --- a/contrib/dolphin/sql/db_b_parser4.sql +++ b/contrib/dolphin/sql/db_b_parser4.sql @@ -28,6 +28,22 @@ drop table if exists tb_default_float; drop table if exists tb_default_double; drop table if exists tb_real_float; +--bconst +select 0b01; +select 0b01+1; +select 1+0b01; + +select 0b01 = b'01'; +select 0b01+1 = b'01'+1; +select 1+0b01 = 1+b'01'; + +--wrong usage +select 0b02; +select 0b2; +--not report error, but not bconst actually +select 0b; -- same as select 0 b; +select 0bf; -- same as select 0 bf; +select 0b0f; -- same as select 0b0 f; drop schema db_b_parser4 cascade; reset current_schema; \ No newline at end of file -- Gitee From f9770c62d9c3e5dd390b2f296db925623d0eb8f6 Mon Sep 17 00:00:00 2001 From: pengjiong Date: Fri, 1 Dec 2023 12:00:26 +0000 Subject: [PATCH 094/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=88=9B=E5=BB=BA?= =?UTF-8?q?=E7=94=A8=E6=88=B7=E6=97=B6=E7=9A=84=E6=A6=82=E7=8E=87=E6=80=A7?= =?UTF-8?q?core=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: pengjiong --- contrib/dolphin/plugin_optimizer/commands/user.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/dolphin/plugin_optimizer/commands/user.cpp b/contrib/dolphin/plugin_optimizer/commands/user.cpp index 31bf83ab9..f4654bb00 100755 --- a/contrib/dolphin/plugin_optimizer/commands/user.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/user.cpp @@ -82,7 +82,12 @@ typedef unsigned int GS_UINT32; typedef unsigned long GS_UINT32; #endif +#ifdef DOLPHIN +/* use value in openGauss-server's user.cpp, don't redefine in dolphin */ +extern MemoryContext WaitCountGlobalContext; +#else MemoryContext WaitCountGlobalContext = NULL; +#endif #define 
CREATE_PG_AUTH_ROLE 1 #define ALTER_PG_AUTH_ROLE 2 -- Gitee From 332e8522635224a0558218a6dd7d77d155881719 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Sun, 3 Dec 2023 15:16:19 +0800 Subject: [PATCH 095/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E4=BF=AE=E5=A4=8Dexception=E5=AF=BC?= =?UTF-8?q?=E8=87=B4=E7=BA=BF=E7=A8=8B=E5=BC=82=E5=B8=B8=E9=80=80=E5=87=BA?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/spq_optimizer_util/SPQOptimizer.cpp | 82 +++++++++++++------ 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp index 6155ba2fc..0ea3f196a 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp @@ -35,6 +35,16 @@ bool optimizer_trace_fallback = false; extern MemoryContext MessageContext; +void DelCException(CException **exception) +{ + if (exception == NULL || *exception == NULL) + { + return; + } + delete *exception; + *exception = NULL; +} + //--------------------------------------------------------------------------- // @function: // SPQOptimizer::PlstmtOptimize @@ -54,6 +64,7 @@ SPQOptimizer::SPQOPTOptimizedPlan( PlannedStmt *plStmt = NULL; *had_unexpected_failure = false; + CException *exception = NULL; SPQOS_TRY { @@ -63,6 +74,12 @@ SPQOptimizer::SPQOPTOptimizedPlan( } SPQOS_CATCH_EX(ex) { + exception = new CException(ex.Major(), ex.Minor(), ex.Filename(), ex.Line()); + } + SPQOS_CATCH_END; + if (exception == NULL) { + return plStmt; + } // clone the error message before context free. CHAR *serialized_error_msg = spqopt_context.CloneErrorMsg(t_thrd.mem_cxt.msg_mem_cxt); @@ -74,37 +91,42 @@ SPQOptimizer::SPQOPTOptimizedPlan( // tries to do something smart with them. 
Also, ERRCODE_INTERNAL_ERROR // is handled specially in elog.c, and we don't want that for "normal" // application errors. - if (SPQOS_MATCH_EX(ex, spqdxl::ExmaDXL, + if (SPQOS_MATCH_EX((*exception), spqdxl::ExmaDXL, spqdxl::ExmiQuery2DXLNotNullViolation)) { - errstart(ERROR, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); + errstart(ERROR, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); errfinish(errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("%s", serialized_error_msg)); } - else if (SPQOS_MATCH_EX(ex, spqdxl::ExmaDXL, spqdxl::ExmiOptimizerError) || + else if (SPQOS_MATCH_EX((*exception), spqdxl::ExmaDXL, spqdxl::ExmiOptimizerError) || spqopt_context.m_should_error_out) { Assert(NULL != serialized_error_msg); - errstart(ERROR, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); + errstart(ERROR, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); errfinish(errcode(ERRCODE_INTERNAL_ERROR), errmsg("%s", serialized_error_msg)); } - else if (SPQOS_MATCH_EX(ex, spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) + else if (SPQOS_MATCH_EX((*exception), spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) { + DelCException(&exception); PG_RE_THROW(); } - else if (SPQOS_MATCH_EX(ex, spqdxl::ExmaDXL, + else if (SPQOS_MATCH_EX((*exception), spqdxl::ExmaDXL, spqdxl::ExmiNoAvailableMemory)) { - errstart(ERROR, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); + errstart(ERROR, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); errfinish(errcode(ERRCODE_INTERNAL_ERROR), errmsg("no available memory to allocate string buffer")); } - else if (SPQOS_MATCH_EX(ex, spqdxl::ExmaDXL, + else if (SPQOS_MATCH_EX((*exception), spqdxl::ExmaDXL, spqdxl::ExmiInvalidComparisonTypeCode)) { - errstart(ERROR, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); + errstart(ERROR, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); errfinish( errcode(ERRCODE_INTERNAL_ERROR), errmsg( @@ 
-118,7 +140,8 @@ SPQOptimizer::SPQOPTOptimizedPlan( if (optimizer_trace_fallback) { - errstart(INFO, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); + errstart(INFO, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); errfinish( errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( @@ -131,9 +154,9 @@ SPQOptimizer::SPQOPTOptimizedPlan( if (serialized_error_msg) pfree(serialized_error_msg); - } - SPQOS_CATCH_END; - return plStmt; + DelCException(&exception); + + return NULL; } @@ -148,17 +171,20 @@ SPQOptimizer::SPQOPTOptimizedPlan( char * SPQOptimizer::SerializeDXLPlan(Query *query) { + CException *exception = NULL; SPQOS_TRY; { return COptTasks::Optimize(query); } SPQOS_CATCH_EX(ex); { - errstart(ERROR, ex.Filename(), ex.Line(), NULL, TEXTDOMAIN); - errfinish(errcode(ERRCODE_INTERNAL_ERROR), - errmsg("optimizer failed to produce plan")); + exception = new CException(ex.Major(), ex.Minor(), ex.Filename(), ex.Line()); } SPQOS_CATCH_END; + errstart(ERROR, exception->Filename(), exception->Line(), NULL, TEXTDOMAIN); + DelCException(&exception); + errfinish(errcode(ERRCODE_INTERNAL_ERROR), + errmsg("optimizer failed to produce plan")); return NULL; } @@ -248,18 +274,22 @@ SerializeDXLPlan(Query *query) void InitSPQOPT() { + CException *exception = NULL; SPQOS_TRY { return SPQOptimizer::InitSPQOPT(); } SPQOS_CATCH_EX(ex) { - if (SPQOS_MATCH_EX(ex, spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) - { - PG_RE_THROW(); - } + exception = new CException(ex.Major(), ex.Minor(), ex.Filename(), ex.Line()); } SPQOS_CATCH_END; + if (exception != NULL && SPQOS_MATCH_EX((*exception), spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) + { + DelCException(&exception); + PG_RE_THROW(); + } + DelCException(&exception); } //--------------------------------------------------------------------------- @@ -273,18 +303,22 @@ InitSPQOPT() void TerminateSPQOPT() { + CException *exception = NULL; SPQOS_TRY { return SPQOptimizer::TerminateSPQOPT(); } SPQOS_CATCH_EX(ex) { - if 
(SPQOS_MATCH_EX(ex, spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) - { - PG_RE_THROW(); - } + exception = new CException(ex.Major(), ex.Minor(), ex.Filename(), ex.Line()); } SPQOS_CATCH_END; + if (exception != NULL && SPQOS_MATCH_EX((*exception), spqdxl::ExmaSPQDB, spqdxl::ExmiSPQDBError)) + { + DelCException(&exception); + PG_RE_THROW(); + } + DelCException(&exception); } void UnInitSPQOPT(int status, Datum arg) -- Gitee From b082c521b9b290ee8accd5179eab93c4f494247a Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 4 Dec 2023 17:55:10 +0800 Subject: [PATCH 096/434] Add length(binary/varbinary). --- contrib/dolphin/expected/db_b_parser2.out | 17 +++++++++++++++++ contrib/dolphin/plugin_utils/adt/varlena.cpp | 8 ++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 3 +++ contrib/dolphin/sql/db_b_parser2.sql | 10 ++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 5 +++++ 5 files changed, 43 insertions(+) diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index 29c67960e..703787d67 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -739,6 +739,23 @@ select length(''),length(' '),length(NULL); 0 | 1 | (1 row) +set dolphin.sql_mode='treat_bxconst_as_binary'; +create table t_binary(a binary, b varbinary(10), c binary(10)); +insert into t_binary values(null,null,null); +insert into t_binary values(b'1',b'111',b'111'); +insert into t_binary values(b'1',b'11111',b'111'); +insert into t_binary values(b'1',b'111111111',b'111'); +select *,length(a),length(b),length(c) from t_binary order by 1,2,3; + a | b | c | length | length | length +------+--------+------------------------+--------+--------+-------- + \x01 | \x01ff | \x07000000000000000000 | 1 | 2 | 10 + \x01 | \x07 | \x07000000000000000000 | 1 | 1 | 10 + \x01 | \x1f | \x07000000000000000000 | 1 | 1 | 10 + | | | | | +(4 rows) + +drop table t_binary; +reset dolphin.sql_mode; create table fchar_test(fchar char(10)); insert 
into fchar_test values('零一二三四五六七八ä¹'); insert into fchar_test values('零一二三四五六七八ä¹0'); diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index c73669dc3..4ef9d6c96 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10919,4 +10919,12 @@ Datum rand_seed(PG_FUNCTION_ARGS) PG_RETURN_FLOAT8(result); } + +PG_FUNCTION_INFO_V1_PUBLIC(binary_length); +extern "C" DLL_PUBLIC Datum binary_length(PG_FUNCTION_ARGS); +Datum binary_length(PG_FUNCTION_ARGS) +{ + bytea* vlena = PG_GETARG_BYTEA_PP(0); + PG_RETURN_INT32(VARSIZE_ANY_EXHDR(vlena)); +} #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index a50e209d9..c63f662ab 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -128,6 +128,9 @@ $$ DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(text); DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(numeric); +drop function if EXISTS pg_catalog.length(binary); +drop function if EXISTS pg_catalog.length(varbinary); + drop CAST IF EXISTS (timestamptz as boolean); drop CAST IF EXISTS (timestamp(0) without time zone as boolean); DROP FUNCTION IF EXISTS pg_catalog.timestamptz_bool(timestamptz); diff --git a/contrib/dolphin/sql/db_b_parser2.sql b/contrib/dolphin/sql/db_b_parser2.sql index 45556c03f..b243f48b7 100644 --- a/contrib/dolphin/sql/db_b_parser2.sql +++ b/contrib/dolphin/sql/db_b_parser2.sql @@ -167,6 +167,16 @@ select length(E'哈1哈\n'),length(E'\n'),length(E'\\n'); --返回0ã€1ã€ç©º select length(''),length(' '),length(NULL); +set dolphin.sql_mode='treat_bxconst_as_binary'; +create table t_binary(a binary, b varbinary(10), c binary(10)); +insert into t_binary values(null,null,null); +insert into t_binary values(b'1',b'111',b'111'); +insert into t_binary values(b'1',b'11111',b'111'); +insert into t_binary 
values(b'1',b'111111111',b'111'); +select *,length(a),length(b),length(c) from t_binary order by 1,2,3; +drop table t_binary; +reset dolphin.sql_mode; + create table fchar_test(fchar char(10)); insert into fchar_test values('零一二三四五六七八ä¹'); insert into fchar_test values('零一二三四五六七八ä¹0'); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 207b4e819..2af77a3fc 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -209,6 +209,11 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamp_bool(timestamp(0) without time zone CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_bool(timestamptz) returns boolean LANGUAGE C immutable strict as '$libdir/dolphin', 'timestamptz_bool'; CREATE CAST (timestamptz as boolean) WITH FUNCTION timestamptz_bool(timestamptz) AS ASSIGNMENT; +drop function if EXISTS pg_catalog.length(binary); +drop function if EXISTS pg_catalog.length(varbinary); +CREATE OR REPLACE FUNCTION pg_catalog.length(binary) returns int4 LANGUAGE C immutable strict as '$libdir/dolphin', 'binary_length'; +CREATE OR REPLACE FUNCTION pg_catalog.length(varbinary) returns int4 LANGUAGE C immutable strict as '$libdir/dolphin', 'binary_length'; + CREATE OR REPLACE FUNCTION pg_catalog.timestamp_bool(timestamp(0) without time zone) returns boolean LANGUAGE C immutable strict as '$libdir/dolphin', 'timestamp_bool'; CREATE CAST (timestamp(0) without time zone as boolean) WITH FUNCTION timestamp_bool(timestamp(0) without time zone) AS ASSIGNMENT; -- Gitee From 773b3302e822427f2dca3bc8615ffe4264c57f58 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 5 Dec 2023 09:53:58 +0800 Subject: [PATCH 097/434] Sync server code. 
fd14359b3c420030fdf8d3980b68663a348a0814 --- contrib/dolphin/plugin_catalog/heap.cpp | 2 +- contrib/dolphin/plugin_executor/execQual.cpp | 31 +++++++------------ .../plugin_optimizer/commands/alter.cpp | 16 ++++++++++ .../commands/functioncmds.cpp | 28 +++++++++++++++++ .../plugin_optimizer/commands/schemacmds.cpp | 8 +++++ .../plugin_optimizer/commands/typecmds.cpp | 7 +++++ contrib/dolphin/plugin_utils/adt/acl.cpp | 19 ++---------- contrib/dolphin/tablecmds.cpp | 23 ++++++++++++++ 8 files changed, 98 insertions(+), 36 deletions(-) diff --git a/contrib/dolphin/plugin_catalog/heap.cpp b/contrib/dolphin/plugin_catalog/heap.cpp index 92c634a6e..1a4f214ce 100644 --- a/contrib/dolphin/plugin_catalog/heap.cpp +++ b/contrib/dolphin/plugin_catalog/heap.cpp @@ -7592,7 +7592,7 @@ Oid getPartitionIdFromTuple(Relation rel, void *tuple, EState* estate, TupleTabl { char* partExprKeyStr = NULL; Oid targetOid = InvalidOid; - bool partExprKeyIsNull = PartExprKeyIsNull(rel, NULL, &partExprKeyStr); + bool partExprKeyIsNull = PartExprKeyIsNull(rel, &partExprKeyStr); if (partExprKeyIsNull) { targetOid = heapTupleGetPartitionId(rel, tuple, partitionno, isDDL, canIgnore); } else { diff --git a/contrib/dolphin/plugin_executor/execQual.cpp b/contrib/dolphin/plugin_executor/execQual.cpp index 88ec8af89..34a9d393d 100644 --- a/contrib/dolphin/plugin_executor/execQual.cpp +++ b/contrib/dolphin/plugin_executor/execQual.cpp @@ -3569,32 +3569,25 @@ static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext, bool *is cursor_return_number = fcache->fcinfo_data.refcursor_data.return_number; if (func->funcformat == COERCE_EXPLICIT_CAST || func->funcformat == COERCE_IMPLICIT_CAST) { - target_type = func->funcresulttype; - source_type = fcache->fcinfo_data.argTypes[0]; HeapTuple proc_tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(func->funcid), 0, 0, 0); if (HeapTupleIsValid(proc_tuple)) { Form_pg_proc proc_struct = (Form_pg_proc)GETSTRUCT(proc_tuple); source_type = 
proc_struct->proargtypes.values[0]; ReleaseSysCache(proc_tuple); - } - HeapTuple cast_tuple = SearchSysCache2(CASTSOURCETARGET, ObjectIdGetDatum(source_type), - ObjectIdGetDatum(target_type)); - - if (HeapTupleIsValid(cast_tuple)) { - Relation cast_rel = heap_open(CastRelationId, AccessShareLock); - int castowner_Anum = Anum_pg_cast_castowner; - if (castowner_Anum <= (int)HeapTupleHeaderGetNatts(cast_tuple->t_data, cast_rel->rd_att)) { - bool isnull = true; - Datum datum = fastgetattr(cast_tuple, Anum_pg_cast_castowner, cast_rel->rd_att, &isnull); - if (!isnull) { - u_sess->exec_cxt.cast_owner = DatumGetObjectId(datum); - } else { - u_sess->exec_cxt.cast_owner = InvalidCastOwnerId; - } + target_type = func->funcresulttype; + HeapTuple cast_tuple = SearchSysCache2(CASTSOURCETARGET, ObjectIdGetDatum(source_type), + ObjectIdGetDatum(target_type)); + if (HeapTupleIsValid(cast_tuple)) { + bool isnull = false; + Datum datum = SysCacheGetAttr(CASTSOURCETARGET, cast_tuple, Anum_pg_cast_castowner, &isnull); + if (!isnull) { + u_sess->exec_cxt.cast_owner = DatumGetObjectId(datum); + } else { + u_sess->exec_cxt.cast_owner = InvalidCastOwnerId; + } + ReleaseSysCache(cast_tuple); } - heap_close(cast_rel, AccessShareLock); - ReleaseSysCache(cast_tuple); } } diff --git a/contrib/dolphin/plugin_optimizer/commands/alter.cpp b/contrib/dolphin/plugin_optimizer/commands/alter.cpp index 6c9f208dd..fda4691ac 100644 --- a/contrib/dolphin/plugin_optimizer/commands/alter.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/alter.cpp @@ -66,6 +66,7 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "gs_policy/gs_policy_masking.h" +#include "catalog/gs_dependencies_fn.h" /* * Executes an ALTER OBJECT / RENAME TO statement. 
Based on the object @@ -736,6 +737,21 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) IsThereFunctionInNamespace(NameStr(proc->proname), proc->pronargs, &proc->proargtypes, nspOid); + + if (enable_plpgsql_gsdependency_guc()) { + const char* old_func_format = format_procedure_no_visible(objid); + const char* old_func_name = NameStr(proc->proname); + bool is_null = false; + Datum package_oid_datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_packageid, &is_null); + Oid pkg_oid = DatumGetObjectId(package_oid_datum); + if (gsplsql_exists_func_obj(oldNspOid, pkg_oid, old_func_format, old_func_name)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The set schema operator of %s is not allowed, " + "because it is referenced by the other object.", + NameStr(proc->proname)))); + } + } } else if (classId == CollationRelationId) { Form_pg_collation coll = (Form_pg_collation) GETSTRUCT(tup); diff --git a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp index 8f953aab4..c849ead23 100755 --- a/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/functioncmds.cpp @@ -2016,6 +2016,20 @@ ObjectAddress RenameFunction(List* name, List* argtypes, const char* newname) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceOid)); + if (enable_plpgsql_gsdependency_guc()) { + const char* old_func_format = format_procedure_no_visible(procOid); + const char* old_func_name = strVal(llast(name)); + bool is_null = false; + Datum package_oid_datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_packageid, &is_null); + Oid pkg_oid = DatumGetObjectId(package_oid_datum); + if (gsplsql_exists_func_obj(namespaceOid, pkg_oid, old_func_format, old_func_name)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator of %s is not allowed, 
because it is referenced by the other object.", + NameStr(procForm->proname)))); + } + } + /* rename */ (void)namestrcpy(&(procForm->proname), newname); simple_heap_update(rel, &tup->t_self, tup); @@ -3289,6 +3303,20 @@ Oid AlterFunctionNamespace_oid(Oid procOid, Oid nspOid) } } #endif + if (enable_plpgsql_gsdependency_guc()) { + const char* old_func_format = format_procedure_no_visible(procOid); + const char* old_func_name = NameStr(proc->proname); + bool is_null = false; + Datum package_oid_datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_packageid, &is_null); + Oid pkg_oid = DatumGetObjectId(package_oid_datum); + if (gsplsql_exists_func_obj(oldNspOid, pkg_oid, old_func_format, old_func_name)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The set schema operator of %s is not allowed, " + "because it is referenced by the other object.", + NameStr(proc->proname)))); + } + } /* OK, modify the pg_proc row */ /* tup is a copy, so we can scribble directly on it */ proc->pronamespace = nspOid; diff --git a/contrib/dolphin/plugin_optimizer/commands/schemacmds.cpp b/contrib/dolphin/plugin_optimizer/commands/schemacmds.cpp index e0dbf86d0..66d22d43e 100644 --- a/contrib/dolphin/plugin_optimizer/commands/schemacmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/schemacmds.cpp @@ -41,6 +41,7 @@ #include "utils/snapmgr.h" #include "gs_ledger/ledger_utils.h" #include "gs_ledger/userchain.h" +#include "catalog/gs_dependencies_fn.h" #ifdef PGXC #include "pgxc/pgxc.h" @@ -589,6 +590,13 @@ ObjectAddress RenameSchema(const char* oldname, const char* newname) existTimeSeriesTbl->data))); } + if (enable_plpgsql_gsdependency_guc() && gsplsql_exists_schema_name(oldname)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator of %s is not allowed, because it is referenced by the other object.", + oldname))); + } + /* Before rename schema (with blockchain) rename related ledger tables first */ bool 
is_null = true; Datum datum = SysCacheGetAttr(NAMESPACENAME, tup, Anum_pg_namespace_nspblockchain, &is_null); diff --git a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp index 678de0ffd..b3371e810 100644 --- a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp @@ -3688,6 +3688,13 @@ Oid AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses* objsMoved) errmsg("cannot alter array type %s", format_type_be(typeOid)), errhint("You can alter type %s, which will alter the array type as well.", format_type_be(elemOid)))); + if (enable_plpgsql_gsdependency_guc() && + gsplsql_is_object_depend(typeOid, GSDEPEND_OBJECT_TYPE_TYPE)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The set schema operator of %s is not allowed, because it is referenced by the other object.", + get_typename(typeOid)))); + } /* and do the work */ return AlterTypeNamespaceInternal(typeOid, nspOid, false, true, objsMoved); } diff --git a/contrib/dolphin/plugin_utils/adt/acl.cpp b/contrib/dolphin/plugin_utils/adt/acl.cpp index 32c3b7e0c..ed86e65ef 100644 --- a/contrib/dolphin/plugin_utils/adt/acl.cpp +++ b/contrib/dolphin/plugin_utils/adt/acl.cpp @@ -5763,13 +5763,7 @@ static Oid get_role_oid_or_public(const char* rolname) bool is_role_independent(Oid roleid) { HeapTuple rtup = NULL; - bool isNull = false; bool flag = false; - - Relation relation = heap_open(AuthIdRelationId, AccessShareLock); - - TupleDesc pg_authid_dsc = RelationGetDescr(relation); - /* Look up the information in pg_authid. */ rtup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleid)); if (HeapTupleIsValid(rtup)) { @@ -5777,18 +5771,11 @@ bool is_role_independent(Oid roleid) * For upgrade reason, we must get field value through heap_getattr function * although it is a char type value. 
*/ - Datum authidrolkindDatum = heap_getattr(rtup, Anum_pg_authid_rolkind, pg_authid_dsc, &isNull); - - if (DatumGetChar(authidrolkindDatum) == ROLKIND_INDEPENDENT) - flag = true; - else - flag = false; - + bool isNull = false; + Datum authidrolkindDatum = SysCacheGetAttr(AUTHOID, rtup, Anum_pg_authid_rolkind, &isNull); + flag = !isNull && DatumGetChar(authidrolkindDatum) == ROLKIND_INDEPENDENT; ReleaseSysCache(rtup); } - - heap_close(relation, AccessShareLock); - return flag; } diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index dcca1bd63..7c20f1e4b 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -9116,6 +9116,14 @@ static void sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation re heap_close(attrelation, RowExclusiveLock); } +static bool sqlcmd_partition_index_ddl_cmd(AlterTableType cmd) +{ + /* AT_UnusableAllIndexOnSubPartition is not supported */ + return ((cmd) == AT_UnusableIndexPartition || (cmd) == AT_UnusableAllIndexOnPartition || + (cmd) == AT_UnusableIndex || (cmd) == AT_AddIndex || (cmd) == AT_ReAddIndex || + (cmd) == AT_AddIndexConstraint); +} + static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef) { List *columnOptions = columnDef->columnOptions; @@ -9140,11 +9148,18 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype); if (PARTITION_DDL_CMD(cmd->subtype) && RELATION_IS_PARTITIONED(rel)) { + /* Register invalidation of the relation's relcache entry. 
*/ + CacheInvalidateRelcache(rel); int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id)); if (!PARTITIONNO_IS_VALID(partitionno)) { RelationResetPartitionno(rel->rd_id, ShareUpdateExclusiveLock); } } + + if (sqlcmd_partition_index_ddl_cmd(cmd->subtype) && RelationIsIndex(rel)) { + Oid rel_id = IndexGetRelation(rel->rd_id, false); + CacheInvalidateRelcacheByRelid(rel_id); + } switch (cmd->subtype) { case AT_AddColumn: /* ADD COLUMN */ @@ -22045,6 +22060,14 @@ void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object Assert(objsMoved != NULL); + if (enable_plpgsql_gsdependency_guc() && + gsplsql_is_object_depend(rel->rd_rel->reltype, GSDEPEND_OBJECT_TYPE_TYPE)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The set schema operator of %s is not allowed, " + "because it is referenced by another object.", NameStr(rel->rd_rel->relname)))); + } + /* OK, modify the pg_class row and pg_depend entry */ classRel = heap_open(RelationRelationId, RowExclusiveLock); -- Gitee From 31351589c9fc31b5b74d82affe65ab62e94281cb Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Mon, 4 Dec 2023 19:55:14 +0800 Subject: [PATCH 098/434] =?UTF-8?q?[=E9=9C=80=E6=B1=82]=E9=83=A8=E5=88=86?= =?UTF-8?q?=E5=85=B3=E9=94=AE=E5=AD=97=E9=99=8D=E7=BA=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../case_sensitive_test/alter_table.out | 4 +- .../case_sensitive_test/create_index.out | 12 - .../case_sensitive_test/create_view1.out | 2 +- .../case_sensitive_test/create_view5.out | 2 +- .../alter_table.out | 4 +- contrib/dolphin/expected/charset_gbk_b_db.out | 526 +++++++------- .../dolphin/expected/charset_utf8mb4_b_db.out | 608 ++++++++--------- contrib/dolphin/expected/column_name.out | 4 +- .../expected/db_b_date_time_functions.out | 6 - .../dolphin/expected/db_b_new_gram_test.out | 4 - contrib/dolphin/expected/kwlist.out | 424 ++++++++++++ 
contrib/dolphin/expected/show_create.out | 2 +- .../dolphin/expected/test_current_user.out | 2 + contrib/dolphin/expected/test_system_user.out | 18 +- .../dolphin/include/plugin_parser/kwlist.h | 84 ++- contrib/dolphin/include/plugin_postgres.h | 1 + contrib/dolphin/parallel_schedule_dolphin | 2 +- .../plugin_optimizer/commands/alter.cpp | 14 +- contrib/dolphin/plugin_parser/analyze.cpp | 4 + contrib/dolphin/plugin_parser/gram.y | 644 +++++++++++------- contrib/dolphin/plugin_utility.cpp | 14 +- .../dolphin/plugin_utils/adt/ri_triggers.cpp | 46 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 4 + .../rollback_script/dolphin--3.0--2.0.sql | 3 + .../sql/case_sensitive_test/alter_table.sql | 4 +- .../sql/case_sensitive_test/create_index.sql | 4 - .../sql/case_sensitive_test/create_view1.sql | 2 +- .../sql/case_sensitive_test/create_view5.sql | 2 +- .../alter_table.sql | 4 +- contrib/dolphin/sql/charset_gbk_b_db.sql | 434 ++++++------ contrib/dolphin/sql/charset_utf8mb4_b_db.sql | 552 +++++++-------- contrib/dolphin/sql/column_name.sql | 4 +- .../dolphin/sql/db_b_date_time_functions.sql | 1 - contrib/dolphin/sql/db_b_new_gram_test.sql | 1 - contrib/dolphin/sql/kwlist.sql | 195 ++++++ contrib/dolphin/sql/show_create.sql | 2 +- contrib/dolphin/sql/test_system_user.sql | 2 - .../upgrade_script/dolphin--2.0--3.0.sql | 4 +- 38 files changed, 2223 insertions(+), 1422 deletions(-) create mode 100644 contrib/dolphin/expected/kwlist.out create mode 100644 contrib/dolphin/sql/kwlist.sql diff --git a/contrib/dolphin/expected/case_sensitive_test/alter_table.out b/contrib/dolphin/expected/case_sensitive_test/alter_table.out index 108732228..b2630daed 100644 --- a/contrib/dolphin/expected/case_sensitive_test/alter_table.out +++ b/contrib/dolphin/expected/case_sensitive_test/alter_table.out @@ -196,7 +196,7 @@ ERROR: CREATE TABLE ... INHERITS is not yet supported. 
\d "Constraint_Rename_Test2" ALTER TABLE Constraint_Rename_Test2 RENAME CONSTRAINT con1 TO con1foo; -- fail ERROR: relation "Constraint_Rename_Test2" does not exist -ALTER TABLE ONLY Constraint_Rename_Test RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE ONLY (Constraint_Rename_Test) RENAME CONSTRAINT con1 TO con1foo; -- fail ALTER TABLE Constraint_Rename_Test RENAME CONSTRAINT con1 TO con1foo; -- ok ERROR: constraint "con1" for table "Constraint_Rename_Test" does not exist \d "Constraint_Rename_Test" @@ -211,7 +211,7 @@ Check constraints: \d "Constraint_Rename_Test2" ALTER TABLE Constraint_Rename_Test ADD CONSTRAINT con2 CHECK (b > 0) NO InheRIT; -ALTER TABLE ONLY Constraint_Rename_Test RENAME CONSTRAINT con2 TO con2foo; -- ok +ALTER TABLE ONLY (Constraint_Rename_Test) RENAME CONSTRAINT con2 TO con2foo; -- ok ALTER TABLE Constraint_Rename_Test RENAME CONSTRAINT con2foo TO con2bar; -- ok \d "Constraint_Rename_Test" Table "public.Constraint_Rename_Test" diff --git a/contrib/dolphin/expected/case_sensitive_test/create_index.out b/contrib/dolphin/expected/case_sensitive_test/create_index.out index 4c655bf50..9e1319182 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_index.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_index.out @@ -1845,18 +1845,6 @@ explain (costs off) select /*+ rows(t_rep_table #100000) */ * from t_hash_tabl Index Cond: (a = t_hash_table.a) (4 rows) -create index test0 on t_rep_table(rownum); -ERROR: specified ROWNUM is not allowed here. -LINE 1: create index test0 on t_rep_table(rownum); - ^ -create index test0 on t_rep_table(sin(a), sin(rownum)); -ERROR: specified ROWNUM is not allowed here. -LINE 1: create index test0 on t_rep_table(sin(a), sin(rownum)); - ^ -create index test0 on t_rep_table(sin(a), sin(rownum+1)); -ERROR: specified ROWNUM is not allowed here. 
-LINE 1: create index test0 on t_rep_table(sin(a), sin(rownum+1)); - ^ drop index idx_rep_table; drop table t_hash_table; drop table t_rep_table; diff --git a/contrib/dolphin/expected/case_sensitive_test/create_view1.out b/contrib/dolphin/expected/case_sensitive_test/create_view1.out index b2cbbb6c5..1d612b100 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_view1.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_view1.out @@ -8,7 +8,7 @@ set dolphin.lower_case_table_names TO 0; CREATE VIEW street AS SELECT r.name, r.thepath, c.cname AS cname - FROM ONLY Road r, Real_City c + FROM ONLY (Road) r, Real_City c WHERE c.outline ## r.thepath; ERROR: operator does not exist: path ## path LINE 4: WHERE c.outline ## r.thepath; diff --git a/contrib/dolphin/expected/case_sensitive_test/create_view5.out b/contrib/dolphin/expected/case_sensitive_test/create_view5.out index 78560da57..2a4fc4a22 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_view5.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_view5.out @@ -180,7 +180,7 @@ ERROR: view "tt23v" does not exist create view tt20v as select * from coalesce(1,2) as c, - collation for ('x'::text) col, + pg_collation_for ('x'::text) col, current_date as d, cast(1+2 as int4) as i4, cast(1+2 as int8) as i8; diff --git a/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out b/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out index bd200bfc6..eb49faec3 100644 --- a/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out +++ b/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out @@ -200,7 +200,7 @@ ERROR: CREATE TABLE ... INHERITS is not yet supported. 
\d "Constraint_Rename_Test2" ALTER TABLE `Constraint_Rename_Test2` RENAME CONSTRAINT `con1` TO `con1foo`; -- fail ERROR: relation "Constraint_Rename_Test2" does not exist -ALTER TABLE ONLY `Constraint_Rename_Test` RENAME CONSTRAINT `con1` TO `con1foo`; -- fail +ALTER TABLE ONLY (`Constraint_Rename_Test`) RENAME CONSTRAINT `con1` TO `con1foo`; -- fail ALTER TABLE `Constraint_Rename_Test` RENAME CONSTRAINT `con1` TO `con1foo`; -- ok ERROR: constraint "con1" for table "Constraint_Rename_Test" does not exist \d "Constraint_Rename_Test" @@ -215,7 +215,7 @@ Check constraints: \d "Constraint_Rename_Test2" ALTER TABLE `Constraint_Rename_Test` ADD CONSTRAINT `con2` CHECK (`b` > 0) NO InheRIT; -ALTER TABLE ONLY `Constraint_Rename_Test` RENAME CONSTRAINT `con2` TO `con2foo`; -- ok +ALTER TABLE ONLY (`Constraint_Rename_Test`) RENAME CONSTRAINT `con2` TO `con2foo`; -- ok ALTER TABLE `Constraint_Rename_Test` RENAME CONSTRAINT `con2foo` TO `con2bar`; -- ok \d "Constraint_Rename_Test" Table "public.Constraint_Rename_Test" diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index 81307c807..de3a73464 100644 --- a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -203,7 +203,7 @@ SELECT CONVERT_TO(_utf8mb4'楂樻柉', 'gbk'); -- ERROR ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 SELECT CONVERT_TO(_utf8mb4'高斯', 'gbk'); -- ERROR ERROR: the character set of convert_to function arguments must be server_encoding -SELECT COLLATION FOR(CAST('高斯' AS binary)::text); +SELECT pg_collation_for(CAST('高斯' AS binary)::text); pg_collation_for ------------------ gbk_chinese_ci @@ -590,343 +590,343 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB' COLLATE gbk_bin); (1 row) -- -- -- same charset & implicit collation -SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); result | pg_collation_for 
------------------+------------------ 高斯DB高斯DB | gbk_chinese_ci (1 row) -- -- -- diff charset & explicit collation -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...' 
COLLATE gb18030_chinese_ci) result, collation for(result); +LINE 1: ...OLLATE gb18030_chinese_ci) result, pg_collation_for(result); ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_bin" -LINE 1: ...˜æ–¯DB' COLLATE gb18030_bin) result, collation for(result); - ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR +LINE 1: ...–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); + ^ +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_chinese_ci" -LINE 1: ...' COLLATE gb18030_chinese_ci) result, collation for(result); +LINE 1: ...OLLATE gb18030_chinese_ci) result, pg_collation_for(result); ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...˜æ–¯DB' COLLATE gb18030_bin) result, collation for(result); - ^ +LINE 1: ...–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); + ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, collation for(result); +SELECT 
CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...˜æ–¯DB' , _gb18030'高斯DB') result, collation for(result); - ^ -SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, collation for(result); -- ERROR +LINE 1: ...DB' , _gb18030'高斯DB') result, pg_collation_for(result); + ^ +SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...18030'高斯DB' , '高斯DB') result, collation for(result); +LINE 1: ...30'高斯DB' , '高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ 高斯DB\xe9ab98e696af4442 
| gbk_chinese_ci (1 row) -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, collation for(result); +SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) -- -- -- explicit & implicit -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | "C" (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.gbk") result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.gbk") result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | "zh_CN.gbk" (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | gbk_chinese_ci (1 row) -SELECT 
CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯db | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 高斯DB高斯db | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | gb18030_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, 
pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+------------------ ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' , 
_gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 高斯DB高斯DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gb18030_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442¸ß˹DB | 
utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------+------------------ \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------+-------------------- ¸ß˹DB高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------------+------------------ ¸ß˹DB高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) -SELECT 
CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------------+-------------------- ¸ß˹DB高斯DB\xe9ab98e696af4442 | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -- ERROR -ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...˜æ–¯DB', _utf8mb4'高斯DB') result, collation for(result); - ^ -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, collation for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...高斯DB', _binary'高斯DB') result, collation for(result); +LINE 1: ...–¯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" +LINE 1: ...–¯DB', _binary'高斯DB') result, pg_collation_for(result); + ^ +SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------+-------------------- 高斯DB高斯DB¸ß˹DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ 
高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci @@ -962,25 +962,25 @@ ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chin LINE 1: ...B楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB'); ^ -- -- -- const CONCAT CONCAT -SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); result | pg_collation_for ----------------------------------+-------------------- 高斯DB高斯DB高斯DB高斯DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); result | pg_collation_for ----------------------------------+------------------ 高斯DB高斯DB高斯DB高斯DB | utf8mb4_bin @@ -988,98 +988,98 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); +SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 
高斯DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); +SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 5.1.1高斯DB | utf8_general_ci (1 row) -SELECT CONCAT('高斯DB', 123) result, collation for(result); +SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 高斯DB123 | gbk_chinese_ci (1 row) -SELECT CONCAT(123, '高斯DB') result, collation for(result); +SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 123高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT('高斯DB', DATE '2023-05-01') result, collation for(result); +SELECT CONCAT('高斯DB', DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 高斯DB2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 2023-05-01高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT('高斯DB', NULL) result, collation for(result); +SELECT CONCAT('高斯DB', NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, '高斯DB') result, collation for(result); +SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -- -- -- -- diff charset -SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ ¸ß˹DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, collation for(result); 
+SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 5.1.1¸ß˹DB | utf8_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', 123) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', 123) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- ¸ß˹DB123 | utf8mb4_general_ci (1 row) -SELECT CONCAT(123, _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(123, _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 123¸ß˹DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- ¸ß˹DB2023-05-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 2023-05-01¸ß˹DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', NULL) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci @@ -1087,207 +1087,207 @@ SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, collation for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); result | 
pg_collation_for ---------------+------------------ 高斯DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 5.1.1高斯DB | utf8_general_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 高斯DB123 | gbk_chinese_ci (1 row) -SELECT CONCAT(123, CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(123, CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 123高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 高斯DB2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 2023-05-01高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), NULL) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, collation for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, pg_collation_for(result); 
result | pg_collation_for -------------+------------------ ¸ß˹DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, collation for(result); +SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 5.1.1¸ß˹DB | utf8_general_ci (1 row) -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, collation for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- ¸ß˹DB123 | utf8mb4_general_ci (1 row) -SELECT CONCAT(123, CONCAT(_utf8mb4'高斯DB')) result, collation for(result); +SELECT CONCAT(123, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 123¸ß˹DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- ¸ß˹DB2023-05-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', CONCAT(_utf8mb4'高斯DB')) result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 2023-05-01¸ß˹DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), NULL) result, collation for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, CONCAT(_utf8mb4'高斯DB')) result, collation for(result); +SELECT CONCAT(NULL, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -- -- -- CONCAT NUMBERS -SELECT CONCAT('100', 200) result, 
collation for(result); +SELECT CONCAT('100', 200) result, pg_collation_for(result); result | pg_collation_for --------+------------------ 100200 | gbk_chinese_ci (1 row) -SELECT CONCAT('100', date'2021-01-01') result, collation for(result); +SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 1002021-01-01 | gbk_chinese_ci (1 row) -SELECT CONCAT('100', NULL) result, collation for(result); +SELECT CONCAT('100', NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT('100', NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT('100', NULL::text) result, collation for(result); +SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(100, 200) result, collation for(result); +SELECT CONCAT(100, 200) result, pg_collation_for(result); result | pg_collation_for --------+------------------ 100200 | gbk_chinese_ci (1 row) -SELECT CONCAT(100, date'2021-01-01') result, collation for(result); +SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 1002021-01-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(100, NULL) result, collation for(result); +SELECT CONCAT(100, NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(100, NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(100, NULL::text) result, collation for(result); +SELECT 
CONCAT(100, NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, NULL::text) result, collation for(result); +SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL), '100') result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::text), '100') result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::text), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci @@ 
-1683,110 +1683,110 @@ LINE 1: ...30_chi, fgb18030_chi = fgbk_chi FROM t_diff_charset_columns; ^ -- -- COLUMN concat COLUMN -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(fgbk_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | gbk_bin (1 row) -SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | gb18030_bin (1 row) -SELECT CONCAT(futf8_gen, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- result is _bin +SELECT CONCAT(futf8_gen, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- result is _bin ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_uni, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgb18030_bin) result, collation 
for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(fgbk_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_chinese_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_chi, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- concat column and @uservar set enable_set_variable_b_format=on; -- -- -- string var utf8mb4_general_ci set @var_utf8_gen = _utf8mb4'高斯DB' COLLATE utf8mb4_general_ci; -- should support -SELECT collation for(@var_utf8_gen); +SELECT pg_collation_for(@var_utf8_gen); pg_collation_for -------------------- utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_uni, @var_utf8_gen) result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CONCAT(futf8_uni, @var_utf8_gen) result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation ERROR: collation mismatch between collations "utf8mb4_unicode_ci" and "utf8mb4_general_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_bin, @var_utf8_gen) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_general_ci +SELECT CONCAT(fgbk_bin, @var_utf8_gen) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_general_ci result | pg_collation_for ----------------+-------------------- 高斯DB¸ß˹DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(@var_utf8_gen, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- _bin +SELECT CONCAT(@var_utf8_gen, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- _bin result | pg_collation_for ----------------+------------------ ¸ß˹DB高斯DB | utf8mb4_bin @@ -1794,19 +1794,19 @@ SELECT CONCAT(@var_utf8_gen, futf8_bin) result, collation for(result) FROM t_dif -- -- -- string var gbk_chinese_ci set @var_gbk_chi = '高斯DB' COLLATE gbk_chinese_ci; -- should support -SELECT collation for(@var_gbk_chi); +SELECT pg_collation_for(@var_gbk_chi); pg_collation_for ------------------ gbk_chinese_ci (1 row) -SELECT CONCAT(futf8_uni, @var_gbk_chi) result, 
collation for(result) FROM t_diff_charset_columns; -- futf8_uni +SELECT CONCAT(futf8_uni, @var_gbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_uni result | pg_collation_for ------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_bin @@ -1814,7 +1814,7 @@ SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_ -- -- -- number var set @var_num = 5.0; -SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(@var_num, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin result | pg_collation_for -----------+------------------ 5高斯DB | gbk_bin @@ -1822,13 +1822,13 @@ SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_char -- -- -- varbinary var set @var_binary = _binary'高斯DB'; -SELECT CONCAT(futf8_bin, @var_binary) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, @var_binary) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------------+------------------ 楂樻柉DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(@var_binary, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯DB | gbk_chinese_ci @@ -1837,7 +1837,7 @@ SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_c -- -- concat column and bind parameter -- -- -- -- PBE with implicit collation PREPARE test_merge_collation(text) AS -SELECT 
CONCAT(futf8_uni, $1) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, $1) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- futf8_uni collation has priority @@ -1852,7 +1852,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB'); -- same as above 高斯db¸ß˹DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above result | pg_collation_for ----------------+-------------------- 高斯db¸ß˹DB | utf8mb4_unicode_ci @@ -1872,7 +1872,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- same as above 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above result | pg_collation_for ------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci @@ -1895,7 +1895,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB' collate utf8mb4_unicode_ci); -- DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); @@ -1927,7 +1927,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- utf8mb4_unicode_ci DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate utf8mb4_unicode_ci, 
futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR +SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR ERROR: COLLATION "utf8mb4_unicode_ci" is not valid for CHARACTER SET "GBK" LINE 2: SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) resu... ^ @@ -1987,178 +1987,178 @@ DEALLOCATE test_revalidate; SET NAMES gbk; -- -- concat for DERIVATION -- -- -- same charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict ERROR: collation mismatch between collations "utf8mb4_unicode_ci" and "utf8mb4_general_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- 高斯db5.1.1 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- 5.1.1高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯db高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT('高斯DB', fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯DB高斯db | gbk_chinese_ci (1 row) -SELECT CONCAT(futf8_uni, 123) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; 
result | pg_collation_for -------------+-------------------- 高斯db123 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(123, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- 123高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+-------------------- 高斯db2023-05-01 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+-------------------- 2023-05-01高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, NULL) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(NULL, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- | utf8mb4_unicode_ci (1 row) -- -- -- diff charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations 
"utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ 高斯db5.1.1 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ 5.1.1高斯DB | gbk_bin (1 row) -SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT('高斯DB', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯DB高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(fgbk_chi, 123) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for 
-------------+------------------ 高斯db123 | gbk_chinese_ci (1 row) -SELECT CONCAT(123, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ 123高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+------------------ 高斯db2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+------------------ 2023-05-01高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, NULL) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ | gbk_bin @@ -2166,33 +2166,33 @@ SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_ -- -- test explicit collate on concat -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯DB高斯db | utf8mb4_general_ci (1 
row) -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci r... ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci re... ^ -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text ERROR: COLLATION "binary" is not valid for CHARACTER SET "GBK" -LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, coll... +LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... 
^ -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text ERROR: COLLATION "utf8mb4_general_ci" is not valid for CHARACTER SET "GBK" LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci re... ^ @@ -2211,73 +2211,73 @@ SELECT CAST('DEADBEEF' AS blob) COLLATE "binary" result; -- -- case when -- -- -- condition same charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | gbk_bin (1 row) -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END 
result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | gbk_bin (1 row) -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM 
t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "utf8mb4_general_ci" and "utf8mb4_unicode_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- condition same charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+-------------------- 高斯db | utf8mb4_unicode_ci (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+-------------------- 高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | 
utf8mb4_bin (1 row) -SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+-------------------- 高斯DB | utf8mb4_unicode_ci @@ -2285,73 +2285,73 @@ SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result -- -- -- condition diff charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | gbk_bin (1 row) -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE 
futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | gbk_bin (1 row) -SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- condition diff charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, collation 
for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- in -- -- -- column utf8 @@ -2477,46 +2477,46 @@ ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_bin" LINE 1: ... 
in (SELECT t2.fgb18030_bin FROM t_diff_charset_columns t2); ^ -- -- COALESCE -SELECT COALESCE(fgbk_chi, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT COALESCE(fgbk_chi, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT COALESCE(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT COALESCE(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict +SELECT COALESCE(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict ERROR: collation mismatch between collations "utf8mb4_unicode_ci" and "utf8mb4_general_ci" -LINE 1: ... result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -SELECT COALESCE(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT COALESCE(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ... 
result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -- -- GREATEST -SELECT GREATEST(fgbk_chi, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT GREATEST(fgbk_chi, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT GREATEST(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for ----------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT GREATEST(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict +SELECT GREATEST(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict ERROR: collation mismatch between collations "utf8mb4_unicode_ci" and "utf8mb4_general_ci" -LINE 1: ... result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -SELECT GREATEST(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT GREATEST(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ... 
result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -- -- XMLEXPR SELECT xmlelement(NAME a, fgbk_chi, futf8_bin) result FROM t_diff_charset_columns; diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index f379726e2..33d4b3ddb 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -978,345 +978,345 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB' COLLATE utf8mb (1 row) -- -- -- same charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci (1 row) -- -- -- diff charset & explicit collation -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) 
-SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between 
collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...' COLLATE gb18030_chinese_ci) result, collation for(result); +LINE 1: ...OLLATE gb18030_chinese_ci) result, pg_collation_for(result); ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_bin" -LINE 1: ...'高斯DB' COLLATE gb18030_bin) result, collation for(result); +LINE 1: ...æ–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_chinese_ci" -LINE 1: ...' 
COLLATE gb18030_chinese_ci) result, collation for(result); +LINE 1: ...OLLATE gb18030_chinese_ci) result, pg_collation_for(result); ^ -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...'高斯DB' COLLATE gb18030_bin) result, collation for(result); +LINE 1: ...æ–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 楂樻柉DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , 
_gb18030'高斯DB') result, collation for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...k'高斯DB' , _gb18030'高斯DB') result, collation for(result); - ^ -SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, collation for(result); +LINE 1: ...æ–¯DB' , _gb18030'高斯DB') result, pg_collation_for(result); + ^ +SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 楂樻柉DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, collation for(result); +SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) -- -- -- explicit & implicit -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); result | pg_collation_for --------------+------------------ 高斯DB高斯DB | "C" (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.utf8") result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.utf8") result, pg_collation_for(result); result | pg_collation_for --------------+------------------ 
高斯DB高斯DB | "zh_CN.utf8" (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_bin) result, pg_collation_for(result); result | pg_collation_for --------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 楂樻柉DB楂樻柉db | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | gbk_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, 
pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 楂樻柉DB楂樻柉db | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | gb18030_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' 
COLLATE utf8mb4_bin , _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+------------------ 高斯DB\xe9ab98e696af4442 | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 楂樻柉DB楂樻柉DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 楂樻柉DB楂樻柉DB | gb18030_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 楂樻柉DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for 
------------------+------------------ 楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 楂樻柉DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------+------------------ 楂樻柉DB楂樻柉DB | gbk_bin (1 row) -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------------+-------------------- \xe9ab98e696af4442高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ \xe9ab98e696af4442楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', 
_gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------+-------------------- 高斯DB楂樻柉DB楂樻柉DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); result | pg_collation_for ------------------------+------------------ 高斯DB楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------------------+-------------------- 高斯DB高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ----------------------------------+-------------------- 高斯DB楂樻柉DB\xe9ab98e696af4442 | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...bk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); +LINE 1: ...高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, collation for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations 
"gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...gbk'高斯DB', _binary'高斯DB') result, collation for(result); +LINE 1: ...'高斯DB', _binary'高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------+-------------------- 楂樻柉DB楂樻柉DB高斯DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------------------+------------------ 楂樻柉DB楂樻柉DB\xe9ab98e696af4442 | gbk_chinese_ci @@ -1352,25 +1352,25 @@ ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chin LINE 1: ...'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB'); ^ -- -- -- const CONCAT CONCAT -SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 楂樻柉DB楂樻柉DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); result | pg_collation_for ----------------------------------+-------------------- 楂樻柉DB楂樻柉DB楂樻柉DB楂樻柉DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for 
------------------+------------------ 楂樻柉DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); result | pg_collation_for ----------------------------------+------------------ 楂樻柉DB楂樻柉DB楂樻柉DB楂樻柉DB | utf8mb4_bin @@ -1378,98 +1378,98 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); +SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 高斯DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); +SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 5.1.1高斯DB | utf8_general_ci (1 row) -SELECT CONCAT('高斯DB', 123) result, collation for(result); +SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 高斯DB123 | utf8mb4_general_ci (1 row) -SELECT CONCAT(123, '高斯DB') result, collation for(result); +SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 123高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT('高斯DB', DATE '2023-05-01') result, collation for(result); +SELECT CONCAT('高斯DB', DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 高斯DB2023-05-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); 
result | pg_collation_for ------------------+-------------------- 2023-05-01高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT('高斯DB', NULL) result, collation for(result); +SELECT CONCAT('高斯DB', NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, '高斯DB') result, collation for(result); +SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -- -- -- -- diff charset -SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 楂樻柉DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 5.1.1楂樻柉DB | utf8_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB', 123) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB', 123) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 楂樻柉DB123 | gbk_chinese_ci (1 row) -SELECT CONCAT(123, _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(123, _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 123楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB', DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 楂樻柉DB2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 
2023-05-01楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_gbk'高斯DB', NULL) result, collation for(result); +SELECT CONCAT(_gbk'高斯DB', NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(NULL, _gbk'高斯DB') result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci @@ -1477,207 +1477,207 @@ SELECT CONCAT(NULL, _gbk'高斯DB') result, collation for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 高斯DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 5.1.1高斯DB | utf8_general_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 高斯DB123 | utf8mb4_general_ci (1 row) -SELECT CONCAT(123, CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(123, CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for -----------+-------------------- 123高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 高斯DB2023-05-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(DATE 
'2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for ------------------+-------------------- 2023-05-01高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT('高斯DB'), NULL) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, collation for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 楂樻柉DB5.1.1 | utf8_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ 5.1.1楂樻柉DB | utf8_general_ci (1 row) -SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, collation for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 楂樻柉DB123 | gbk_chinese_ci (1 row) -SELECT CONCAT(123, CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(123, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for -------------+------------------ 123楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(_gbk'高斯DB'), DATE '2023-05-01') result, collation for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 楂樻柉DB2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', 
CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------------------+------------------ 2023-05-01楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(_gbk'高斯DB'), NULL) result, collation for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), NULL) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(NULL, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -- -- -- CONCAT NUMBERS -SELECT CONCAT('100', 200) result, collation for(result); +SELECT CONCAT('100', 200) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- 100200 | utf8mb4_general_ci (1 row) -SELECT CONCAT('100', date'2021-01-01') result, collation for(result); +SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); result | pg_collation_for ---------------+-------------------- 1002021-01-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT('100', NULL) result, collation for(result); +SELECT CONCAT('100', NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT('100', NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT('100', NULL::text) result, collation for(result); +SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, 200) result, collation for(result); +SELECT CONCAT(100, 200) result, pg_collation_for(result); result | pg_collation_for 
--------+-------------------- 100200 | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, date'2021-01-01') result, collation for(result); +SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); result | pg_collation_for ---------------+-------------------- 1002021-01-01 | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, NULL) result, collation for(result); +SELECT CONCAT(100, NULL) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, NULL::text) result, collation for(result); +SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, NULL::varbinary(16)) result, collation for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, NULL::text) result, collation for(result); +SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL), '100') result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::text), '100') result, collation for(result); +SELECT 
CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::text), 100) result, collation for(result); +SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); result | pg_collation_for --------+-------------------- | utf8mb4_general_ci @@ -2107,141 +2107,141 @@ SELECT fgb18030_bin = fblob, fblob = fgb18030_bin FROM t_diff_charset_columns; - ERROR: invalid hexadecimal digit: "¸" -- -- COLUMN concat COLUMN -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(fgbk_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯db | gbk_bin (1 row) -SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯db | gb18030_bin (1 row) -SELECT CONCAT(futf8_gen, futf8_uni) result, 
collation for(result) FROM t_diff_charset_columns; -- result is _bin +SELECT CONCAT(futf8_gen, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- result is _bin result | pg_collation_for --------------+------------------ 高斯db高斯db | (1 row) -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_uni, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for 
--------------+-------------------- 高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(fgbk_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_chinese_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_chi, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- with binary & implicit collation -SELECT CONCAT(futf8_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+-------------------- 高斯DBE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_uni, fbin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------------+-------------------- 高斯db\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_uni, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------+-------------------- 高斯dbE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(fgbk_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------------------+-------------------- ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(fgbk_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------------+-------------------- ˹DBE9AB98E696AF | utf8mb4_general_ci (1 row) 
-SELECT CONCAT(fgb18030_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------------------+-------------------- ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) -SELECT CONCAT(fgb18030_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------------+-------------------- ˹DBE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(fbin, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------------------------+-------------------- \xe9ab98e696af4442E9AB98E696AF | utf8mb4_general_ci @@ -2251,25 +2251,25 @@ SELECT CONCAT(fbin, fblob) result, collation for(result) FROM t_diff_charset_col set enable_set_variable_b_format=on; -- -- -- string var utf8mb4_general_ci set @var_utf8_uni = '高斯DB' COLLATE utf8mb4_unicode_ci; -- should support -SELECT collation for(@var_utf8_uni); +SELECT pg_collation_for(@var_utf8_uni); pg_collation_for -------------------- utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_gen, @var_utf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CONCAT(futf8_gen, @var_utf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation result | pg_collation_for --------------+------------------ 高斯db高斯DB | (1 row) -SELECT CONCAT(fgbk_bin, @var_utf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT CONCAT(fgbk_bin, @var_utf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT 
CONCAT(@var_utf8_uni, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- _bin +SELECT CONCAT(@var_utf8_uni, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- _bin result | pg_collation_for --------------+------------------ 高斯DB高斯DB | utf8mb4_bin @@ -2277,19 +2277,19 @@ SELECT CONCAT(@var_utf8_uni, futf8_bin) result, collation for(result) FROM t_dif -- -- -- string var gbk_chinese_ci set @var_gbk_chi = _gbk'高斯DB' COLLATE gbk_chinese_ci; -- should support -SELECT collation for(@var_gbk_chi); +SELECT pg_collation_for(@var_gbk_chi); pg_collation_for ------------------ gbk_chinese_ci (1 row) -SELECT CONCAT(futf8_uni, @var_gbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- futf8_uni +SELECT CONCAT(futf8_uni, @var_gbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_uni result | pg_collation_for ----------------+-------------------- 高斯db楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin result | pg_collation_for ----------------+------------------ 楂樻柉DB高斯DB | gbk_bin @@ -2297,13 +2297,13 @@ SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_ -- -- -- number var set @var_num = 5.0; -SELECT CONCAT(futf8_bin, @var_num) result, collation for(result) FROM t_diff_charset_columns; -- futf8_bin +SELECT CONCAT(futf8_bin, @var_num) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_bin result | pg_collation_for ---------+------------------ 高斯DB5 | utf8mb4_bin (1 row) -SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(@var_num, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin result | pg_collation_for ---------+------------------ 5高斯DB 
| gbk_bin @@ -2311,13 +2311,13 @@ SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_char -- -- -- varbinary var set @var_binary = _binary'高斯DB'; -SELECT CONCAT(futf8_bin, @var_binary) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, @var_binary) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(@var_binary, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------+-------------------- 高斯DB˹DB | utf8mb4_general_ci @@ -2326,7 +2326,7 @@ SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_c -- -- concat column and bind parameter -- -- -- -- PBE with implicit collation PREPARE test_merge_collation(text) AS -SELECT CONCAT(futf8_uni, $1) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, $1) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- futf8_uni collation has priority @@ -2341,7 +2341,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB'); -- same as above 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above result | pg_collation_for --------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci @@ -2361,7 +2361,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- same as above 高斯db楂樻柉DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT 
CONCAT(futf8_uni, _gbk'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above result | pg_collation_for ----------------+-------------------- 高斯db楂樻柉DB | utf8mb4_unicode_ci @@ -2384,7 +2384,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB' collate utf8mb4_unicode_ci); -- DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); @@ -2416,7 +2416,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- utf8mb4_unicode_ci DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR +SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 2: SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, ... 
^ @@ -2481,7 +2481,7 @@ DECLARE collation_res text; concat_res_assign varchar(64); CURSOR c1 IS - SELECT CONCAT(p1, p2) result, collation for(result); + SELECT CONCAT(p1, p2) result, pg_collation_for(result); begin OPEN c1; LOOP @@ -2501,7 +2501,7 @@ CALL merge_collation_func('高斯DB', _gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation | @concat_res_assign ----------------+--------------------+--------------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_general_ci | utf8mb4_general_ci | 高斯DB楂樻柉DB @@ -2513,7 +2513,7 @@ CALL merge_collation_func(_gb18030'高斯DB', _gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation | @concat_res_assign ------------------+--------------------+--------------------+-------------------- 楂樻柉DB楂樻柉DB | utf8mb4_general_ci | utf8mb4_general_ci | 楂樻柉DB楂樻柉DB @@ -2525,7 +2525,7 @@ CALL merge_collation_func(_gb18030'高斯DB', _gbk'高斯DB' collate gbk_chinese (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation | @concat_res_assign ------------------+--------------------+--------------------+-------------------- 楂樻柉DB楂樻柉DB | utf8mb4_general_ci | utf8mb4_general_ci | 楂樻柉DB楂樻柉DB @@ -2539,7 +2539,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(futf8_uni, p1) result, collation for(result) 
FROM t_diff_charset_columns; + SELECT CONCAT(futf8_uni, p1) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -2557,7 +2557,7 @@ CALL merge_collation_func('高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict @concat_res | @collation_res | real_collation --------------+----------------+-------------------- 高斯db高斯DB | | utf8mb4_general_ci @@ -2569,7 +2569,7 @@ CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict @concat_res | @collation_res | real_collation --------------+----------------+-------------------- 高斯db高斯DB | | utf8mb4_general_ci @@ -2581,7 +2581,7 @@ CALL merge_collation_func(_gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation;; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation;; -- conflict @concat_res | @collation_res | real_collation ----------------+----------------+-------------------- 高斯db楂樻柉DB | | utf8mb4_general_ci @@ -2593,7 +2593,7 @@ CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation;; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation;; -- conflict @concat_res | @collation_res | real_collation ----------------+----------------+-------------------- 高斯db楂樻柉DB | | utf8mb4_general_ci @@ -2607,7 +2607,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(p1, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(p1, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN 
c1; LOOP @@ -2625,7 +2625,7 @@ CALL merge_collation_func('高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation --------------+--------------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci | utf8mb4_general_ci @@ -2637,7 +2637,7 @@ CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation --------------+--------------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci | utf8mb4_general_ci @@ -2649,7 +2649,7 @@ CALL merge_collation_func(_gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation ----------------+--------------------+-------------------- 楂樻柉DB高斯DB | utf8mb4_general_ci | utf8mb4_general_ci @@ -2661,7 +2661,7 @@ CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci @concat_res | @collation_res | real_collation ----------------+--------------------+-------------------- 楂樻柉DB高斯DB | utf8mb4_general_ci | utf8mb4_general_ci @@ -2675,7 +2675,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(p1, fgbk_bin collate gbk_bin) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(p1, fgbk_bin collate gbk_bin) result, 
pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -2693,7 +2693,7 @@ CALL merge_collation_func('高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation --------------+----------------+-------------------- 高斯DB高斯DB | gbk_bin | utf8mb4_general_ci @@ -2705,7 +2705,7 @@ CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation --------------+----------------+-------------------- 高斯DB高斯DB | gbk_bin | utf8mb4_general_ci @@ -2717,7 +2717,7 @@ CALL merge_collation_func(_gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation ----------------+----------------+-------------------- 楂樻柉DB高斯DB | gbk_bin | utf8mb4_general_ci @@ -2729,7 +2729,7 @@ CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation ----------------+----------------+-------------------- 楂樻柉DB高斯DB | gbk_bin | utf8mb4_general_ci @@ -2743,7 +2743,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(fgbk_bin, p1) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(fgbk_bin, p1) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -2761,7 +2761,7 @@ CALL 
merge_collation_func(100); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation -------------+----------------+-------------------- 高斯DB100 | gbk_bin | utf8mb4_general_ci @@ -2775,7 +2775,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -2789,9 +2789,9 @@ end; / CALL merge_collation_func('高斯DB'); -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci)result, c... +LINE 1: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci)result, p... ^ -QUERY: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci)result, collation for(result)FROM t_diff_charset_columns +QUERY: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci)result, pg_collation_for(result)FROM t_diff_charset_columns CONTEXT: PL/pgSQL function merge_collation_func(text) line 7 at OPEN DROP procedure merge_collation_func; -- -- concat column and PROCEDURE parameter with select into @@ -2799,7 +2799,7 @@ DROP procedure merge_collation_func; create or replace procedure merge_collation_func(p1 text) as begin - SELECT CONCAT(futf8_uni, p1) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(futf8_uni, p1) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / CALL merge_collation_func('高斯DB'); @@ -2808,7 +2808,7 @@ CALL merge_collation_func('高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT 
@concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict @concat_res | @collation_res | real_collation --------------+--------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci | utf8mb4_unicode_ci @@ -2820,7 +2820,7 @@ CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict @concat_res | @collation_res | real_collation --------------+--------------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci | utf8mb4_unicode_ci @@ -2832,7 +2832,7 @@ CALL merge_collation_func(_gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_unicode_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_unicode_ci @concat_res | @collation_res | real_collation ----------------+--------------------+-------------------- 高斯db楂樻柉DB | utf8mb4_unicode_ci | utf8mb4_unicode_ci @@ -2844,7 +2844,7 @@ CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_unicode_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_unicode_ci @concat_res | @collation_res | real_collation ----------------+--------------------+-------------------- 高斯db楂樻柉DB | utf8mb4_unicode_ci | utf8mb4_unicode_ci @@ -2855,7 +2855,7 @@ DROP procedure merge_collation_func; create or replace procedure merge_collation_func(p1 text) as begin - SELECT CONCAT(p1, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(p1, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / CALL merge_collation_func('高斯DB'); @@ -2864,7 +2864,7 @@ CALL 
merge_collation_func('高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation --------------+----------------+---------------- 高斯DB高斯DB | gbk_bin | gbk_bin @@ -2876,7 +2876,7 @@ CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation --------------+----------------+---------------- 高斯DB高斯DB | gbk_bin | gbk_bin @@ -2888,7 +2888,7 @@ CALL merge_collation_func(_gbk'高斯DB'); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation ----------------+----------------+---------------- 楂樻柉DB高斯DB | gbk_bin | gbk_bin @@ -2900,7 +2900,7 @@ CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); (1 row) -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin @concat_res | @collation_res | real_collation ----------------+----------------+---------------- 楂樻柉DB高斯DB | gbk_bin | gbk_bin @@ -2911,222 +2911,222 @@ DROP procedure merge_collation_func; create or replace procedure merge_collation_func(p1 text) as begin - SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / CALL merge_collation_func('高斯DB'); -- ERROR ERROR: 
COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, ... ^ -QUERY: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res +QUERY: SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res CONTEXT: PL/pgSQL function merge_collation_func(text) line 2 at SQL statement DROP procedure merge_collation_func; -- -- concat for DERIVATION -- -- -- same charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯DB | (1 row) -SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside result | pg_collation_for --------------------+------------------ 高斯DB高斯db高斯db | (1 row) -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci result | pg_collation_for --------------------+-------------------- 高斯db高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT 
CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci result | pg_collation_for --------------------+-------------------- 高斯DB高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict result | pg_collation_for --------------+------------------ 高斯db高斯db | (1 row) -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict result | pg_collation_for --------------+------------------ 高斯db高斯db | (1 row) -SELECT CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- 高斯db5.1.1 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- 5.1.1高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯db高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT('高斯DB', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM 
t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯DB高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, 123) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------+-------------------- 高斯db123 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(123, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------+-------------------- 123高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 高斯db2023-05-01 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+-------------------- 2023-05-01高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, NULL) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(NULL, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- | utf8mb4_unicode_ci (1 row) -- -- -- diff charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, collation 
for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | (1 row) -SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | (1 row) -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), 
fgbk_chi COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | gbk_chinese_ci (1 row) -SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci +SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(futf8_gen, fgbk_chi), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(CONCAT(futf8_gen, fgbk_chi), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | (1 row) -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen, fgbk_chi)) result, collation for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen, fgbk_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict result | pg_collation_for --------------------+------------------ 高斯db高斯db高斯db | (1 row) -SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ...) 
result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" -LINE 1: ...) result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ 高斯db5.1.1 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ 5.1.1高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯db高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT('高斯DB', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, 123) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------+------------------ 高斯db123 | gbk_chinese_ci (1 
row) -SELECT CONCAT(123, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -----------+------------------ 123高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 高斯db2023-05-01 | gbk_chinese_ci (1 row) -SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ------------------+------------------ 2023-05-01高斯DB | gbk_bin (1 row) -SELECT CONCAT(fgbk_chi, NULL) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ | gbk_bin @@ -3134,33 +3134,33 @@ SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_ -- -- test explicit collate on concat -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯DB高斯db | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, 
collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci r... ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci re... ^ -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, coll... +LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... 
^ -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text result | pg_collation_for --------------------------+-------------------- \xe9ab98e696af4442高斯db | utf8mb4_general_ci @@ -3181,50 +3181,50 @@ SELECT CAST('DEADBEEF' AS blob) COLLATE "binary" result; -- -- case when -- -- -- condition same charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | gbk_bin (1 row) -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation result | pg_collation_for --------+------------------ 高斯db | (1 row) -SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; 
-- ERROR result | pg_collation_for --------+------------------ 高斯db | gbk_bin (1 row) -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | gbk_bin (1 row) -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation result | pg_collation_for --------+------------------ 高斯db | (1 row) -SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR result | pg_collation_for --------+------------------ 高斯db | gbk_bin @@ -3232,30 +3232,30 @@ SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, -- -- -- condition same charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM 
t_diff_charset_columns; result | pg_collation_for --------+-------------------- 高斯db | utf8mb4_unicode_ci (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- 高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+-------------------- 高斯DB | utf8mb4_unicode_ci @@ -3263,78 +3263,78 @@ SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result -- -- -- condition diff charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN 
(futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | gbk_bin (1 row) -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | gbk_bin (1 row) -SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, 
collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- condition diff charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE 
(futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯DB | utf8mb4_bin (1 row) -SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- -- conflict -SELECT CASE _gb18030'高斯' WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE _gb18030'高斯' WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_bin" -LINE 1: ...D result, collation for(result) FROM t_diff_charset_columns; +LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- in -- -- -- column utf8 @@ -3470,48 +3470,48 @@ ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_bin" LINE 1: ... 
in (SELECT t2.fgb18030_bin FROM t_diff_charset_columns t2); ^ -- -- COALESCE -SELECT COALESCE(fgbk_chi, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT COALESCE(fgbk_chi, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT COALESCE(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT COALESCE(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict +SELECT COALESCE(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict result | pg_collation_for --------+------------------ 高斯db | (1 row) -SELECT COALESCE(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT COALESCE(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ... 
result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -- -- GREATEST -SELECT GREATEST(fgbk_chi, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT GREATEST(fgbk_chi, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT GREATEST(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | utf8mb4_bin (1 row) -SELECT GREATEST(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict +SELECT GREATEST(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict ERROR: could not determine which collation to use for string comparison HINT: Use the COLLATE clause to set the collation explicitly. CONTEXT: referenced column: result -SELECT GREATEST(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT GREATEST(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chinese_ci" -LINE 1: ... 
result, collation for (result) FROM t_diff_charset_columns; +LINE 1: ...sult, pg_collation_for (result) FROM t_diff_charset_columns; ^ -- -- rowcompare SELECT futf8_bin, futf8_uni, fgbk_bin, fgbk_chi, fgb18030_bin, fgb18030_chi FROM t_diff_charset_columns @@ -3692,61 +3692,61 @@ select _utf8mb4'GS', GROUP_CONCAT(fgbk_bin, fgbk_chi), fgbk_bin FROM t_diff_char -- -- test COLLATE for function -- -- -- for string function -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE utf8mb4_general_ci result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE utf8mb4_general_ci result, pg_collation_for(result); result | pg_collation_for ----------------+-------------------- 高斯DB楂樻柉DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE gbk_chinese_ci result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE gbk_chinese_ci result, pg_collation_for(result); -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: ...unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE gb... ^ -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE utf8mb4_bin result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE utf8mb4_bin result, pg_collation_for(result); result | pg_collation_for ----------------+------------------ 高斯DB楂樻柉DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE gbk_chinese_ci result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE gbk_chinese_ci result, pg_collation_for(result); -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE gbk_c... 
^ -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "binary" result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "bina... ^ -SELECT CONCAT(futf8_uni , futf8_gen) COLLATE utf8mb4_bin result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni , futf8_gen) COLLATE utf8mb4_bin result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯db高斯db | utf8mb4_bin (1 row) -SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci ... ^ -- -- -- for binary argument string function -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "bi... 
^ -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); result | pg_collation_for --------------------------+-------------------- 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, coll... +LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_c... ^ -SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result... ^ -- -- -- for convert function -SELECT CONVERT(futf8_uni USING 'GBK') result, collation for(result) FROM t_diff_charset_columns; +SELECT CONVERT(futf8_uni USING 'GBK') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------+------------------ 高斯db | gbk_chinese_ci @@ -3762,13 +3762,13 @@ SELECT CONVERT(futf8_uni USING 'GBK') COLLATE "binary" result FROM t_diff_charse ERROR: COLLATION "binary" is not valid for CHARACTER SET "GBK" LINE 1: SELECT CONVERT(futf8_uni USING 'GBK') COLLATE "binary" resul... 
^ -SELECT CONCAT(CONVERT(futf8_uni USING 'GBK'), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONVERT(futf8_uni USING 'GBK'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+-------------------- 高斯db高斯db | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for --------------+------------------ 高斯db高斯db | gbk_bin diff --git a/contrib/dolphin/expected/column_name.out b/contrib/dolphin/expected/column_name.out index 1b5e4bcb1..a20f70b81 100644 --- a/contrib/dolphin/expected/column_name.out +++ b/contrib/dolphin/expected/column_name.out @@ -28,7 +28,7 @@ NOTICE: table "a1" does not exist, skipping drop table if exists a2; NOTICE: table "a2" does not exist, skipping select currenT_User into a1; -select session_UsEr into a2; +select current_UsEr into a2; --create use some index drop table if exists warehouse_t22; NOTICE: table "warehouse_t22" does not exist, skipping @@ -804,7 +804,7 @@ NOTICE: table "a1" does not exist, skipping drop table if exists a2; NOTICE: table "a2" does not exist, skipping select currenT_User into a1; -select session_UsEr into a2; +select current_UsEr into a2; --create use some index drop table if exists warehouse_t22; NOTICE: table "warehouse_t22" does not exist, skipping diff --git a/contrib/dolphin/expected/db_b_date_time_functions.out b/contrib/dolphin/expected/db_b_date_time_functions.out index 70973842e..d28d68a94 100644 --- a/contrib/dolphin/expected/db_b_date_time_functions.out +++ b/contrib/dolphin/expected/db_b_date_time_functions.out @@ -416,12 +416,6 @@ ERROR: Too-big precision 7 specified for 'now'. Maximum is 6. 
CONTEXT: referenced column: now(7) set dolphin.b_db_timestamp = 0.0; -- test sysdate(precision) -select sysdate; - sysdate ---------------------- ---?.* -(1 row) - select sysdate(); sysdate() --------------------- diff --git a/contrib/dolphin/expected/db_b_new_gram_test.out b/contrib/dolphin/expected/db_b_new_gram_test.out index 85f7f8193..ef1a4ff5f 100644 --- a/contrib/dolphin/expected/db_b_new_gram_test.out +++ b/contrib/dolphin/expected/db_b_new_gram_test.out @@ -1229,10 +1229,6 @@ select c1 as authid from authid_t1 as authid; drop table authid; drop table authid_t1; -create table body(body int); -ERROR: syntax error at or near "body" -LINE 1: create table body(body int); - ^ CREATE TABLE `Student` ( `Sno` varchar(3) NOT NULL, `Sname` varchar(8) NOT NULL, diff --git a/contrib/dolphin/expected/kwlist.out b/contrib/dolphin/expected/kwlist.out new file mode 100644 index 000000000..270dc0175 --- /dev/null +++ b/contrib/dolphin/expected/kwlist.out @@ -0,0 +1,424 @@ +create schema keyword_test; +set search_path to keyword_test; +/* cast */ +create table cast(cast int); +insert into cast values(1); +select cast from cast; + cast +------ + 1 +(1 row) + +/* last_day */ +create table last_day(last_day int); +insert into last_day values(1); +select last_day from last_day; + last_day +---------- + 1 +(1 row) + +/* less */ +create table less(less int); +insert into less values(1); +select less from less; + less +------ + 1 +(1 row) + +/* modify */ +create table modify(modify int); +insert into modify values(1); +select modify from modify; + modify +-------- + 1 +(1 row) + +/* modify */ +create table modify(modify int); +ERROR: relation "modify" already exists in schema "keyword_test" +DETAIL: creating new table with existing name in the same schema +insert into modify values(1); +select modify from modify; + modify +-------- + 1 + 1 +(2 rows) + +/* notnull */ +create table notnull(notnull int); +insert into notnull values(1); +select notnull from notnull; + notnull 
+--------- + 1 +(1 row) + +/* recyclebin */ +create table recyclebin(recyclebin int); +insert into recyclebin values(1); +select recyclebin from recyclebin; + recyclebin +------------ + 1 +(1 row) + +/* analyse */ +create table analyse(analyse int); +insert into analyse values(1); +select analyse from analyse; + analyse +--------- + 1 +(1 row) + +-- analyse unsupported +analyse; +ERROR: syntax error at or near "analyse" +LINE 1: analyse; + ^ +explain analyse select 1; +ERROR: syntax error at or near "analyse select" +LINE 1: explain analyse select 1; + ^ +analyse verbose; +ERROR: syntax error at or near "analyse" +LINE 1: analyse verbose; + ^ +alter table analyse analyse /* unsupported analyse keyword*/ verbose partition all; +ERROR: syntax error at or near "analyse" +LINE 1: alter table analyse analyse /* unsupported analyse keyword*/... + ^ +/* buckets */ +create table buckets(buckets int); +insert into buckets values(1); +select buckets from buckets; + buckets +--------- + 1 +(1 row) + +select * from buckets buckets(1,2,3,4) /* unsupported buckets keyword*/; +ERROR: syntax error at or near "1" +LINE 1: select * from buckets buckets(1,2,3,4) /* unsupported bucket... + ^ +alter node group test copy buckets from test2 /* unsupported */; +ERROR: syntax error at or near "copy" +LINE 1: alter node group test copy buckets from test2 /* unsupported... + ^ +create node group mergegroup1 with (datanode1, datanode3, datanode5); /* unsupported */ +ERROR: syntax error at or near "group" +LINE 1: create node group mergegroup1 with (datanode1, datanode3, da... 
+ ^ +/* compact */ +create table compact(compact int); +insert into compact values(1); +select compact from compact; + compact +--------- + 1 +(1 row) + +analyze compact test/* table name */; +ERROR: syntax error at or near "test" +LINE 1: analyze compact test/* table name */; + ^ +-- compact unsupported +create table test(id int) with (row_format=compact); +ERROR: unrecognized parameter "row_format" +/* rownum */ +create table rownum(rownum int); +insert into rownum values(1); +select rownum from rownum; + rownum +-------- + 1 +(1 row) + +select rownum rownum from rownum; /* rownum can be alias name */ + rownum +-------- + 1 +(1 row) + +/* user */ +create table user(user int); +insert into user values(1); +select user from user; + user +------ + 1 +(1 row) + +select user(); --success +--?.* +--?.* +--?.* +(1 row) + +select user; --failed +ERROR: column "user" does not exist +LINE 1: select user; + ^ +CONTEXT: referenced column: user +create user user identified by 'xxxx@xx1xx'; -- username user can be created +NOTICE: The iteration value of password is not recommended.Setting the iteration value too small reduces the security of the password, and setting it too large results in performance degradation. 
+create user mapping for user server mot_server; -- username: user +create user mapping for current_user server mot_server; -- username: current_user +select count(distinct usename) from pg_user_mappings where usename in ('user', current_user()); -- expected: 2 + count +------- + 2 +(1 row) + +alter table user disable trigger user; -- success: disable user trigger +alter table user disable trigger "user"; -- failed: cant not find trigger(named user) +/* sysdate */ +create table sysdate(sysdate int); +insert into sysdate values(1); +select sysdate from sysdate; + sysdate +--------- + 1 +(1 row) + +/* body */ +create table body(body int); +insert into body values(1); +select body from body; + body +------ + 1 +(1 row) + +/* collation */ +create table collation(collation int); +insert into collation values(1); +select collation from collation; + collation +----------- + 1 +(1 row) + +/* minus */ +create table minus(minus int); +insert into minus values(1); +select minus from minus; + minus +------- + 1 +(1 row) + +/* any */ +create table any(any int); +insert into any values(1); +select any from any; + any +----- + 1 +(1 row) + +/* do */ +create table do(do int); +insert into do values(1); +select do from do; + do +---- + 1 +(1 row) + +-- binary/prior do +create rule "test" as on select to t1 where binary do instead select * from t2; -- unsupported +ERROR: syntax error at or near "instead" +LINE 1: ...te rule "test" as on select to t1 where binary do instead se... + ^ +create rule "test" as on select to t1 where prior do instead select * from t2; -- unsupported +ERROR: syntax error at or near "instead" +LINE 1: ...ate rule "test" as on select to t1 where prior do instead se... 
+ ^ +create rule "test" as on select to t1 where binary do do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +create rule "test" as on select to t1 where prior do do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +create rule "test" as on select to t1 where (binary) do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +create rule "test" as on select to t1 where (prior) do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +create rule "test" as on select to t1 where test do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +create rule "test" as on select to t1 where 1=1 do instead select * from t2; -- t1 no exists +ERROR: relation "t1" does not exist +/* end */ +create table end(end int); +insert into end values(1); +select end from end; + end +----- + 1 +(1 row) + +select case when end then binary else binary end from end; -- unsupported +ERROR: syntax error at or near "from" +LINE 1: select case when end then binary else binary end from end; + ^ +select case when end then binary else (binary) end from end; -- binary un exists +ERROR: column "binary" does not exist +LINE 1: select case when end then binary else (binary) end from end; + ^ +CONTEXT: referenced column: binary +select case when end then binary end else binary end end from end; + end +------ + \x31 +(1 row) + +/* only */ +create table only(only int); +insert into only values(1); +select only from only; + only +------ + 1 +(1 row) + +select * from only test; -- scan only(alias: test) table + only +------ + 1 +(1 row) + +select * from only (test); -- scan test(scan InhOption: INH_NO) table +ERROR: relation "test" does not exist on datanode1 +LINE 1: select * from only (test); + ^ +/* verbose */ +create table verbose(verbose int); +insert into verbose values(1); +select verbose from verbose; + verbose +--------- + 1 +(1 row) + +-- cluster +cluster verbose 
verbose; --unsupported +ERROR: syntax error at or near "verbose" +LINE 1: cluster verbose verbose; + ^ +cluster verbose; --supported +ERROR: there is no previously clustered index for table "verbose" +cluster (verbose) verbose; --supported +ERROR: there is no previously clustered index for table "verbose" +cluster verbose partition (test); --supported +ERROR: table is not partitioned +cluster (verbose) verbose partition (test); --supported +ERROR: table is not partitioned +cluster; --supported +cluster (verbose); --supported +cluster (verbose) verbose on verbose; --supported +ERROR: "verbose" is not an index +cluster verbose on verbose; --supported +ERROR: "verbose" is not an index +-- vacuum +vacuum full verbose partition (test); --supported +ERROR: partition "test" of relation "verbose" does not exist +vacuum full verbose subpartition (test); --supported +ERROR: subpartition "test" of relation "verbose" does not exist +VACUUM full analyze verbose; --supported +vacuum (full,freeze,verbose) verbose; --supported +--?.* +--?.* +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +vacuum verbose; +vacuum full verbose; +vacuum full freeze verbose; --unsupported +ERROR: syntax error at or near "verbose" +LINE 1: vacuum full freeze verbose; + ^ +vacuum full freeze verbose compact; --unsupported +ERROR: syntax error at or near "verbose" +LINE 1: vacuum full freeze verbose compact; + ^ +-- analyse +analyze verbose; --supported +analyze verbose verbose; --unsupported +ERROR: syntax error at or near "verbose" +LINE 1: analyze verbose verbose; + ^ +analyze (verbose) verbose; --supported +--?.* +--?.* +analyze verbose; --supported +analyze (verbose) verbose; --supported +--?.* +--?.* +analyze foreign tables; --supported +WARNING: Running analyze on table/tables reside in HDFS directly from data node is not supported. 
+analyze (verbose) foreign tables; --supported +WARNING: Running analyze on table/tables reside in HDFS directly from data node is not supported. +analyze verbose foreign tables; --unsupported +ERROR: syntax error at or near "foreign" +LINE 1: analyze verbose foreign tables; + ^ +create index verbose_index on verbose(verbose); +/* excluded */ +create table excluded(excluded int); +insert into excluded values(1); +select excluded from excluded; + excluded +---------- + 1 +(1 row) + +insert into excluded values (1) on duplicate key update excluded = excluded.excluded; +insert into excluded values (1) as excluded on duplicate key update excluded = excluded.excluded; -- ERROR +ERROR: table reference "excluded" is ambiguous +LINE 1: ...1) as excluded on duplicate key update excluded = excluded.e... + ^ +CONTEXT: referenced column: excluded +insert into excluded values (1) as t on duplicate key update excluded = excluded.excluded; +-- name test +CREATE TABLE x (id int); +CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS ' + BEGIN + NEW.e := ''before trigger fired''::text; + return NEW; + END; +' LANGUAGE plpgsql; +CREATE TRIGGER "user" AFTER INSERT ON x FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); -- unsupported name +ERROR: user cannot be trigger name +CREATE TRIGGER test AFTER INSERT ON x FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); +ALTER TRIGGER test ON x RENAME TO user; -- unsupported name +ERROR: user cannot be trigger name + +reset search_path; +drop schema keyword_test cascade; +NOTICE: drop cascades to 23 other objects +DETAIL: drop cascades to table keyword_test."cast" +drop cascades to table keyword_test.last_day +drop cascades to table keyword_test."less" +drop cascades to table keyword_test."modify" +drop cascades to table keyword_test."notnull" +drop cascades to table keyword_test."recyclebin" +drop cascades to table keyword_test."analyse" +drop cascades to table keyword_test."buckets" +drop cascades to table keyword_test."compact" +drop cascades to table 
keyword_test."rownum" +drop cascades to table keyword_test."user" +drop cascades to table keyword_test."sysdate" +drop cascades to table keyword_test.body +drop cascades to table keyword_test."collation" +drop cascades to table keyword_test."minus" +drop cascades to table keyword_test."any" +drop cascades to table keyword_test."do" +drop cascades to table keyword_test."end" +drop cascades to table keyword_test."only" +drop cascades to table keyword_test."verbose" +drop cascades to table keyword_test."excluded" +drop cascades to table keyword_test.x +drop cascades to function keyword_test.fn_x_before() +drop user user cascade; diff --git a/contrib/dolphin/expected/show_create.out b/contrib/dolphin/expected/show_create.out index b56b484f7..9fe891e03 100644 --- a/contrib/dolphin/expected/show_create.out +++ b/contrib/dolphin/expected/show_create.out @@ -542,7 +542,7 @@ drop view tt1; create view tt20v as select * from coalesce(1,2) as c, - collation for ('x'::text) col, + pg_collation_for ('x'::text) col, current_date as d, cast(1+2 as int4) as i4, cast(1+2 as int8) as i8; diff --git a/contrib/dolphin/expected/test_current_user.out b/contrib/dolphin/expected/test_current_user.out index d0f6cc638..5683b29f6 100644 --- a/contrib/dolphin/expected/test_current_user.out +++ b/contrib/dolphin/expected/test_current_user.out @@ -80,7 +80,9 @@ DROP USER MAPPING FOR current_user SERVER s1; CREATE USER MAPPING FOR current_user() SERVER s1; DROP USER MAPPING FOR current_user() SERVER s1; CREATE USER MAPPING FOR USER SERVER s1; +ERROR: role "USER" does not exist DROP USER MAPPING FOR USER SERVER s1; +ERROR: role "USER" does not exist CREATE USER MAPPING FOR u1 SERVER s1; DROP USER MAPPING FOR u1 SERVER s1; drop user u1; diff --git a/contrib/dolphin/expected/test_system_user.out b/contrib/dolphin/expected/test_system_user.out index 5711d4a7b..ae9d665b0 100644 --- a/contrib/dolphin/expected/test_system_user.out +++ b/contrib/dolphin/expected/test_system_user.out @@ -1,26 +1,14 @@ 
create schema test_system_user; set current_schema to 'test_system_user'; -select session_user; - session_user --------------- ---?.* -(1 row) - select session_user(); session_user -------------- --?.* (1 row) -select user; - current_user --------------- ---?.* -(1 row) - select user(); - current_user --------------- +--?.* +--?.* --?.* (1 row) @@ -31,4 +19,4 @@ select system_user(); (1 row) drop schema test_system_user cascade; -reset current_schema; +reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/include/plugin_parser/kwlist.h b/contrib/dolphin/include/plugin_parser/kwlist.h index 51b598ae1..5b2375bc1 100644 --- a/contrib/dolphin/include/plugin_parser/kwlist.h +++ b/contrib/dolphin/include/plugin_parser/kwlist.h @@ -44,10 +44,12 @@ PG_KEYWORD("all", ALL, RESERVED_KEYWORD) PG_KEYWORD("also", ALSO, UNRESERVED_KEYWORD) PG_KEYWORD("alter", ALTER, UNRESERVED_KEYWORD) PG_KEYWORD("always", ALWAYS, UNRESERVED_KEYWORD) -PG_KEYWORD("analyse", ANALYSE, RESERVED_KEYWORD) /* British spelling */ +/* PG_KEYWORD("analyse", ANALYSE, RESERVED_KEYWORD) *//* British spelling */ PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD) PG_KEYWORD("and", AND, RESERVED_KEYWORD) -PG_KEYWORD("any", ANY, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("any", ANY, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("app", APP, UNRESERVED_KEYWORD) PG_KEYWORD("append", APPEND, UNRESERVED_KEYWORD) PG_KEYWORD("archive", ARCHIVE, UNRESERVED_KEYWORD) @@ -97,16 +99,14 @@ PG_KEYWORD("bit", BIT, COL_NAME_KEYWORD) PG_KEYWORD("blanks", BLANKS, UNRESERVED_KEYWORD) PG_KEYWORD("blob", BLOB_P, UNRESERVED_KEYWORD) PG_KEYWORD("blockchain", BLOCKCHAIN, UNRESERVED_KEYWORD) -#ifdef DOLPHIN -PG_KEYWORD("body", BODY_P, RESERVED_KEYWORD) // origin: UNRESERVED_KEYWORD -#endif +PG_KEYWORD("body", BODY_P, UNRESERVED_KEYWORD) PG_KEYWORD("boolean", BOOLEAN_P, COL_NAME_KEYWORD) PG_KEYWORD("both", BOTH, RESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("box", DB_B_BOX, 
UNRESERVED_KEYWORD) #endif PG_KEYWORD("bucketcnt", BUCKETCNT, COL_NAME_KEYWORD) -PG_KEYWORD("buckets", BUCKETS, RESERVED_KEYWORD) +/* PG_KEYWORD("buckets", BUCKETS, RESERVED_KEYWORD) */ PG_KEYWORD("by", BY, UNRESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("bytea", DB_B_BYTEA, UNRESERVED_KEYWORD) @@ -120,7 +120,9 @@ PG_KEYWORD("cancelable", CANCELABLE, UNRESERVED_KEYWORD) PG_KEYWORD("cascade", CASCADE, UNRESERVED_KEYWORD) PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD) PG_KEYWORD("case", CASE, RESERVED_KEYWORD) -PG_KEYWORD("cast", CAST, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("cast", CAST, COL_NAME_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("catalog", CATALOG_P, UNRESERVED_KEYWORD) PG_KEYWORD("catalog_name", CATALOG_NAME, UNRESERVED_KEYWORD) PG_KEYWORD("chain", CHAIN, UNRESERVED_KEYWORD) @@ -152,7 +154,9 @@ PG_KEYWORD("close", CLOSE, UNRESERVED_KEYWORD) PG_KEYWORD("cluster", CLUSTER, UNRESERVED_KEYWORD) PG_KEYWORD("coalesce", COALESCE, COL_NAME_KEYWORD) PG_KEYWORD("collate", COLLATE, RESERVED_KEYWORD) -PG_KEYWORD("collation", COLLATION, TYPE_FUNC_NAME_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("collation", COLLATION, UNRESERVED_KEYWORD) /* ORIGIN: TYPE_FUNC_NAME_KEYWORD */ +#endif PG_KEYWORD("column", COLUMN, RESERVED_KEYWORD) PG_KEYWORD("column_encryption_key", COLUMN_ENCRYPTION_KEY, UNRESERVED_KEYWORD) PG_KEYWORD("column_encryption_keys", COLUMN_ENCRYPTION_KEYS, UNRESERVED_KEYWORD) @@ -162,7 +166,7 @@ PG_KEYWORD("comment", COMMENT, UNRESERVED_KEYWORD) PG_KEYWORD("comments", COMMENTS, UNRESERVED_KEYWORD) PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD) PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD) -PG_KEYWORD("compact", COMPACT, TYPE_FUNC_NAME_KEYWORD) +/* PG_KEYWORD("compact", COMPACT, TYPE_FUNC_NAME_KEYWORD) */ PG_KEYWORD("compatible_illegal_chars", COMPATIBLE_ILLEGAL_CHARS, UNRESERVED_KEYWORD) PG_KEYWORD("compile", COMPILE, UNRESERVED_KEYWORD) PG_KEYWORD("complete", COMPLETE, UNRESERVED_KEYWORD) @@ -282,8 +286,8 @@ 
PG_KEYWORD("distribution", DISTRIBUTION, UNRESERVED_KEYWORD) #endif #ifdef DOLPHIN PG_KEYWORD("div", DIV, TYPE_FUNC_NAME_KEYWORD) +PG_KEYWORD("do", DO, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ #endif -PG_KEYWORD("do", DO, RESERVED_KEYWORD) PG_KEYWORD("document", DOCUMENT_P, UNRESERVED_KEYWORD) PG_KEYWORD("domain", DOMAIN_P, UNRESERVED_KEYWORD) PG_KEYWORD("double", DOUBLE_P, UNRESERVED_KEYWORD) @@ -303,7 +307,9 @@ PG_KEYWORD("encrypted", ENCRYPTED, UNRESERVED_KEYWORD) PG_KEYWORD("encrypted_value", ENCRYPTED_VALUE, UNRESERVED_KEYWORD) PG_KEYWORD("encryption", ENCRYPTION, UNRESERVED_KEYWORD) PG_KEYWORD("encryption_type", ENCRYPTION_TYPE, UNRESERVED_KEYWORD) -PG_KEYWORD("end", END_P, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("end", END_P, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("ends", ENDS, UNRESERVED_KEYWORD) PG_KEYWORD("enforced", ENFORCED, UNRESERVED_KEYWORD) #ifdef DOLPHIN @@ -323,9 +329,11 @@ PG_KEYWORD("every", EVERY, UNRESERVED_KEYWORD) PG_KEYWORD("except", EXCEPT, RESERVED_KEYWORD) PG_KEYWORD("exchange", EXCHANGE, UNRESERVED_KEYWORD) PG_KEYWORD("exclude", EXCLUDE, UNRESERVED_KEYWORD) -#ifndef ENABLE_MULTIPLE_NODES -PG_KEYWORD("excluded", EXCLUDED, RESERVED_KEYWORD) -#endif +/* + * #ifndef ENABLE_MULTIPLE_NODES + * PG_KEYWORD("excluded", EXCLUDED, RESERVED_KEYWORD) + * #endif +*/ PG_KEYWORD("excluding", EXCLUDING, UNRESERVED_KEYWORD) PG_KEYWORD("exclusive", EXCLUSIVE, UNRESERVED_KEYWORD) PG_KEYWORD("execute", EXECUTE, UNRESERVED_KEYWORD) @@ -370,7 +378,9 @@ PG_KEYWORD("format", DB_B_FORMAT, COL_NAME_KEYWORD) #endif PG_KEYWORD("formatter", FORMATTER, UNRESERVED_KEYWORD) PG_KEYWORD("forward", FORWARD, UNRESERVED_KEYWORD) -PG_KEYWORD("freeze", FREEZE, TYPE_FUNC_NAME_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("freeze", FREEZE, UNRESERVED_KEYWORD) /* ORIGIN: TYPE_FUNC_NAME_KEYWORD */ +#endif PG_KEYWORD("from", FROM, RESERVED_KEYWORD) PG_KEYWORD("full", FULL, TYPE_FUNC_NAME_KEYWORD) #ifdef DOLPHIN @@ -481,7 +491,7 @@ 
PG_KEYWORD("language", LANGUAGE, UNRESERVED_KEYWORD) PG_KEYWORD("large", LARGE_P, UNRESERVED_KEYWORD) PG_KEYWORD("last", LAST_P, UNRESERVED_KEYWORD) #ifdef DOLPHIN -PG_KEYWORD("last_day", LAST_DAY_FUNC, RESERVED_KEYWORD) +PG_KEYWORD("last_day", LAST_DAY_FUNC, COL_NAME_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ #endif PG_KEYWORD("lc_collate", LC_COLLATE_P, UNRESERVED_KEYWORD) PG_KEYWORD("lc_ctype", LC_CTYPE_P, UNRESERVED_KEYWORD) @@ -489,7 +499,9 @@ PG_KEYWORD("leading", LEADING, RESERVED_KEYWORD) PG_KEYWORD("leakproof", LEAKPROOF, UNRESERVED_KEYWORD) PG_KEYWORD("least", LEAST, COL_NAME_KEYWORD) PG_KEYWORD("left", LEFT, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("less", LESS, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("less", LESS, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD) PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD) @@ -546,7 +558,7 @@ PG_KEYWORD("mid", MID, COL_NAME_KEYWORD) PG_KEYWORD("min_rows", MIN_ROWS, UNRESERVED_KEYWORD) #endif PG_KEYWORD("minextents", MINEXTENTS, UNRESERVED_KEYWORD) -PG_KEYWORD("minus", MINUS_P, RESERVED_KEYWORD) +/* PG_KEYWORD("minus", MINUS_P, RESERVED_KEYWORD) */ PG_KEYWORD("minute", MINUTE_P, UNRESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("minute_microsecond", MINUTE_MICROSECOND_P, UNRESERVED_KEYWORD) @@ -560,8 +572,8 @@ PG_KEYWORD("mode", MODE, UNRESERVED_KEYWORD) PG_KEYWORD("model", MODEL, UNRESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("modifies", MODIFIES, UNRESERVED_KEYWORD) +PG_KEYWORD("modify", MODIFY_P, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ #endif -PG_KEYWORD("modify", MODIFY_P, RESERVED_KEYWORD) PG_KEYWORD("month", MONTH_P, UNRESERVED_KEYWORD) PG_KEYWORD("move", MOVE, UNRESERVED_KEYWORD) PG_KEYWORD("movement", MOVEMENT, UNRESERVED_KEYWORD) @@ -591,8 +603,8 @@ PG_KEYWORD("none", NONE, COL_NAME_KEYWORD) PG_KEYWORD("not", NOT, RESERVED_KEYWORD) PG_KEYWORD("nothing", NOTHING, UNRESERVED_KEYWORD) PG_KEYWORD("notify", 
NOTIFY, UNRESERVED_KEYWORD) -PG_KEYWORD("notnull", NOTNULL, TYPE_FUNC_NAME_KEYWORD) #ifdef DOLPHIN +PG_KEYWORD("notnull", NOTNULL, UNRESERVED_KEYWORD) /* ORIGIN: TYPE_FUNC_NAME_KEYWORD */ PG_KEYWORD("now", NOW_FUNC, RESERVED_KEYWORD) #endif PG_KEYWORD("nowait", NOWAIT, UNRESERVED_KEYWORD) @@ -612,7 +624,9 @@ PG_KEYWORD("off", OFF, UNRESERVED_KEYWORD) PG_KEYWORD("offset", OFFSET, RESERVED_KEYWORD) PG_KEYWORD("oids", OIDS, UNRESERVED_KEYWORD) PG_KEYWORD("on", ON, RESERVED_KEYWORD) -PG_KEYWORD("only", ONLY, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("only", ONLY, COL_NAME_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif #ifdef DOLPHIN PG_KEYWORD("open", OPEN, UNRESERVED_KEYWORD) #endif @@ -715,7 +729,9 @@ PG_KEYWORD("reassign", REASSIGN, UNRESERVED_KEYWORD) PG_KEYWORD("rebuild", REBUILD, UNRESERVED_KEYWORD) PG_KEYWORD("recheck", RECHECK, UNRESERVED_KEYWORD) PG_KEYWORD("recursive", RECURSIVE, UNRESERVED_KEYWORD) -PG_KEYWORD("recyclebin", RECYCLEBIN, TYPE_FUNC_NAME_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("recyclebin", RECYCLEBIN, UNRESERVED_KEYWORD) /* ORIGIN: TYPE_FUNC_NAME_KEYWORD */ +#endif PG_KEYWORD("redisanyvalue", REDISANYVALUE, UNRESERVED_KEYWORD) PG_KEYWORD("ref", REF, UNRESERVED_KEYWORD) PG_KEYWORD("references", REFERENCES, RESERVED_KEYWORD) @@ -724,7 +740,9 @@ PG_KEYWORD("refresh", REFRESH, UNRESERVED_KEYWORD) PG_KEYWORD("regexp", REGEXP, TYPE_FUNC_NAME_KEYWORD) #endif PG_KEYWORD("reindex", REINDEX, UNRESERVED_KEYWORD) -PG_KEYWORD("reject", REJECT_P, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("reject", REJECT_P, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("relative", RELATIVE_P, UNRESERVED_KEYWORD) PG_KEYWORD("release", RELEASE, UNRESERVED_KEYWORD) PG_KEYWORD("reloptions", RELOPTIONS, UNRESERVED_KEYWORD) @@ -776,9 +794,11 @@ PG_KEYWORD("row_count", ROW_COUNT, UNRESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("row_format", ROW_FORMAT, UNRESERVED_KEYWORD) #endif -#ifndef ENABLE_MULTIPLE_NODES -PG_KEYWORD("rownum", ROWNUM, 
RESERVED_KEYWORD) -#endif +/* + * #ifndef ENABLE_MULTIPLE_NODES + * PG_KEYWORD("rownum", ROWNUM, RESERVED_KEYWORD) + * #endif +*/ PG_KEYWORD("rows", ROWS, UNRESERVED_KEYWORD) PG_KEYWORD("rowtype", ROWTYPE_P, UNRESERVED_KEYWORD) PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD) @@ -877,7 +897,9 @@ PG_KEYWORD("substring", SUBSTRING, COL_NAME_KEYWORD) PG_KEYWORD("symmetric", SYMMETRIC, RESERVED_KEYWORD) PG_KEYWORD("synonym", SYNONYM, UNRESERVED_KEYWORD) PG_KEYWORD("sys_refcursor", SYS_REFCURSOR, UNRESERVED_KEYWORD) -PG_KEYWORD("sysdate", SYSDATE, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("sysdate", SYSDATE, COL_NAME_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("sysid", SYSID, UNRESERVED_KEYWORD) PG_KEYWORD("system", SYSTEM_P, UNRESERVED_KEYWORD) PG_KEYWORD("table", TABLE, RESERVED_KEYWORD) @@ -956,7 +978,9 @@ PG_KEYWORD("update", UPDATE, UNRESERVED_KEYWORD) PG_KEYWORD("use", USE, UNRESERVED_KEYWORD) #endif PG_KEYWORD("useeof", USEEOF, UNRESERVED_KEYWORD) -PG_KEYWORD("user", USER, RESERVED_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("user", USER, UNRESERVED_KEYWORD) /* ORIGIN: RESERVED_KEYWORD */ +#endif PG_KEYWORD("using", USING, RESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("utc_date", UTC_DATE, RESERVED_KEYWORD) @@ -979,7 +1003,9 @@ PG_KEYWORD("variables", VARIABLES, UNRESERVED_KEYWORD) PG_KEYWORD("variadic", VARIADIC, RESERVED_KEYWORD) PG_KEYWORD("varying", VARYING, UNRESERVED_KEYWORD) PG_KEYWORD("vcgroup", VCGROUP, UNRESERVED_KEYWORD) -PG_KEYWORD("verbose", VERBOSE, TYPE_FUNC_NAME_KEYWORD) +#ifdef DOLPHIN +PG_KEYWORD("verbose", VERBOSE, UNRESERVED_KEYWORD) /* ORIGIN: TYPE_FUNC_NAME_KEYWORD */ +#endif PG_KEYWORD("verify", VERIFY, RESERVED_KEYWORD) PG_KEYWORD("version", VERSION_P, UNRESERVED_KEYWORD) PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD) diff --git a/contrib/dolphin/include/plugin_postgres.h b/contrib/dolphin/include/plugin_postgres.h index 019d13100..786459258 100644 --- a/contrib/dolphin/include/plugin_postgres.h +++ 
b/contrib/dolphin/include/plugin_postgres.h @@ -166,6 +166,7 @@ typedef struct BSqlPluginContext { bool is_ast_stmt; bool group_by_error; bool is_create_alter_stmt; + Alias *upSertAliasName; #endif } bSqlPluginContext; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 72f198aaa..e5f8cc622 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -7,7 +7,7 @@ test: ast b_compatibility_time_type db_b_new_gram_test group_concat_test test_co test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond test_set_password_for_user test_timestamp_overflow -test: db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn +test: db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn kwlist test: empty_value_list empty_value_lists empty_value_support_value create_index test_guc_select_and_set test_copy_year2 test_default diff --git a/contrib/dolphin/plugin_optimizer/commands/alter.cpp b/contrib/dolphin/plugin_optimizer/commands/alter.cpp index fda4691ac..599503df2 100644 --- a/contrib/dolphin/plugin_optimizer/commands/alter.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/alter.cpp @@ -376,9 +376,8 @@ ExecRenameStmt(RenameStmt *stmt) case OBJECT_RLSPOLICY: return RenameRlsPolicy(stmt); - case OBJECT_ROLE: + case OBJECT_ROLE: return RenameRole(stmt->subname, stmt->newname); - case OBJECT_USER: { address = RenameRole(stmt->subname, stmt->newname); @@ -411,8 +410,17 @@ ExecRenameStmt(RenameStmt *stmt) return renameatt(stmt); case OBJECT_TRIGGER: - return renametrig(stmt); + { +#ifdef DOLPHIN + RenameStmt *renameStmt = (RenameStmt *)stmt; + if (pg_strncasecmp(renameStmt->newname, "user", strlen(renameStmt->newname)) == 0) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s cannot be trigger name", renameStmt->newname))); + } +#endif + 
return renametrig(stmt); + } case OBJECT_DOMAIN: case OBJECT_TYPE: return RenameType(stmt); diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index e7c2bb470..6236235d2 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -2763,7 +2763,11 @@ static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upser if (upsertClause->targetList != NIL) { pstate->p_is_insert = false; action = UPSERT_UPDATE; +#ifdef DOLPHIN + exclRte = addRangeTableEntryForRelation(pstate, targetrel, upsertClause->aliasName, false, false); +#else exclRte = addRangeTableEntryForRelation(pstate, targetrel, makeAlias("excluded", NIL), false, false); +#endif exclRte->isexcluded = true; exclRelIndex = list_length(pstate->p_rtable); diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 4d521848c..da9cdb330 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -525,6 +525,12 @@ static SelectStmt *MakeShowGrantStmt(char *arg, int location, core_yyscan_t yysc static List* handleCreateDolphinFuncOptions(List* input_options); static char* appendString(char* source, char* target, int offset); static inline void ChangeBpcharCastType(TypeName* typname); +/** + * ANY KEYWORD + */ +static inline List* NakeLikeOpList(); +static inline List* MakeNotLikeOpList(); +static inline Node* MakeSubLinkWithOp(SubLinkType subType, Node* testExpr, char* op, Node* subSelect, int location); %} %define api.pure @@ -640,7 +646,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); DropOwnedStmt ReassignOwnedStmt AlterTSConfigurationStmt AlterTSDictionaryStmt AnonyBlockStmt BarrierStmt AlterNodeStmt CreateNodeStmt DropNodeStmt AlterCoordinatorStmt - CreateNodeGroupStmt AlterNodeGroupStmt DropNodeGroupStmt + AlterNodeGroupStmt DropNodeGroupStmt CreatePolicyLabelStmt AlterPolicyLabelStmt DropPolicyLabelStmt 
CreateAuditPolicyStmt AlterAuditPolicyStmt DropAuditPolicyStmt CreateMaskingPolicyStmt AlterMaskingPolicyStmt DropMaskingPolicyStmt @@ -670,7 +676,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); /* */ %type select_no_parens select_no_parens_without_withclause select_with_parens select_clause - simple_select values_clause insert_empty_values + simple_select values_clause insert_empty_values insert_mysql_rest_selectStmt %type alter_column_default opclass_item opclass_drop alter_using AutoIncrementValue %type add_drop opt_asc_desc opt_nulls_order con_asc_desc @@ -699,7 +705,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type createdb_opt_list alterdb_opt_list copy_opt_list transaction_mode_list weak_password_string_list create_extension_opt_list alter_extension_opt_list - pgxcnode_list pgxcnodes bucket_maps bucket_list lines_options_fin + pgxcnode_list pgxcnodes bucket_list lines_options_fin opt_pgxcnodes fields_options_list fields_options_fin lines_options_list %type createdb_opt_item alterdb_opt_item copy_opt_item characterset_option transaction_mode_item lines_option_item fields_options_item @@ -734,6 +740,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type TriggerEvents TriggerOneEvent %type TriggerFuncArg %type TriggerWhen +%type opt_values_reference %type event_trigger_when_list event_trigger_value_list %type event_trigger_when_item %type enable_trigger @@ -744,7 +751,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); index_name cluster_index_specification dolphin_index_name pgxcnode_name pgxcgroup_name resource_pool_name workload_group_name application_name password_string hint_string dolphin_force_index_name -%type func_name func_name_opt_arg dolphin_func_name_opt_arg pkg_name handler_name qual_Op qual_all_Op subquery_Op dolphin_func_name +%type func_name func_name_opt_arg dolphin_func_name_opt_arg pkg_name handler_name qual_Op qual_all_Op dolphin_func_name opt_class opt_inline_handler 
opt_validator validator_clause opt_collation collate_option @@ -856,8 +863,8 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type overlay_placing substr_from substr_for optional_precision get_format_time_type %type opt_instead opt_incremental -%type opt_unique opt_concurrently opt_verbose opt_full opt_deltamerge opt_compact opt_hdfsdirectory opt_verify opt_global OptQuickExt -%type opt_freeze opt_default opt_recheck opt_cascade +%type opt_unique opt_concurrently opt_verbose opt_verbose_empty opt_verbose_with_brance opt_full opt_deltamerge opt_compact opt_hdfsdirectory opt_verify opt_global OptQuickExt +%type opt_freeze opt_freeze_empty opt_default opt_recheck opt_cascade %type opt_oids copy_delimiter opt_noescaping %type OptCopyLogError OptCopyRejectLimit opt_load @@ -882,7 +889,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %type SeqOptElem /* INSERT */ -%type insert_rest insert_mysql_rest +%type insert_rest insert_mysql_rest insert_rest_without_select insert_mysql_rest_normal insert_mysql_rest_upsert insert_mysql_rest_ignore %type upsert_clause %type merge_insert merge_update @@ -1163,16 +1170,16 @@ static inline void ChangeBpcharCastType(TypeName* typname); /* ordinary key words in alphabetical order */ /* PGXC - added DISTRIBUTE, DIRECT, COORDINATOR, CLEAN, NODE, BARRIER, SLICE, DATANODE */ %token ABORT_P ABSOLUTE_P ACCESS ACCOUNT ACTION ADD_P ADMIN AFTER - AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC ASCII + AGGREGATE ALGORITHM ALL ALSO ALTER ALWAYS ANALYZE AND ANY APP APPEND ARCHIVE ARRAY AS ASC ASCII ASSERTION ASSIGNMENT ASYMMETRIC AT ATTRIBUTE AUDIT AUTHID AUTHORIZATION AUTOEXTEND AUTOEXTEND_SIZE AUTOMAPPED AUTO_INCREMENT AVG_ROW_LENGTH AGAINST BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_P BINARY_DOUBLE BINARY_INTEGER BIT BLANKS - BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER 
BYTEAWITHOUTORDERWITHEQUAL + BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CATALOG_NAME CHAIN CHANGE CHANNEL CHAR_P CHARACTER CHARACTERISTICS CHARACTERSET CHARSET CHECK CHECKPOINT CHECKSUM CLASS CLASS_ORIGIN CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_NAME COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COLUMNS COMMENT COMMENTS COMMIT CONSISTENT - COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPILE COMPLETE COMPLETION COMPRESS COMPRESSION CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS + COMMITTED COMPATIBLE_ILLEGAL_CHARS COMPILE COMPLETE COMPLETION COMPRESS COMPRESSION CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRAINTS CONTAINS CONTENT_P CONTINUE_P CONTVIEW CONVERSION_P CONVERT CONNECT COORDINATOR COORDINATORS COPY COST CREATE CROSS CSN CSV CUBE CURRENT_P CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA @@ -1187,7 +1194,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); DROP DUPLICATE DISCONNECT DUMPFILE EACH ELASTIC ELSE ENABLE_P ENCLOSED ENCODING ENCRYPTED ENCRYPTED_VALUE ENCRYPTION ENCRYPTION_TYPE END_P ENDS ENFORCED ENGINE_ATTRIBUTE ENGINE_P ENGINES ENUM_P ERRORS ESCAPE ESCAPED EOL ESCAPING EVENT EVENTS EVERY EXCEPT EXCHANGE - EXCLUDE EXCLUDED EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPANSION EXPIRED_P EXPLAIN + EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPANSION EXPIRED_P EXPLAIN EXTENDED EXTENSION EXTERNAL EXTRACT FALSE_P FAMILY FAST FENCED FETCH FIELDS FILEHEADER_P FILL_MISSING_FIELDS FILLER FILTER FIRST_P FIXED_P FLOAT_P FLUSH FOLLOWING FOLLOWS_P FOR FORCE FOREIGN FORMATTER FORWARD @@ -1211,7 +1218,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); LABEL LANGUAGE LARGE_P 
LAST_DAY_FUNC LAST_P LC_COLLATE_P LC_CTYPE_P LEADING LEAKPROOF LEAST LESS LEFT LEVEL LIKE LINES LIMIT LIST LISTEN LOAD LOCAL LOCALTIME LOCALTIMESTAMP LOCATE LOCATION LOCK_P LOCKED LOG_P LOGGING LOGIN_ANY LOGIN_FAILURE LOGIN_SUCCESS LOGOUT LOGS LOOP LOW_PRIORITY - MAPPING MASKING MASTER MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MEDIUMINT MEMORY MERGE MESSAGE_TEXT MICROSECOND_P MID MIN_ROWS MINUS_P MINUTE_P MINUTE_MICROSECOND_P MINUTE_SECOND_P MINVALUE MINEXTENTS MODE + MAPPING MASKING MASTER MATCH MATERIALIZED MATCHED MAXEXTENTS MAXSIZE MAXTRANS MAXVALUE MEDIUMINT MEMORY MERGE MESSAGE_TEXT MICROSECOND_P MID MIN_ROWS MINUTE_P MINUTE_MICROSECOND_P MINUTE_SECOND_P MINVALUE MINEXTENTS MODE MODEL MODIFY_P MONTH_P MOVE MOVEMENT MYSQL_ERRNO MOD MODIFIES MAX_ROWS // DB4AI @@ -1237,7 +1244,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); RANDOMIZED RANGE RATIO RAW READ READS REAL REASSIGN REBUILD RECHECK RECURSIVE RECYCLEBIN REDISANYVALUE REF REFERENCES REFRESH REINDEX REJECT_P RELATIVE_P RELEASE RELOPTIONS REMOTE_P REMOVE RENAME REPEAT REPEATABLE REPLACE REPLICA REPLICAS REGEXP REORGANIZE REPAIR RESET RESIZE RESOURCE RESTART RESTRICT RETURN RETURNED_SQLSTATE RETURNING RETURNS REUSE REVOKE RIGHT ROLE ROLES ROLLBACK ROLLUP - ROTATION ROW ROW_COUNT ROWNUM ROWS ROWTYPE_P RULE + ROTATION ROW ROW_COUNT ROWS ROWTYPE_P RULE RESIGNAL RLIKE ROUTINE ROW_FORMAT SCHEMAS SAMPLE SAVEPOINT SCHEDULE SCHEMA SCHEMA_NAME SCROLL SEARCH SECONDARY_ENGINE_ATTRIBUTE SECOND_P SECOND_MICROSECOND_P SECURITY SELECT SEPARATOR_P SEQUENCE SEQUENCES SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHIPPABLE SHOW SHUTDOWN SIBLINGS SIGNAL SIGNED @@ -1300,6 +1307,8 @@ static inline void ChangeBpcharCastType(TypeName* typname); LABEL_LOOP LABEL_REPEAT LABEL_WHILE WITH_PARSER /* Precedence: lowest to highest */ +%nonassoc AUTHID /* AUTHID has lower priority than the BODY_P */ +%nonassoc BODY_P /* BODY_P has higher priority than the AUTHID */ %nonassoc COMMENT 
%nonassoc FIRST_P AFTER %nonassoc lower_than_key @@ -1317,7 +1326,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %right PRIOR SEPARATOR_P %nonassoc LEVEL %right FEATURES TARGET // DB4AI -%left UNION EXCEPT MINUS_P +%left UNION EXCEPT %left INTERSECT %left OR XOR %left AND @@ -1325,7 +1334,7 @@ static inline void ChangeBpcharCastType(TypeName* typname); %right '=' CmpNullOp COLON_EQUALS %left '<' '>' CmpOp %nonassoc BINARY -%nonassoc LIKE ILIKE SIMILAR SOUNDS NOT_LIKE NOT_ILIKE NOT_SIMILAR +%nonassoc LIKE ILIKE SIMILAR SOUNDS NOT_LIKE NOT_ILIKE NOT_SIMILAR ANY DO END_P %nonassoc ESCAPE %nonassoc OVERLAPS %nonassoc BETWEEN NOT_BETWEEN @@ -1567,7 +1576,6 @@ stmt : | CreateGroupStmt | CreateMatViewStmt | CreateModelStmt // DB4AI - | CreateNodeGroupStmt | CreateNodeStmt | CreateOpClassStmt | CreateOpFamilyStmt @@ -4192,7 +4200,7 @@ VariableShowStmt: SelectStmt *n = MakeShowCharacterQuery(NIL, $3->like_or_where, $3->is_like); $$ = (Node *) n; } - | SHOW COLLATION OptLikeOrWhere + | SHOW COLLATION LikeOrWhere { SelectStmt *n = MakeShowCollationQuery(NIL, $3->like_or_where, $3->is_like); $$ = (Node *) n; @@ -5812,8 +5820,12 @@ alter_table_cmd: | ENABLE_P TRIGGER name { AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_EnableTrig; - n->name = $3; + if (pg_strncasecmp($3, "user", strlen($3)) == 0) { + n->subtype = AT_EnableTrigUser; + } else { + n->subtype = AT_EnableTrig; + n->name = $3; + } $$ = (Node *)n; } /* ALTER TABLE ENABLE ALWAYS TRIGGER */ @@ -5839,19 +5851,16 @@ alter_table_cmd: n->subtype = AT_EnableTrigAll; $$ = (Node *)n; } - /* ALTER TABLE ENABLE TRIGGER USER */ - | ENABLE_P TRIGGER USER - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_EnableTrigUser; - $$ = (Node *)n; - } /* ALTER TABLE DISABLE TRIGGER */ | DISABLE_P TRIGGER name { AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_DisableTrig; - n->name = $3; + if (pg_strncasecmp($3, "user", strlen($3)) == 0) { + n->subtype = AT_DisableTrigUser; + } 
else { + n->subtype = AT_DisableTrig; + n->name = $3; + } $$ = (Node *)n; } /* ALTER TABLE DISABLE TRIGGER ALL */ @@ -5861,13 +5870,6 @@ alter_table_cmd: n->subtype = AT_DisableTrigAll; $$ = (Node *)n; } - /* ALTER TABLE DISABLE TRIGGER USER */ - | DISABLE_P TRIGGER USER - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_DisableTrigUser; - $$ = (Node *)n; - } /* ALTER TABLE ENABLE RULE */ | ENABLE_P RULE name { @@ -8695,10 +8697,6 @@ row_format_option: { $$ = NULL; } - | ROW_FORMAT opt_equal COMPACT - { - $$ = NULL; - } | ROW_FORMAT opt_equal FIXED_P { $$ = NULL; @@ -14038,7 +14036,6 @@ CreateUserMappingStmt: CREATE USER MAPPING FOR auth_ident SERVER name create_gen /* User mapping authorization identifier */ auth_ident: CURRENT_USER opt_bracket { $$ = "current_user"; } - | USER { $$ = "current_user"; } | DolphinRoleIdWithOutCurrentUser { $$ = DolphinObjNameCmp($1->str, "public", $1->is_quoted) ? NULL : $1->str; } ; @@ -14335,6 +14332,7 @@ row_level_security_role: DolphinRoleIdWithOutCurrentUser { char* result = "public"; $$ = DolphinObjNameCmp($1->str, "public", $1->is_quoted) ? 
result : $1->str; } | CURRENT_USER opt_bracket { $$ = pstrdup($1); } | SESSION_USER { $$ = pstrdup($1); } + ; RLSDefaultPermissive: @@ -25901,7 +25899,7 @@ CreateConversionStmt: *****************************************************************************/ ClusterStmt: - CLUSTER opt_verbose dolphin_qualified_name cluster_index_specification + CLUSTER opt_verbose_with_brance dolphin_qualified_name cluster_index_specification { ClusterStmt *n = makeNode(ClusterStmt); $3->partitionname = NULL; @@ -25910,7 +25908,7 @@ ClusterStmt: n->verbose = $2; $$ = (Node*)n; } - | CLUSTER opt_verbose dolphin_qualified_name PARTITION '(' name ')' cluster_index_specification + | CLUSTER opt_verbose_with_brance dolphin_qualified_name PARTITION '(' name ')' cluster_index_specification { ClusterStmt *n = makeNode(ClusterStmt); $3->partitionname = $6; @@ -25919,7 +25917,7 @@ ClusterStmt: n->verbose = $2; $$ = (Node*)n; } - | CLUSTER opt_verbose + | CLUSTER opt_verbose_with_brance { ClusterStmt *n = makeNode(ClusterStmt); n->relation = NULL; @@ -25928,7 +25926,7 @@ ClusterStmt: $$ = (Node*)n; } /* kept for pre-8.3 compatibility, dolphin_index_name used to deal with the conflict*/ - | CLUSTER opt_verbose dolphin_index_name ON dolphin_qualified_name + | CLUSTER opt_verbose_with_brance dolphin_index_name ON dolphin_qualified_name { ClusterStmt *n = makeNode(ClusterStmt); n->relation = $5; @@ -25989,7 +25987,7 @@ VacuumStmt: n->relation = $3; $$ = (Node *)n; } - | VACUUM opt_full opt_freeze opt_verbose opt_compact + | VACUUM opt_full opt_freeze_empty opt_verbose_empty opt_compact { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM; @@ -26024,7 +26022,7 @@ VacuumStmt: n->va_cols = NIL; $$ = (Node *)n; } - | VACUUM opt_full opt_freeze opt_verbose opt_compact dolphin_qualified_name + | VACUUM opt_full opt_freeze_empty opt_verbose_empty opt_compact dolphin_qualified_name { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM; @@ -26059,7 +26057,7 @@ VacuumStmt: 
n->va_cols = NIL; $$ = (Node *)n; } - | VACUUM opt_full opt_freeze opt_verbose opt_compact dolphin_qualified_name PARTITION '('name')' + | VACUUM opt_full opt_freeze_empty opt_verbose_empty opt_compact dolphin_qualified_name PARTITION '('name')' { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM; @@ -26081,7 +26079,7 @@ VacuumStmt: $6->partitionname = $9; $$ = (Node *)n; } - | VACUUM opt_full opt_freeze opt_verbose opt_compact dolphin_qualified_name SUBPARTITION '('name')' + | VACUUM opt_full opt_freeze_empty opt_verbose_empty opt_compact dolphin_qualified_name SUBPARTITION '('name')' { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM; @@ -26103,7 +26101,7 @@ VacuumStmt: $6->subpartitionname = $9; $$ = (Node *)n; } - | VACUUM opt_full opt_freeze opt_verbose opt_compact AnalyzeStmt + | VACUUM opt_full opt_freeze_empty opt_verbose_empty opt_compact AnalyzeStmt { VacuumStmt *n = (VacuumStmt *) $6; n->options |= VACOPT_VACUUM; @@ -26193,7 +26191,7 @@ vacuum_option_elem: ; AnalyzeStmt: - analyze_keyword opt_verbose + analyze_keyword opt_verbose_with_brance { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_ANALYZE; @@ -26205,7 +26203,7 @@ AnalyzeStmt: n->va_cols = NIL; $$ = (Node *)n; } - | analyze_keyword opt_verbose dolphin_qualified_name opt_analyze_column_define + | analyze_keyword opt_verbose_with_brance dolphin_qualified_name opt_analyze_column_define { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_ANALYZE; @@ -26217,7 +26215,7 @@ AnalyzeStmt: n->va_cols = $4; $$ = (Node *)n; } - | analyze_keyword opt_verbose dolphin_qualified_name opt_name_list PARTITION '('name')' + | analyze_keyword opt_verbose_with_brance dolphin_qualified_name opt_name_list PARTITION '('name')' { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_ANALYZE; @@ -26233,7 +26231,7 @@ AnalyzeStmt: /* * @hdfs Support command "analyze [verbose] foreign tables" */ - | analyze_keyword opt_verbose FOREIGN TABLES + | analyze_keyword 
opt_verbose_with_brance FOREIGN TABLES { VacuumStmt *n = (VacuumStmt*)makeNode(VacuumStmt); n->options = VACOPT_ANALYZE; @@ -26247,7 +26245,7 @@ AnalyzeStmt: $$ = (Node *)n; } - | analyze_keyword opt_verbose opt_no_write_to_binlog TABLE qualified_name_list + | analyze_keyword opt_verbose_with_brance opt_no_write_to_binlog TABLE qualified_name_list { SelectStmt *n = makeNode(SelectStmt); @@ -26318,7 +26316,6 @@ VerifyStmt: analyze_keyword: ANALYZE {} - | ANALYSE /* British */ {} ; opt_verify_options: @@ -26330,13 +26327,19 @@ opt_verbose: VERBOSE { $$ = TRUE; } | /*EMPTY*/ { $$ = FALSE; } ; +opt_verbose_empty: + /*EMPTY*/ { $$ = FALSE; } + ; +opt_verbose_with_brance: + '(' VERBOSE ')' { $$ = TRUE; } + | /*EMPTY*/ { $$ = FALSE; } + ; opt_full: FULL { $$ = TRUE; } | /*EMPTY*/ { $$ = FALSE; } ; -opt_compact: COMPACT { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_compact: /*EMPTY*/ { $$ = FALSE; } ; opt_hdfsdirectory: HDFSDIRECTORY { $$ = TRUE; } @@ -26345,6 +26348,9 @@ opt_hdfsdirectory: HDFSDIRECTORY { $$ = TRUE; } opt_freeze: FREEZE { $$ = TRUE; } | /*EMPTY*/ { $$ = FALSE; } ; + +opt_freeze_empty: /*EMPTY*/ { $$ = FALSE; } + ; opt_deltamerge: DELTAMERGE {$$ = TRUE;} ; @@ -26443,11 +26449,6 @@ pgxcnode_list: | pgxcnode_name { $$ = list_make1(makeString($1)); } ; -bucket_maps: - BUCKETS '(' bucket_list ')' { $$ = $3; } - | /*EMPTY*/ { $$ = NIL; } - ; - bucket_list: bucket_list ',' Iconst { $$ = lappend($1, makeInteger($3)); } | Iconst { $$ = list_make1(makeInteger($1)); } @@ -26560,29 +26561,6 @@ opt_pgxcnodes: WITH pgxcnodes $$ = NIL; } ; - -/***************************************************************************** - * - * QUERY: - * CREATE NODE GROUP groupname WITH (node1,...,nodeN) [BUCKETS (0,1,2,...)] - * - *****************************************************************************/ - -CreateNodeGroupStmt: CREATE NODE GROUP_P pgxcgroup_name opt_pgxcnodes bucket_maps opt_vcgroup opt_redistributed bucket_cnt pgxcgroup_parent - { - CreateGroupStmt *n = 
makeNode(CreateGroupStmt); - IsValidGroupname($4); - n->group_name = $4; - n->nodes = $5; - n->buckets = $6; - n->vcgroup = $7; - n->src_group_name = $8; - n->bucketcnt = $9; - n->group_parent = $10; - $$ = (Node *)n; - } - ; - /***************************************************************************** * * QUERY: @@ -26621,14 +26599,6 @@ AlterNodeGroupStmt: ALTER NODE GROUP_P pgxcgroup_name SET DEFAULT n->alter_type = AG_SET_TABLE_GROUP; $$ = (Node *)n; } - | ALTER NODE GROUP_P pgxcgroup_name COPY BUCKETS FROM pgxcgroup_name - { - AlterGroupStmt *n = makeNode(AlterGroupStmt); - n->group_name = $4; - n->install_name = $8; - n->alter_type = AG_SET_BUCKETS; - $$ = (Node *)n; - } | ALTER NODE GROUP_P pgxcgroup_name ADD_P NODE pgxcnodes { AlterGroupStmt *n = makeNode(AlterGroupStmt); @@ -27975,7 +27945,6 @@ explain_option_elem: explain_option_name: ColId { $$ = $1; } | analyze_keyword { $$ = "analyze"; } - | VERBOSE { $$ = "verbose"; } ; explain_option_arg: @@ -28320,7 +28289,7 @@ update_delete_partition_clause: PARTITION '(' name ')' * *****************************************************************************/ -InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest returning_clause +InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest_ignore returning_clause { $7->relation = $6; $7->returningList = $8; @@ -28381,9 +28350,9 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ } } - | opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest upsert_clause returning_clause + | opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest_upsert opt_values_reference upsert_clause returning_clause { - if ($9 != NIL) { + if ($10 != NIL) { const char* message = "RETURNING clause is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."; InsertErrorMessage(message, 
u_sess->plsql_cxt.plpgsql_yylloc); ereport(errstate, @@ -28399,7 +28368,7 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("WITH clause is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."))); } - + /* enable_upsert_to_merge is always false */ if (u_sess->attr.attr_sql.enable_upsert_to_merge #ifdef ENABLE_MULTIPLE_NODES ||t_thrd.proc->workingVersionNum < UPSERT_ROW_STORE_VERSION_NUM @@ -28431,17 +28400,17 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ /* for UPSERT, keep the INSERT statement as well */ $7->relation = $6; - $7->returningList = $9; + $7->returningList = $10; $7->isReplace = false; $7->withClause = $1; $7->hasIgnore = $4; #ifdef ENABLE_MULTIPLE_NODES if (t_thrd.proc->workingVersionNum >= UPSERT_ROW_STORE_VERSION_NUM) { UpsertClause *uc = makeNode(UpsertClause); - if ($8 == NULL) + if ($9 == NULL) uc->targetList = NIL; else - uc->targetList = ((MergeWhenClause *)$8)->targetList; + uc->targetList = ((MergeWhenClause *)$9)->targetList; $7->upsertClause = uc; } #endif @@ -28453,9 +28422,8 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ Alias *a1 = makeAlias(($6->relname), NIL); $6->alias = a1; - Alias *a2 = makeAlias("excluded", NIL); RangeSubselect *r = makeNode(RangeSubselect); - r->alias = a2; + r->alias = GetSessionContext()->upSertAliasName; r->subquery = (Node *) ($7->selectStmt); m->source_relation = (Node *) r; @@ -28466,20 +28434,24 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ n->values = NULL; m->mergeWhenClauses = list_make1((Node *) n); - if ($8 != NULL) - m->mergeWhenClauses = list_concat(list_make1($8), m->mergeWhenClauses); + if ($9 != NULL) + m->mergeWhenClauses = list_concat(list_make1($9), m->mergeWhenClauses); m->hintState = create_hintstate($3); - + GetSessionContext()->upSertAliasName = NULL; + GetSessionContext()->isUpsert = 
false; $$ = (Node *)m; } else { $7->relation = $6; - $7->returningList = $9; + $7->returningList = $10; $7->withClause = $1; - $7->upsertClause = (UpsertClause *)$8; + $7->upsertClause = (UpsertClause *)$9; + $7->upsertClause->aliasName = GetSessionContext()->upSertAliasName; $7->isReplace = false; $7->hintState = create_hintstate($3); $7->hasIgnore = $4; + GetSessionContext()->upSertAliasName = NULL; + GetSessionContext()->isUpsert = false; $$ = (Node *) $7; } } @@ -28536,10 +28508,37 @@ insert_target: } ; -insert_mysql_rest: +opt_values_reference: + /* EMPTY */ + { + GetSessionContext()->upSertAliasName = makeAlias("delay", NIL); + $$ =(Node *)NULL; + } + | AS DolphinColId '(' insert_column_list ')' + { + Alias *a2 = makeAlias(GetDolphinObjName($2->str, $2->is_quoted), $4); + GetSessionContext()->upSertAliasName = a2; + $$ = (Node *) a2; + } + | AS DolphinColId + { + Alias *a2 = makeAlias(GetDolphinObjName($2->str, $2->is_quoted), NIL); + GetSessionContext()->upSertAliasName = a2; + $$ = (Node *) a2; + } + +insert_mysql_rest_upsert: + insert_mysql_rest_normal + { $$ = $1; } + | insert_mysql_rest + { $$ = $1; } +insert_mysql_rest_ignore: insert_rest { $$ = $1; } - | SET insert_set_list + | insert_mysql_rest + { $$ = $1; } +insert_mysql_rest: + SET insert_set_list { $$ = makeNode(InsertStmt); SelectStmt *n = makeNode(SelectStmt); @@ -28557,6 +28556,35 @@ insert_mysql_rest: } ; +insert_mysql_rest_selectStmt: select_with_parens { $$ = (Node*)$1; } + | values_clause { $$ = $1; } + +insert_mysql_rest_normal: insert_mysql_rest_selectStmt + { + $$ = makeNode(InsertStmt); + $$->cols = NIL; + $$->selectStmt = (Node*)$1; + $$->isRewritten = false; + } + | '(' insert_column_list ')' insert_mysql_rest_selectStmt + { + $$ = makeNode(InsertStmt); + $$->cols = $2; + $$->selectStmt = (Node*)$4; + $$->isRewritten = false; + } + | '(' ')' insert_mysql_rest_selectStmt + { + $$ = makeNode(InsertStmt); + $$->cols = NIL; + $$->selectStmt = (Node*)$3; + $$->isRewritten = false; + } + | 
insert_rest_without_select + { + $$ = $1; + } + insert_rest: SelectStmt { @@ -28579,7 +28607,12 @@ insert_rest: $$->selectStmt = $3; $$->isRewritten = false; } - | DEFAULT VALUES + | insert_rest_without_select + { + $$ = $1; + } +insert_rest_without_select: + DEFAULT VALUES { $$ = makeNode(InsertStmt); $$->cols = NIL; @@ -29592,10 +29625,6 @@ simple_select: { $$ = makeSetOp(SETOP_EXCEPT, $3, $1, $4); } - | select_clause MINUS_P opt_all select_clause - { - $$ = makeSetOp(SETOP_EXCEPT, $3, $1, $4); - } ; hint_string: @@ -30438,12 +30467,6 @@ table_ref: relation_expr %prec UMINUS $1->issubpartition = true; $$ = (Node *)$1; } - | relation_expr BUCKETS '(' bucket_list ')' - { - $1->buckets = $4; - $1->isbucket = true; - $$ = (Node *)$1; - } | relation_expr PARTITION_FOR '(' expr_list ')' { $1->partitionKeyValuesList = $4; @@ -30469,13 +30492,6 @@ table_ref: relation_expr %prec UMINUS $1->indexhints = $6; $$ = (Node *)$1; } - | relation_expr BUCKETS '(' bucket_list ')' index_hint_list - { - $1->buckets = $4; - $1->isbucket = true; - $1->indexhints = $6; - $$ = (Node *)$1; - } | relation_expr PARTITION_FOR '(' expr_list ')' index_hint_list { $1->partitionKeyValuesList = $4; @@ -30876,13 +30892,7 @@ relation_expr: $$->inhOpt = INH_YES; $$->alias = NULL; } - | ONLY dolphin_qualified_name - { - /* no inheritance */ - $$ = $2; - $$->inhOpt = INH_NO; - $$->alias = NULL; - } + /* remove ONLY dolphin_qualified_name: RESERVED_KEYWORD(only)->UNRESERVED_KEYWORD */ | ONLY '(' dolphin_qualified_name ')' { /* no inheritance, SQL99-style syntax */ @@ -33123,7 +33133,7 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } $$ = (Node *) makeSimpleA_Expr(AEXPR_IN, "<>", $1, $3, @2); } } - | a_expr subquery_Op sub_type select_with_parens %prec Op + | a_expr qual_Op sub_type select_with_parens %prec Op { SubLink *n = makeNode(SubLink); n->subLinkType = (SubLinkType)$3; @@ -33133,13 +33143,202 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } n->location = @2; $$ = (Node 
*)n; } - | a_expr subquery_Op sub_type '(' a_expr ')' %prec Op + | a_expr qual_Op sub_type '(' a_expr ')' %prec Op { if ($3 == ANY_SUBLINK) $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, $2, $1, $5, @2); else $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, $2, $1, $5, @2); } + | a_expr LIKE sub_type select_with_parens %prec Op + { + SubLink *n = makeNode(SubLink); + n->subLinkType = (SubLinkType)$3; + n->operName = NakeLikeOpList(); + n->testexpr = $1; + n->operName = NakeLikeOpList(); + n->subselect = $4; + n->location = @2; + $$ = (Node *)n; + } + | a_expr LIKE sub_type '(' a_expr ')' %prec Op + { + if ($3 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, NakeLikeOpList(), $1, $5, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, NakeLikeOpList(), $1, $5, @2); + } + | a_expr NOT_LIKE sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "!~~*", $4, @2); + } + | a_expr NOT_LIKE sub_type '(' a_expr ')' %prec Op + { + if ($3 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, MakeNotLikeOpList(), $1, $5, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, MakeNotLikeOpList(), $1, $5, @2); + } + | a_expr ILIKE sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "~~*", $4, @2); + } + | a_expr ILIKE sub_type '(' a_expr ')' %prec Op + { + if ($3 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, list_make1(makeString("~~*")), $1, $5, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, list_make1(makeString("~~*")), $1, $5, @2); + } + | a_expr NOT_ILIKE sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "!~~*", $4, @2); + } + | a_expr NOT_ILIKE sub_type '(' a_expr ')' %prec Op + { + if ($3 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, list_make1(makeString("!~~*")), $1, $5, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, list_make1(makeString("!~~*")), $1, $5, @2); + } + | a_expr CmpOp sub_type select_with_parens %prec Op + { + $$ = 
MakeSubLinkWithOp((SubLinkType)$3, $1, $2, $4, @2); + } + | a_expr CmpNullOp sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, $2, $4, @2); + } + | a_expr '+' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "+", $4, @2); + } + | a_expr '-' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "-", $4, @2); + } + | a_expr '*' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "*", $4, @2); + } + | a_expr '/' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "/", $4, @2); + } + | a_expr '%' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "%", $4, @2); + } + | a_expr '^' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "^", $4, @2); + } + | a_expr '<' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "<", $4, @2); + } + | a_expr '>' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, ">", $4, @2); + } + | a_expr '=' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "=", $4, @2); + } + | a_expr '@' sub_type select_with_parens %prec Op + { + $$ = MakeSubLinkWithOp((SubLinkType)$3, $1, "@", $4, @2); + } + | a_expr CmpOp sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString($2)), $1, $5, @2); + } + | a_expr CmpNullOp sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString($2)), $1, $5, @2); + } + | a_expr '+' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? 
AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("+")), $1, $5, @2); + } + | a_expr '-' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("-")), $1, $5, @2); + } + | a_expr '*' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("*")), $1, $5, @2); + } + | a_expr '/' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("/")), $1, $5, @2); + } + | a_expr '%' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("%")), $1, $5, @2); + } + | a_expr '^' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("^")), $1, $5, @2); + } + | a_expr '<' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("<")), $1, $5, @2); + } + | a_expr '=' sub_type '(' a_expr ')' %prec Op + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("=")), $1, $5, @2); + } + | a_expr '@' sub_type '(' a_expr ')' %prec Op + + { + $$ = (Node *) makeA_Expr($3 == ANY_SUBLINK ? 
AEXPR_OP_ANY : AEXPR_OP_ALL, + list_make1(makeString("@")), $1, $5, @2); + } + | a_expr LIKE BINARY sub_type select_with_parens %prec Op + { + SubLink *n = makeNode(SubLink); + n->subLinkType = (SubLinkType)$4; + n->testexpr = $1; + n->operName = list_make1(makeString("~~")); + n->subselect = $5; + n->location = @2; + $$ = (Node *)n; + } + | a_expr LIKE BINARY sub_type '(' a_expr ')' %prec Op + { + if ($4 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, list_make1(makeString("~~")), $1, $6, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, list_make1(makeString("~~")), $1, $6, @2); + } + | a_expr NOT_LIKE BINARY sub_type select_with_parens %prec Op + { + SubLink *n = makeNode(SubLink); + n->subLinkType = (SubLinkType)$4; + n->testexpr = $1; + n->operName = list_make1(makeString("!~~")); + n->subselect = $5; + n->location = @2; + $$ = (Node *)n; + } + | a_expr NOT_LIKE BINARY sub_type '(' a_expr ')' %prec Op + { + if ($4 == ANY_SUBLINK) + $$ = (Node *) makeA_Expr(AEXPR_OP_ANY, list_make1(makeString("!~~")), $1, $6, @2); + else + $$ = (Node *) makeA_Expr(AEXPR_OP_ALL, list_make1(makeString("!~~")), $1, $6, @2); + } | UNIQUE select_with_parens { /* Not sure how to get rid of the parentheses @@ -33218,7 +33417,7 @@ a_expr_without_sconst: c_expr_without_sconst { $$ = $1; } parser_errposition(@3))); } - c->fields = lcons((Node *)makeString("excluded"), c->fields); + c->fields = lcons((Node *)makeString(GetSessionContext()->upSertAliasName->aliasname), c->fields); $$ = (Node *) $3; } | MATCH_FUNC fulltext_match_params ')' AGAINST '(' SCONST search_modifier ')' @@ -34018,21 +34217,7 @@ func_expr_windowless: * Special expressions that are considered to be functions; */ func_expr_common_subexpr: - COLLATION FOR '(' a_expr ')' - { - FuncCall *n = makeNode(FuncCall); - n->funcname = SystemFuncName("pg_collation_for"); - n->args = list_make1($4); - n->agg_order = NIL; - n->agg_star = FALSE; - n->agg_distinct = FALSE; - n->func_variadic = FALSE; - n->over = NULL; - 
n->location = @1; - n->call_func = false; - $$ = (Node *)n; - } - | current_date_func + current_date_func { /* * Translate as "text_date('now'::text)". @@ -34449,21 +34634,6 @@ func_expr_common_subexpr: n->call_func = false; $$ = (Node *)n; } - | SYSDATE - { - FuncCall *n = makeNode(FuncCall); - n->funcname = SystemFuncName("a_sysdate"); - n->colname = pstrdup("sysdate"); - n->args = NIL; - n->agg_order = NIL; - n->agg_star = FALSE; - n->agg_distinct = FALSE; - n->func_variadic = FALSE; - n->over = NULL; - n->location = @1; - n->call_func = false; - $$ = (Node *)n; - } | SYSDATE '(' optional_precision ')' { FuncCall *n = makeNode(FuncCall); @@ -34486,18 +34656,6 @@ func_expr_common_subexpr: n->call_func = false; $$ = (Node *)n; } - | ROWNUM - { -#ifdef ENABLE_MULTIPLE_NODES - const char* message = "ROWNUM is not yet supported."; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); - ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ROWNUM is not yet supported."))); -#endif - Rownum *r = makeNode(Rownum); - r->location = @1; - $$ = (Node *)r; - } | CURRENT_ROLE { FuncCall *n = makeNode(FuncCall); @@ -34617,20 +34775,6 @@ func_expr_common_subexpr: n->call_func = false; $$ = (Node *)n; } - | USER opt_bracket - { - FuncCall *n = makeNode(FuncCall); - n->funcname = SystemFuncName("current_user"); - n->args = NIL; - n->agg_order = NIL; - n->agg_star = FALSE; - n->agg_distinct = FALSE; - n->func_variadic = FALSE; - n->over = NULL; - n->location = @1; - n->call_func = false; - $$ = (Node *)n; - } | CURRENT_CATALOG { FuncCall *n = makeNode(FuncCall); @@ -35708,28 +35852,6 @@ qual_all_Op: | OPERATOR '(' any_operator ')' { $$ = $3; } ; - -subquery_Op: - all_Op - { $$ = list_make1(makeString($1)); } - | OPERATOR '(' any_operator ')' - { $$ = $3; } - | LIKE - { - $$ = list_make1(makeString("~~")); - } - | NOT_LIKE %prec NOT_LIKE - { - $$ = list_make1(makeString("!~~")); - } - | LIKE BINARY - { $$ = list_make1(makeString("~~")); } - | NOT_LIKE 
BINARY %prec NOT_LIKE - { $$ = list_make1(makeString("!~~")); } - | ILIKE - { $$ = list_make1(makeString("~~*")); } - | NOT_ILIKE %prec NOT_ILIKE - { $$ = list_make1(makeString("!~~*")); } /* cannot put SIMILAR TO here, because SIMILAR TO is a hack. * the regular expression is preprocessed by a function (similar_escape), * and the ~ operator for posix regular expressions is used. @@ -35738,7 +35860,6 @@ subquery_Op: * however the SubLink structure which handles any/some/all stuff * is not ready for such a thing. */ - ; expr_list: a_expr { @@ -36155,10 +36276,6 @@ columnref: DolphinColId } $$ = makeColumnRef(first_word, result, @1, yyscanner); } - | EXCLUDED indirection - { - $$ = makeColumnRef("excluded", $2, @2, yyscanner); - } ; indirection_el: @@ -37478,15 +37595,6 @@ ColLabel: normal_ident { $$ = $1; } | type_func_name_keyword { $$ = downcase_str(pstrdup($1), false); } | reserved_keyword { - /* ROWNUM can not be used as alias */ - if (DolphinObjNameCmp($1, "rownum", false)) { - const char* message = "ROWNUM cannot be used as an alias"; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); - ereport(errstate, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("ROWNUM cannot be used as an alias"), - parser_errposition(@1))); - } $$ = downcase_str(pstrdup($1), false); } ; @@ -37538,15 +37646,6 @@ DolphinColLabel: DOLPHINIDENT { $$ = MakeDolphinStringByChar($1->str, $1 | type_func_name_keyword { $$ = MakeDolphinStringByChar(pstrdup($1), false); } | reserved_keyword { - /* ROWNUM can not be used as alias */ - if (DolphinObjNameCmp($1, "rownum", false)) { - const char* message = "ROWNUM cannot be used as an alias"; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); - ereport(errstate, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("ROWNUM cannot be used as an alias"), - parser_errposition(@1))); - } $$ = MakeDolphinStringByChar(pstrdup($1), false); } | SCONST @@ -37595,16 +37694,25 @@ unreserved_keyword_without_key: ABORT_P | ABSOLUTE_P | ACCESS + | USER 
+ | REJECT_P | ACCOUNT + | COLLATION | ACTION + | RECYCLEBIN | ADD_P | ADMIN | AFTER + | VERBOSE | AGGREGATE + | MODIFY_P | ALGORITHM | ALSO | ALTER + | FREEZE + | ALWAYS + | NOTNULL | APP | APPEND | ARCHIVE @@ -37612,6 +37720,7 @@ unreserved_keyword_without_key: | ASSERTION | ASSIGNMENT | AT + | LESS | ATTRIBUTE | AUDIT | AUTOEXTEND @@ -37629,6 +37738,7 @@ unreserved_keyword_without_key: | BLANKS | BLOB_P | BLOCKCHAIN + | BODY_P | BY | CACHE | CALL @@ -37732,6 +37842,7 @@ unreserved_keyword_without_key: | DISTRIBUTE | DISTRIBUTION /* PGXC_END */ + | DO | DOCUMENT_P | DOMAIN_P | DOUBLE_P @@ -37747,6 +37858,7 @@ unreserved_keyword_without_key: | ENCRYPTED_VALUE | ENCRYPTION | ENCRYPTION_TYPE + | END_P | ENDS | ENGINE_ATTRIBUTE | ENGINE_P @@ -38168,17 +38280,21 @@ unreserved_keyword_without_key: */ col_name_keyword: col_name_keyword_nonambiguous { $$ = $1; } + | CAST | CHAR_P %prec IDENT | COALESCE + | ONLY | CONVERT | DATE_P %prec IDENT | DB_B_FORMAT + | SYSDATE | DB_B_JSOBJ | EXTRACT | GET_FORMAT | GREATEST | IFNULL | INTERVAL %prec UNBOUNDED + | LAST_DAY_FUNC | LEAST | LOCATE | MID @@ -38221,6 +38337,7 @@ col_name_keyword_nonambiguous: | BIT %prec IDENT | BOOLEAN_P %prec IDENT | BUCKETCNT + | ANY | BYTEAWITHOUTORDER | BYTEAWITHOUTORDERWITHEQUAL | CHARACTER %prec IDENT @@ -38267,14 +38384,11 @@ col_name_keyword_nonambiguous: type_func_name_keyword_without_current_schema: AGAINST | AUTHORIZATION - | COLLATION - | COMPACT | CONCURRENTLY | CROSS | CSN | DELTAMERGE | DIV - | FREEZE | FULL | FULLTEXT | HDFSDIRECTORY @@ -38285,17 +38399,14 @@ type_func_name_keyword_without_current_schema: | LEFT | LIKE | NATURAL - | NOTNULL | OUTER_P | OVERLAPS - | RECYCLEBIN | REGEXP | RIGHT | RLIKE | SIMILAR | TABLESAMPLE | TIMECAPSULE - | VERBOSE | XOR ; @@ -38322,19 +38433,14 @@ type_func_name_keyword: */ reserved_keyword: ALL - | ANALYSE | ANALYZE | AND - | ANY | ARRAY | AS | ASC | ASYMMETRIC - | BODY_P | BOTH - | BUCKETS | CASE - | CAST | CHECK | COLLATE | COLUMN @@ -38353,13 
+38459,10 @@ reserved_keyword: | DESC | DISTINCT | DISTINCTROW - | DO | DUAL_P | ELSE - | END_P | ENUM_P | EXCEPT - | EXCLUDED | FALSE_P | FETCH | FOR @@ -38374,23 +38477,18 @@ reserved_keyword: | INTERSECT | INTO | IS - | LAST_DAY_FUNC | LEADING - | LESS | LIMIT | LOCALTIME | LOCALTIMESTAMP | LOW_PRIORITY | MAXVALUE - | MINUS_P - | MODIFY_P | NOCYCLE | NOT | NOW_FUNC | NULL_P | OFFSET | ON - | ONLY | OR | ORDER | PERFORMANCE @@ -38398,15 +38496,12 @@ reserved_keyword: | PRIMARY | PROCEDURE | REFERENCES - | REJECT_P | RETURNING - | ROWNUM | SELECT | SESSION_USER | SHRINK | SOME | SYMMETRIC - | SYSDATE | TABLE | THEN | TO @@ -38414,7 +38509,6 @@ reserved_keyword: | TRUE_P | UNION | UNIQUE - | USER | USING | UTC_DATE | UTC_TIME @@ -38432,6 +38526,31 @@ normal_ident: DOLPHINIDENT { $$ = downcase_str($1->str, $1->is_quoted); } %% + +static List* NakeLikeOpList() { + if (GetSessionContext()->enableBCmptMode) { + return list_make1(makeString("~~*")); + } else { + return list_make1(makeString("~~")); + } +} + +static List* MakeNotLikeOpList() { + return GetSessionContext()->enableBCmptMode ? list_make1(makeString("!~~*")) : list_make1(makeString("!~~")); +} + +static Node* MakeSubLinkWithOp(SubLinkType subType, Node* testExpr, char* op, Node* subSelect, int location) +{ + SubLink *n = makeNode(SubLink); + n->subLinkType = subType; + n->testexpr = testExpr; + n->operName = list_make1(makeString(op)); + n->subselect = subSelect; + n->location = location; + return (Node*)n; +} + + /* * The signature of this function is required by bison. 
However, we * ignore the passed yylloc and instead use the last token position @@ -39320,6 +39439,7 @@ parser_init(base_yy_extra_type *yyext) yyext->core_yy_extra.query_string_locationlist = NIL; yyext->core_yy_extra.paren_depth = 0; GetSessionContext()->isUpsert = false; + GetSessionContext()->upSertAliasName = (Alias*)NULL; GetSessionContext()->is_schema_name = false; GetSessionContext()->is_create_alter_stmt = false; } diff --git a/contrib/dolphin/plugin_utility.cpp b/contrib/dolphin/plugin_utility.cpp index c95ef2930..f9b717e81 100644 --- a/contrib/dolphin/plugin_utility.cpp +++ b/contrib/dolphin/plugin_utility.cpp @@ -6340,7 +6340,15 @@ ProcessUtilitySlow(Node *parse_tree, EventTriggerUndoInhibitCommandCollection(); } break; - case T_CreateTrigStmt: + case T_CreateTrigStmt: +#ifdef DOLPHIN + { + CreateTrigStmt* createTrigStmt = (CreateTrigStmt*)parse_tree; + if (pg_strncasecmp(createTrigStmt->trigname, "user", strlen(createTrigStmt->trigname)) == 0) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("%s cannot be trigger name", + createTrigStmt->trigname))); + } +#endif address = CreateTrigger( (CreateTrigStmt*)parse_tree, query_string, InvalidOid, InvalidOid, InvalidOid, InvalidOid, false); #ifdef PGXC @@ -6356,7 +6364,9 @@ ProcessUtilitySlow(Node *parse_tree, } #endif break; - +#ifdef DOLPHIN + } +#endif case T_CreatePLangStmt: if (!IsInitdb && strncmp(((CreatePLangStmt*)parse_tree)->plname, "plpython", strlen("plpython")) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("new language is not yet supported."))); diff --git a/contrib/dolphin/plugin_utils/adt/ri_triggers.cpp b/contrib/dolphin/plugin_utils/adt/ri_triggers.cpp index 96d70c957..599f96df5 100644 --- a/contrib/dolphin/plugin_utils/adt/ri_triggers.cpp +++ b/contrib/dolphin/plugin_utils/adt/ri_triggers.cpp @@ -339,8 +339,13 @@ static Datum RI_FKey_check(PG_FUNCTION_ARGS) quoteRelationName(pkrelname, pk_rel); rc = snprintf_s(querystr, sizeof(querystr), 
sizeof(querystr) - 1, +#ifndef DOLPHIN IsShareLockForForeignKey(pk_rel) ? "SELECT 1 FROM ONLY %s x FOR SHARE OF x" : "SELECT 1 FROM ONLY %s x FOR KEY SHARE OF x", pkrelname); +#else + IsShareLockForForeignKey(pk_rel) ? "SELECT 1 FROM ONLY (%s) x FOR SHARE OF x" : + "SELECT 1 FROM ONLY (%s) x FOR KEY SHARE OF x", pkrelname); +#endif securec_check_ss(rc, "\0", "\0"); /* Prepare and save the plan */ @@ -465,7 +470,11 @@ static Datum RI_FKey_check(PG_FUNCTION_ARGS) */ initStringInfo(&querybuf); quoteRelationName(pkrelname, pk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "SELECT 1 FROM ONLY %s x", pkrelname); +#else + appendStringInfo(&querybuf, "SELECT 1 FROM ONLY (%s) x", pkrelname); +#endif querysep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]); @@ -615,7 +624,11 @@ static bool ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel, HeapTuple old_ro */ initStringInfo(&querybuf); quoteRelationName(pkrelname, pk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "SELECT 1 FROM ONLY %s x", pkrelname); +#else + appendStringInfo(&querybuf, "SELECT 1 FROM ONLY (%s) x", pkrelname); +#endif querysep = "WHERE"; int iRet = -1; for (i = 0; i < riinfo->nkeys; i++) { @@ -787,7 +800,11 @@ Datum RI_FKey_noaction(PG_FUNCTION_ARGS) */ initStringInfo(&querybuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "SELECT 1 FROM ONLY %s x", fkrelname); +#else + appendStringInfo(&querybuf, "SELECT 1 FROM ONLY (%s) x", fkrelname); +#endif querysep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]); @@ -971,7 +988,11 @@ Datum RI_FKey_cascade_del(PG_FUNCTION_ARGS) */ initStringInfo(&querybuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "DELETE FROM ONLY %s", fkrelname); +#else + appendStringInfo(&querybuf, "DELETE FROM ONLY (%s)", fkrelname); +#endif querysep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { Oid 
pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]); @@ -1146,7 +1167,11 @@ Datum RI_FKey_cascade_upd(PG_FUNCTION_ARGS) initStringInfo(&querybuf); initStringInfo(&qualbuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "UPDATE ONLY %s SET", fkrelname); +#else + appendStringInfo(&querybuf, "UPDATE ONLY (%s) SET", fkrelname); +#endif querysep = ""; qualsep = "WHERE"; for (i = 0, j = riinfo.nkeys; i < riinfo.nkeys; i++, j++) { @@ -1324,7 +1349,11 @@ Datum RI_FKey_restrict(PG_FUNCTION_ARGS) */ initStringInfo(&querybuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "SELECT 1 FROM ONLY %s x", fkrelname); +#else + appendStringInfo(&querybuf, "SELECT 1 FROM ONLY (%s) x", fkrelname); +#endif querysep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]); @@ -1521,7 +1550,11 @@ Datum RI_FKey_setnull_del(PG_FUNCTION_ARGS) initStringInfo(&querybuf); initStringInfo(&qualbuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "UPDATE ONLY %s SET", fkrelname); +#else + appendStringInfo(&querybuf, "UPDATE ONLY (%s) SET", fkrelname); +#endif querysep = ""; qualsep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { @@ -1712,7 +1745,11 @@ Datum RI_FKey_setnull_upd(PG_FUNCTION_ARGS) initStringInfo(&querybuf); initStringInfo(&qualbuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "UPDATE ONLY %s SET", fkrelname); +#else + appendStringInfo(&querybuf, "UPDATE ONLY (%s) SET", fkrelname); +#endif querysep = ""; qualsep = "WHERE"; for (i = 0; i < riinfo.nkeys; i++) { @@ -1902,7 +1939,11 @@ Datum RI_FKey_setdefault(PG_FUNCTION_ARGS) initStringInfo(&querybuf); initStringInfo(&qualbuf); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, "UPDATE ONLY %s SET", fkrelname); +#else + appendStringInfo(&querybuf, "UPDATE ONLY (%s) SET", fkrelname); +#endif querysep = ""; qualsep = 
"WHERE"; for (i = 0; i < riinfo.nkeys; i++) { @@ -2188,8 +2229,11 @@ bool RI_Initial_Check(Trigger* trigger, Relation fk_rel, Relation pk_rel) quoteRelationName(pkrelname, pk_rel); quoteRelationName(fkrelname, fk_rel); +#ifndef DOLPHIN appendStringInfo(&querybuf, " FROM ONLY %s fk LEFT OUTER JOIN ONLY %s pk ON", fkrelname, pkrelname); - +#else + appendStringInfo(&querybuf, " FROM ONLY (%s) fk LEFT OUTER JOIN ONLY (%s) pk ON", fkrelname, pkrelname); +#endif rc = strcpy_s(pkattname, sizeof(pkattname), "pk."); securec_check_ss(rc, "\0", "\0"); rc = strcpy_s(fkattname, sizeof(fkattname), "fk."); diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index fb64b8eda..4cc282c71 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -3849,7 +3849,11 @@ static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int initStringInfo(&buf); if (fullCommand && OidIsValid(conForm->conrelid)) { +#ifndef DOLPHIN appendStringInfo(&buf, "ALTER TABLE ONLY %s ADD CONSTRAINT %s ", +#else + appendStringInfo(&buf, "ALTER TABLE ONLY (%s) ADD CONSTRAINT %s ", +#endif generate_relation_name(conForm->conrelid, NIL), quote_identifier(NameStr(conForm->conname))); } diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index da442bcb0..59daea8ae 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -129,6 +129,7 @@ BEGIN RETURN 0; END; $$ +LANGUAGE plpgsql; DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(text); DROP FUNCTION IF EXISTS pg_catalog.dayofmonth(numeric); @@ -140,3 +141,5 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamp_bool(timestamp(0) without time zone DROP FUNCTION IF EXISTS pg_catalog.date_format (time without time zone, text); DROP FUNCTION IF EXISTS pg_catalog.to_char(time without time zone, text); + +drop 
function IF EXISTS pg_catalog."user"(); \ No newline at end of file diff --git a/contrib/dolphin/sql/case_sensitive_test/alter_table.sql b/contrib/dolphin/sql/case_sensitive_test/alter_table.sql index 29dce4cdc..d37eb1416 100644 --- a/contrib/dolphin/sql/case_sensitive_test/alter_table.sql +++ b/contrib/dolphin/sql/case_sensitive_test/alter_table.sql @@ -224,12 +224,12 @@ CREATE TABLE Constraint_Rename_Test (a int CONSTRAINT con1 CHECK (a > 0), b int, CREATE TABLE Constraint_Rename_Test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) InheRITS (Constraint_Rename_Test); \d "Constraint_Rename_Test2" ALTER TABLE Constraint_Rename_Test2 RENAME CONSTRAINT con1 TO con1foo; -- fail -ALTER TABLE ONLY Constraint_Rename_Test RENAME CONSTRAINT con1 TO con1foo; -- fail +ALTER TABLE ONLY (Constraint_Rename_Test) RENAME CONSTRAINT con1 TO con1foo; -- fail ALTER TABLE Constraint_Rename_Test RENAME CONSTRAINT con1 TO con1foo; -- ok \d "Constraint_Rename_Test" \d "Constraint_Rename_Test2" ALTER TABLE Constraint_Rename_Test ADD CONSTRAINT con2 CHECK (b > 0) NO InheRIT; -ALTER TABLE ONLY Constraint_Rename_Test RENAME CONSTRAINT con2 TO con2foo; -- ok +ALTER TABLE ONLY (Constraint_Rename_Test) RENAME CONSTRAINT con2 TO con2foo; -- ok ALTER TABLE Constraint_Rename_Test RENAME CONSTRAINT con2foo TO con2bar; -- ok \d "Constraint_Rename_Test" \d "Constraint_Rename_Test2" diff --git a/contrib/dolphin/sql/case_sensitive_test/create_index.sql b/contrib/dolphin/sql/case_sensitive_test/create_index.sql index d504f4695..c873102a0 100644 --- a/contrib/dolphin/sql/case_sensitive_test/create_index.sql +++ b/contrib/dolphin/sql/case_sensitive_test/create_index.sql @@ -957,10 +957,6 @@ create index idx_rep_table on t_rep_table(a); explain (costs off) select /*+ rows(t_rep_table #100000) */ * from t_hash_table where t_hash_table.a in (select a from t_rep_table); explain (costs off) select /*+ rows(t_rep_table #100000) */ * from t_hash_table where '1' = '0' or t_hash_table.a in (select a from 
t_rep_table); -create index test0 on t_rep_table(rownum); -create index test0 on t_rep_table(sin(a), sin(rownum)); -create index test0 on t_rep_table(sin(a), sin(rownum+1)); - drop index idx_rep_table; drop table t_hash_table; drop table t_rep_table; diff --git a/contrib/dolphin/sql/case_sensitive_test/create_view1.sql b/contrib/dolphin/sql/case_sensitive_test/create_view1.sql index 1e68f8b71..d56122cf2 100644 --- a/contrib/dolphin/sql/case_sensitive_test/create_view1.sql +++ b/contrib/dolphin/sql/case_sensitive_test/create_view1.sql @@ -9,7 +9,7 @@ set dolphin.lower_case_table_names TO 0; CREATE VIEW street AS SELECT r.name, r.thepath, c.cname AS cname - FROM ONLY Road r, Real_City c + FROM ONLY (Road) r, Real_City c WHERE c.outline ## r.thepath; CREATE VIEW iexit AS diff --git a/contrib/dolphin/sql/case_sensitive_test/create_view5.sql b/contrib/dolphin/sql/case_sensitive_test/create_view5.sql index f7d02ad2b..207dbaa26 100644 --- a/contrib/dolphin/sql/case_sensitive_test/create_view5.sql +++ b/contrib/dolphin/sql/case_sensitive_test/create_view5.sql @@ -70,7 +70,7 @@ DROP VIEW tt23v; create view tt20v as select * from coalesce(1,2) as c, - collation for ('x'::text) col, + pg_collation_for ('x'::text) col, current_date as d, cast(1+2 as int4) as i4, cast(1+2 as int8) as i8; diff --git a/contrib/dolphin/sql/case_sensitive_test_backquote/alter_table.sql b/contrib/dolphin/sql/case_sensitive_test_backquote/alter_table.sql index 636601f01..16aab40cb 100644 --- a/contrib/dolphin/sql/case_sensitive_test_backquote/alter_table.sql +++ b/contrib/dolphin/sql/case_sensitive_test_backquote/alter_table.sql @@ -224,12 +224,12 @@ CREATE TABLE `Constraint_Rename_Test` (`a` int CONSTRAINT `con1` CHECK (`a` > 0) CREATE TABLE `Constraint_Rename_Test2` (`a` int CONSTRAINT `con1` CHECK (`a` > 0), `d` int) InheRITS (`Constraint_Rename_Test`); \d "Constraint_Rename_Test2" ALTER TABLE `Constraint_Rename_Test2` RENAME CONSTRAINT `con1` TO `con1foo`; -- fail -ALTER TABLE ONLY 
`Constraint_Rename_Test` RENAME CONSTRAINT `con1` TO `con1foo`; -- fail +ALTER TABLE ONLY (`Constraint_Rename_Test`) RENAME CONSTRAINT `con1` TO `con1foo`; -- fail ALTER TABLE `Constraint_Rename_Test` RENAME CONSTRAINT `con1` TO `con1foo`; -- ok \d "Constraint_Rename_Test" \d "Constraint_Rename_Test2" ALTER TABLE `Constraint_Rename_Test` ADD CONSTRAINT `con2` CHECK (`b` > 0) NO InheRIT; -ALTER TABLE ONLY `Constraint_Rename_Test` RENAME CONSTRAINT `con2` TO `con2foo`; -- ok +ALTER TABLE ONLY (`Constraint_Rename_Test`) RENAME CONSTRAINT `con2` TO `con2foo`; -- ok ALTER TABLE `Constraint_Rename_Test` RENAME CONSTRAINT `con2foo` TO `con2bar`; -- ok \d "Constraint_Rename_Test" \d "Constraint_Rename_Test2" diff --git a/contrib/dolphin/sql/charset_gbk_b_db.sql b/contrib/dolphin/sql/charset_gbk_b_db.sql index 0d650029a..b3dc5799f 100644 --- a/contrib/dolphin/sql/charset_gbk_b_db.sql +++ b/contrib/dolphin/sql/charset_gbk_b_db.sql @@ -49,7 +49,7 @@ SELECT _gb18030'高斯'; SELECT _gbk X'e9ab98e696af'; SELECT CONVERT_TO(_utf8mb4'楂樻柉', 'gbk'); -- ERROR SELECT CONVERT_TO(_utf8mb4'高斯', 'gbk'); -- ERROR -SELECT COLLATION FOR(CAST('高斯' AS binary)::text); +SELECT pg_collation_for(CAST('高斯' AS binary)::text); -- -- 中文 const compare -- -- -- same charset & explicit collation @@ -130,70 +130,70 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB' COLLATE gbk_bin SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB' COLLATE gbk_chinese_ci); -- ERROR SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB' COLLATE gbk_bin); -- -- -- same charset & implicit collation -SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); -- -- -- diff charset & explicit collation -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , 
_gb18030'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); -- -- -- explicit & implicit -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.gbk") result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE 
utf8mb4_bin , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.gbk") result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); 
+SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, 
collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); -- -- -- const compare CONCAT SELECT _utf8mb4'楂樻柉DB' = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci); @@ -201,67 +201,67 @@ SELECT _utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE 
gb18030_chinese_ci); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB'); -- ERROR -- -- -- const CONCAT CONCAT -SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', 123) result, collation for(result); -SELECT CONCAT(123, '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', NULL) result, collation for(result); -SELECT CONCAT(NULL, '高斯DB') result, collation for(result); +SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); +SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', DATE 
'2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', 123) result, collation for(result); -SELECT CONCAT(123, _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', NULL) result, collation for(result); -SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', 123) result, pg_collation_for(result); +SELECT CONCAT(123, _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); -SELECT CONCAT(123, CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', 
CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), NULL) result, collation for(result); -SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); +SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); +SELECT CONCAT(123, CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, collation for(result); -SELECT CONCAT(123, CONCAT(_utf8mb4'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', CONCAT(_utf8mb4'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), NULL) result, collation for(result); -SELECT CONCAT(NULL, CONCAT(_utf8mb4'高斯DB')) result, collation for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, pg_collation_for(result); +SELECT CONCAT(123, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT(_utf8mb4'高斯DB')) result, 
pg_collation_for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); -- -- -- CONCAT NUMBERS -SELECT CONCAT('100', 200) result, collation for(result); -SELECT CONCAT('100', date'2021-01-01') result, collation for(result); -SELECT CONCAT('100', NULL) result, collation for(result); -SELECT CONCAT('100', NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT('100', NULL::text) result, collation for(result); -SELECT CONCAT(100, 200) result, collation for(result); -SELECT CONCAT(100, date'2021-01-01') result, collation for(result); -SELECT CONCAT(100, NULL) result, collation for(result); -SELECT CONCAT(100, NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT(100, NULL::text) result, collation for(result); -SELECT CONCAT(NULL, NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT(NULL, NULL::text) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::text), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL), 100) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::text), 100) result, collation for(result); +SELECT CONCAT('100', 200) result, pg_collation_for(result); +SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); +SELECT CONCAT('100', NULL) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); +SELECT CONCAT(100, 200) result, pg_collation_for(result); +SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); +SELECT CONCAT(100, NULL) result, pg_collation_for(result); +SELECT 
CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); -- -- 中文 with column charset CREATE TABLE t_diff_charset_columns( @@ -380,58 +380,58 @@ SELECT fgbk_chi = fgb18030_chi, fgb18030_chi = fgbk_chi FROM t_diff_charset_colu -- -- COLUMN concat COLUMN -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_gen, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- result is _bin +SELECT CONCAT(futf8_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_gen, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- result is _bin -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgbk_chi) 
result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(fgbk_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(fgbk_chi, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT 
CONCAT(fgbk_chi, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- concat column and @uservar set enable_set_variable_b_format=on; -- -- -- string var utf8mb4_general_ci set @var_utf8_gen = _utf8mb4'高斯DB' COLLATE utf8mb4_general_ci; -- should support -SELECT collation for(@var_utf8_gen); -SELECT CONCAT(futf8_uni, @var_utf8_gen) result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CONCAT(fgbk_bin, @var_utf8_gen) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_general_ci -SELECT CONCAT(@var_utf8_gen, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- _bin +SELECT pg_collation_for(@var_utf8_gen); +SELECT CONCAT(futf8_uni, @var_utf8_gen) result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CONCAT(fgbk_bin, @var_utf8_gen) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_general_ci +SELECT CONCAT(@var_utf8_gen, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- _bin -- -- -- string var gbk_chinese_ci set @var_gbk_chi = '高斯DB' COLLATE gbk_chinese_ci; -- should support -SELECT collation for(@var_gbk_chi); -SELECT CONCAT(futf8_uni, @var_gbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- futf8_uni -SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT pg_collation_for(@var_gbk_chi); +SELECT CONCAT(futf8_uni, @var_gbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_uni +SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin -- -- -- number var set @var_num = 5.0; -SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(@var_num, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin -- -- -- varbinary var set @var_binary = 
_binary'高斯DB'; -SELECT CONCAT(futf8_bin, @var_binary) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, @var_binary) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(@var_binary, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- concat column and bind parameter -- -- -- -- PBE with implicit collation PREPARE test_merge_collation(text) AS -SELECT CONCAT(futf8_uni, $1) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, $1) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- futf8_uni collation has priority EXECUTE test_merge_collation(_utf8mb4'高斯DB'); -- same as above -SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above -- -- -- -- -- _gbk SET @pbe_param1 = _gbk'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- _gbk noneffective, futf8_uni collation has priority, _gbk'高斯DB' will not convert to utf8mb4 EXECUTE test_merge_collation(_gbk'高斯DB'); -- same as above -SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above -- -- -- -- -- _utf8mb4 utf8mb4_unicode_ci SET @pbe_param1 = _utf8mb4'高斯DB' collate utf8mb4_bin; EXECUTE test_merge_collation(@pbe_param1); -- explicit noneffective, futf8_uni collation has priority @@ -439,7 +439,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB' collate utf8mb4_unicode_ci); -- DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, 
PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); @@ -451,7 +451,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- utf8mb4_unicode_ci DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR +SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR DEALLOCATE test_merge_collation; -- -- -- -- test revalidate SELECT fgbk_chi result FROM t_diff_charset_columns WHERE fgbk_chi=_gbk'高斯db'; -- 1 rows @@ -474,52 +474,52 @@ SET NAMES gbk; -- -- concat for DERIVATION -- -- -- same charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT 
CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT('高斯DB', fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, 123) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(123, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, NULL) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(NULL, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), futf8_uni) 
result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- diff charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, collation 
for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT('高斯DB', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, 123) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(123, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, NULL) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi, 
CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- test explicit collate on concat -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci 
result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -- -- test explicit collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR @@ -528,41 +528,41 @@ SELECT CAST('DEADBEEF' AS blob) COLLATE "binary" result; -- -- case when -- -- -- condition same charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN 
(futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- condition same charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = 
futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- condition diff charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) 
END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- condition diff charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END 
result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- in -- -- -- column utf8 @@ -597,16 +597,16 @@ SELECT futf8_bin FROM t_diff_charset_columns t1 WHERE fgbk_chi in (SELECT t2.fgb SELECT futf8_bin FROM t_diff_charset_columns t1 WHERE fgbk_chi in (SELECT t2.fgb18030_bin FROM t_diff_charset_columns t2); -- ERROR -- -- COALESCE -SELECT COALESCE(fgbk_chi, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT COALESCE(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; -SELECT COALESCE(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict -SELECT COALESCE(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT COALESCE(fgbk_chi, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict +SELECT COALESCE(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR -- -- GREATEST -SELECT GREATEST(fgbk_chi, futf8_bin) result, collation for (result) FROM 
t_diff_charset_columns; -SELECT GREATEST(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; -SELECT GREATEST(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict -SELECT GREATEST(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT GREATEST(fgbk_chi, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict +SELECT GREATEST(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR -- -- XMLEXPR SELECT xmlelement(NAME a, fgbk_chi, futf8_bin) result FROM t_diff_charset_columns; diff --git a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql index 85fedd6d6..2ec8d971f 100644 --- a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql +++ b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql @@ -259,70 +259,70 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB' COLLATE SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB' COLLATE utf8mb4_unicode_ci); -- ERROR SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB' COLLATE utf8mb4_bin); -- -- -- same charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); -- -- -- diff charset & explicit collation -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -- ERROR -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, collation for(result); -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, collation for(result); -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); +SELECT 
CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); -- -- -- explicit & implicit -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.utf8") result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, 
collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, collation for(result); -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, collation for(result); -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.utf8") result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE utf8mb4_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gbk'高斯db' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB' COLLATE gbk_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); +SELECT 
CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, 
collation for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, collation for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, collation for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, collation for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); -- -- -- const compare CONCAT SELECT _utf8mb4'楂樻柉DB' = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci); @@ -330,67 +330,67 @@ SELECT _utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB'); -- ERROR -- -- -- const CONCAT CONCAT 
-SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, collation for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB', CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', 123) result, collation for(result); -SELECT CONCAT(123, '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, collation for(result); -SELECT CONCAT('高斯DB', NULL) result, collation for(result); -SELECT CONCAT(NULL, '高斯DB') result, collation for(result); +SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); +SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); +SELECT 
CONCAT('高斯DB', NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB', 123) result, collation for(result); -SELECT CONCAT(123, _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB', DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', _gbk'高斯DB') result, collation for(result); -SELECT CONCAT(_gbk'高斯DB', NULL) result, collation for(result); -SELECT CONCAT(NULL, _gbk'高斯DB') result, collation for(result); +SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB', 123) result, pg_collation_for(result); +SELECT CONCAT(123, _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB', NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, _gbk'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), 123) result, collation for(result); -SELECT CONCAT(123, CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT('高斯DB'), NULL) result, collation for(result); -SELECT CONCAT(NULL, CONCAT('高斯DB')) result, collation for(result); +SELECT 
CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); +SELECT CONCAT(123, CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, collation for(result); -SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, collation for(result); -SELECT CONCAT(123, CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_gbk'高斯DB'), DATE '2023-05-01') result, collation for(result); -SELECT CONCAT(DATE '2023-05-01', CONCAT(_gbk'高斯DB')) result, collation for(result); -SELECT CONCAT(CONCAT(_gbk'高斯DB'), NULL) result, collation for(result); -SELECT CONCAT(NULL, CONCAT(_gbk'高斯DB')) result, collation for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, pg_collation_for(result); +SELECT CONCAT(123, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); +SELECT CONCAT(DATE '2023-05-01', CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), NULL) result, pg_collation_for(result); +SELECT CONCAT(NULL, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); -- -- -- CONCAT NUMBERS -SELECT CONCAT('100', 200) result, collation 
for(result); -SELECT CONCAT('100', date'2021-01-01') result, collation for(result); -SELECT CONCAT('100', NULL) result, collation for(result); -SELECT CONCAT('100', NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT('100', NULL::text) result, collation for(result); -SELECT CONCAT(100, 200) result, collation for(result); -SELECT CONCAT(100, date'2021-01-01') result, collation for(result); -SELECT CONCAT(100, NULL) result, collation for(result); -SELECT CONCAT(100, NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT(100, NULL::text) result, collation for(result); -SELECT CONCAT(NULL, NULL::varbinary(16)) result, collation for(result); -SELECT CONCAT(NULL, NULL::text) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::text), '100') result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL), 100) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, collation for(result); -SELECT CONCAT(CONCAT(100, NULL::text), 100) result, collation for(result); +SELECT CONCAT('100', 200) result, pg_collation_for(result); +SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); +SELECT CONCAT('100', NULL) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); +SELECT CONCAT(100, 200) result, pg_collation_for(result); +SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); +SELECT CONCAT(100, NULL) result, pg_collation_for(result); +SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::text) result, 
pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); -- -- 中文 with column charset CREATE TABLE t_diff_charset_columns( @@ -518,69 +518,69 @@ SELECT fgb18030_bin = fblob, fblob = fgb18030_bin FROM t_diff_charset_columns; - -- -- COLUMN concat COLUMN -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_gen, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- result is _bin +SELECT CONCAT(futf8_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_gen, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- result is _bin -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgb18030_chi) result, collation for(result) FROM 
t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fgb18030_bin) result, collation for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(fgbk_bin, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(fgbk_chi, fgb18030_chi) result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(fgbk_chi, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation -SELECT CONCAT(futf8_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; 
-SELECT CONCAT(futf8_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fbin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fblob) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgb18030_bin, fbin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgb18030_bin, fblob) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fbin, fblob) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- concat column and @uservar set enable_set_variable_b_format=on; -- -- -- string var utf8mb4_general_ci set @var_utf8_uni = '高斯DB' COLLATE utf8mb4_unicode_ci; -- should support -SELECT collation for(@var_utf8_uni); -SELECT CONCAT(futf8_gen, @var_utf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CONCAT(fgbk_bin, @var_utf8_uni) result, collation for(result) FROM 
t_diff_charset_columns; -- utf8mb4_unicode_ci -SELECT CONCAT(@var_utf8_uni, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- _bin +SELECT pg_collation_for(@var_utf8_uni); +SELECT CONCAT(futf8_gen, @var_utf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CONCAT(fgbk_bin, @var_utf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT CONCAT(@var_utf8_uni, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- _bin -- -- -- string var gbk_chinese_ci set @var_gbk_chi = _gbk'高斯DB' COLLATE gbk_chinese_ci; -- should support -SELECT collation for(@var_gbk_chi); -SELECT CONCAT(futf8_uni, @var_gbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- futf8_uni -SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT pg_collation_for(@var_gbk_chi); +SELECT CONCAT(futf8_uni, @var_gbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_uni +SELECT CONCAT(@var_gbk_chi, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin -- -- -- number var set @var_num = 5.0; -SELECT CONCAT(futf8_bin, @var_num) result, collation for(result) FROM t_diff_charset_columns; -- futf8_bin -SELECT CONCAT(@var_num, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -- fgbk_bin +SELECT CONCAT(futf8_bin, @var_num) result, pg_collation_for(result) FROM t_diff_charset_columns; -- futf8_bin +SELECT CONCAT(@var_num, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- fgbk_bin -- -- -- varbinary var set @var_binary = _binary'高斯DB'; -SELECT CONCAT(futf8_bin, @var_binary) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(@var_binary, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, @var_binary) result, pg_collation_for(result) FROM 
t_diff_charset_columns; +SELECT CONCAT(@var_binary, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- concat column and bind parameter -- -- -- -- PBE with implicit collation PREPARE test_merge_collation(text) AS -SELECT CONCAT(futf8_uni, $1) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, $1) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- _utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- futf8_uni collation has priority EXECUTE test_merge_collation(_utf8mb4'高斯DB'); -- same as above -SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _utf8mb4'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above -- -- -- -- -- _gbk SET @pbe_param1 = _gbk'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); -- _gbk noneffective, futf8_uni collation has priority, _gbk'高斯DB' will not convert to utf8mb4 EXECUTE test_merge_collation(_gbk'高斯DB'); -- same as above -SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, collation for(result) FROM t_diff_charset_columns; -- same as above +SELECT CONCAT(futf8_uni, _gbk'高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; -- same as above -- -- -- -- -- _utf8mb4 utf8mb4_unicode_ci SET @pbe_param1 = _utf8mb4'高斯DB' collate utf8mb4_bin; EXECUTE test_merge_collation(@pbe_param1); -- explicit noneffective, futf8_uni collation has priority @@ -588,7 +588,7 @@ EXECUTE test_merge_collation(_utf8mb4'高斯DB' collate utf8mb4_unicode_ci); -- DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT($1 collate utf8mb4_unicode_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- -- -- 
_utf8mb4 SET @pbe_param1 = _utf8mb4'高斯DB'; EXECUTE test_merge_collation(@pbe_param1); @@ -600,7 +600,7 @@ EXECUTE test_merge_collation(_gbk'高斯DB'); -- utf8mb4_unicode_ci DEALLOCATE test_merge_collation; -- -- -- -- PBE with explicit collation, PREPARE test_merge_collation(text) AS -SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR +SELECT CONCAT($1 collate gbk_chinese_ci, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- $1 use collation_connection, ERROR -- -- -- -- test revalidate SELECT fgbk_chi result FROM t_diff_charset_columns WHERE fgbk_chi=_utf8mb4'高斯db'; -- 1 rows PREPARE test_revalidate(text) AS @@ -628,7 +628,7 @@ DECLARE collation_res text; concat_res_assign varchar(64); CURSOR c1 IS - SELECT CONCAT(p1, p2) result, collation for(result); + SELECT CONCAT(p1, p2) result, pg_collation_for(result); begin OPEN c1; LOOP @@ -644,11 +644,11 @@ end; / CALL merge_collation_func('高斯DB', _gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci CALL merge_collation_func(_gb18030'高斯DB', _gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci CALL merge_collation_func(_gb18030'高斯DB', _gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation,@concat_res_assign; -- utf8mb4_general_ci DROP procedure merge_collation_func; -- -- -- implicit collation && string create or replace procedure 
merge_collation_func(p1 text) @@ -657,7 +657,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(futf8_uni, p1) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(futf8_uni, p1) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -671,13 +671,13 @@ end; / CALL merge_collation_func('高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict CALL merge_collation_func(_gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation;; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation;; -- conflict CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation;; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation;; -- conflict DROP procedure merge_collation_func; -- -- -- implicit collation && string create or replace procedure merge_collation_func(p1 text) @@ -686,7 +686,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(p1, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(p1, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -701,13 +701,13 @@ end; CALL merge_collation_func('高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci CALL merge_collation_func('高斯DB' collate 
utf8mb4_general_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci CALL merge_collation_func(_gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_general_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_general_ci DROP procedure merge_collation_func; -- -- -- explicit collation && string create or replace procedure merge_collation_func(p1 text) @@ -716,7 +716,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(p1, fgbk_bin collate gbk_bin) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(p1, fgbk_bin collate gbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -731,13 +731,13 @@ end; CALL merge_collation_func('高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func(_gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation 
for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin DROP procedure merge_collation_func; -- -- -- implicit collation && int create or replace procedure merge_collation_func(p1 int) @@ -746,7 +746,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(fgbk_bin, p1) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(fgbk_bin, p1) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -760,7 +760,7 @@ end; / CALL merge_collation_func(100); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin DROP procedure merge_collation_func; -- -- -- explicit collation && string create or replace procedure merge_collation_func(p1 text) @@ -769,7 +769,7 @@ DECLARE concat_res text; collation_res text; CURSOR c1 IS - SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; + SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; begin OPEN c1; LOOP @@ -790,41 +790,41 @@ DROP procedure merge_collation_func; create or replace procedure merge_collation_func(p1 text) as begin - SELECT CONCAT(futf8_uni, p1) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(futf8_uni, p1) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / CALL merge_collation_func('高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- conflict 
+SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- conflict CALL merge_collation_func(_gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_unicode_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_unicode_ci CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- utf8mb4_unicode_ci +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- utf8mb4_unicode_ci DROP procedure merge_collation_func; -- -- -- implicit collation create or replace procedure merge_collation_func(p1 text) as begin - SELECT CONCAT(p1, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(p1, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / CALL merge_collation_func('高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func('高斯DB' collate utf8mb4_general_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func(_gbk'高斯DB'); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin CALL merge_collation_func(_gbk'高斯DB' collate gbk_chinese_ci); -SELECT @concat_res,@collation_res,collation for(@concat_res) real_collation; -- gbk_bin +SELECT @concat_res,@collation_res,pg_collation_for(@concat_res) real_collation; -- gbk_bin DROP procedure merge_collation_func; -- -- -- explicit collation create or replace procedure 
merge_collation_func(p1 text) as begin - SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; + SELECT CONCAT(futf8_uni, p1 COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns into @concat_res,@collation_res; end; / @@ -833,54 +833,54 @@ DROP procedure merge_collation_func; -- -- concat for DERIVATION -- -- -- same charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci -SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci -SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT('高斯DB', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, 123) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(123, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; 
-SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, NULL) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(NULL, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(futf8_bin, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci +SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, NULL) 
result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- diff charset & diff DERIVATION -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- conflict in concat inside -SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, collation for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci -SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, collation for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci -SELECT CONCAT(CONCAT(futf8_gen, fgbk_chi), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, CONCAT(futf8_gen, fgbk_chi)) result, collation for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, opengauss_version()) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, '高斯DB') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT('高斯DB', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT 
CONCAT(fgbk_chi, 123) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(123, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, NULL) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(NULL, fgbk_bin) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), CONCAT(fgbk_chi, fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(fgbk_chi, fgb18030_chi), CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(fgbk_chi, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside +SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) result, pg_collation_for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci +SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- gbk_chinese_ci +SELECT CONCAT(CONCAT(futf8_gen, fgbk_chi), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(futf8_uni, CONCAT(futf8_gen, fgbk_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict +SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT 
CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT('高斯DB', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(123, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, DATE '2023-05-01') result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(DATE '2023-05-01', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, NULL) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(NULL, fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- test explicit collate on concat -- -- -- same charset & implicit collation -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT 
CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, collation for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -- -- test explicit collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR @@ -889,43 +889,43 @@ SELECT CAST('DEADBEEF' AS blob) COLLATE "binary" result; -- -- case when -- -- -- condition same charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (futf8_gen) ELSE (futf8_uni) END result, 
pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE WHEN (futf8_gen = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -- null collation -SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_gen) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- null collation +SELECT CASE futf8_gen WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- condition same charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (fgbk_chi = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_uni) END result, 
pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (futf8_bin = futf8_uni) THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_chi WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE futf8_bin WHEN futf8_uni THEN (futf8_uni) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- -- -- condition diff charset & result same charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, collation 
for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_uni WHEN fgbk_bin THEN (futf8_bin) ELSE (futf8_uni) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN fgb18030_bin THEN (fgbk_chi) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- condition diff charset & result diff charset & implicit collation -- -- -- -- bool condition -SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE WHEN (futf8_uni = fgbk_bin) THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = futf8_uni) THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE WHEN (fgbk_bin = fgb18030_bin) THEN (fgb18030_chi) ELSE (fgbk_chi) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- case condition -SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -SELECT 
CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE futf8_uni WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgbk_bin WHEN futf8_uni THEN (futf8_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CASE fgb18030_chi WHEN fgbk_chi THEN (fgb18030_bin) ELSE (fgbk_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- -- conflict -SELECT CASE _gb18030'高斯' WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CASE _gb18030'高斯' WHEN fgbk_bin THEN (fgbk_bin) ELSE (futf8_bin) END result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- in -- -- -- column utf8 EXPLAIN (costs off) @@ -959,16 +959,16 @@ SELECT futf8_bin FROM t_diff_charset_columns t1 WHERE fgbk_chi in (SELECT t2.fgb SELECT futf8_bin FROM t_diff_charset_columns t1 WHERE fgbk_chi in (SELECT t2.fgb18030_bin FROM t_diff_charset_columns t2); -- ERROR -- -- COALESCE -SELECT COALESCE(fgbk_chi, futf8_bin) result, collation for(result) FROM t_diff_charset_columns; -SELECT COALESCE(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; -SELECT COALESCE(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict -SELECT COALESCE(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT COALESCE(fgbk_chi, futf8_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT COALESCE(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict +SELECT COALESCE(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM 
t_diff_charset_columns; -- ERROR -- -- GREATEST -SELECT GREATEST(fgbk_chi, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; -SELECT GREATEST(futf8_gen, futf8_bin) result, collation for (result) FROM t_diff_charset_columns; -SELECT GREATEST(futf8_uni, futf8_gen) result, collation for (result) FROM t_diff_charset_columns; -- conflict -SELECT GREATEST(fgbk_chi, fgb18030_chi) result, collation for (result) FROM t_diff_charset_columns; -- ERROR +SELECT GREATEST(fgbk_chi, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_gen, futf8_bin) result, pg_collation_for (result) FROM t_diff_charset_columns; +SELECT GREATEST(futf8_uni, futf8_gen) result, pg_collation_for (result) FROM t_diff_charset_columns; -- conflict +SELECT GREATEST(fgbk_chi, fgb18030_chi) result, pg_collation_for (result) FROM t_diff_charset_columns; -- ERROR -- -- rowcompare SELECT futf8_bin, futf8_uni, fgbk_bin, fgbk_chi, fgb18030_bin, fgb18030_chi FROM t_diff_charset_columns @@ -1040,24 +1040,24 @@ select _utf8mb4'GS', GROUP_CONCAT(fgbk_bin, fgbk_chi), fgbk_bin FROM t_diff_char -- -- test COLLATE for function -- -- -- for string function -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE utf8mb4_general_ci result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE gbk_chinese_ci result, collation for(result); -- ERROR -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE utf8mb4_bin result, collation for(result); -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE gbk_chinese_ci result, collation for(result); -- ERROR -SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "binary" result, collation for(result); -- ERROR -SELECT CONCAT(futf8_uni , futf8_gen) COLLATE utf8mb4_bin result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci result, collation 
for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE utf8mb4_general_ci result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB' COLLATE gbk_chinese_ci) COLLATE gbk_chinese_ci result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE utf8mb4_bin result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE gbk_chinese_ci result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR +SELECT CONCAT(futf8_uni , futf8_gen) COLLATE utf8mb4_bin result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- for binary argument string function -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, collation for(result); -- ERROR -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, collation for(result); -SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, collation for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, collation for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); +SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- for convert function -SELECT CONVERT(futf8_uni USING 'GBK') result, collation for(result) FROM 
t_diff_charset_columns; +SELECT CONVERT(futf8_uni USING 'GBK') result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin result FROM t_diff_charset_columns; SELECT CONVERT(futf8_uni USING 'GBK') COLLATE "binary" result FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(CONVERT(futf8_uni USING 'GBK'), futf8_uni) result, collation for(result) FROM t_diff_charset_columns; -SELECT CONCAT(CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin, futf8_uni) result, collation for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONVERT(futf8_uni USING 'GBK'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; DROP TABLE t_diff_charset_columns; diff --git a/contrib/dolphin/sql/column_name.sql b/contrib/dolphin/sql/column_name.sql index 050dfc3ef..18cb9ebc7 100644 --- a/contrib/dolphin/sql/column_name.sql +++ b/contrib/dolphin/sql/column_name.sql @@ -12,7 +12,7 @@ select xxxx.column_name_case_test.t1.bBb from column_name_case_test.t1; drop table if exists a1; drop table if exists a2; select currenT_User into a1; -select session_UsEr into a2; +select current_UsEr into a2; --create use some index drop table if exists warehouse_t22; @@ -503,7 +503,7 @@ select xxxx.column_name_case_test.t1.bBb from column_name_case_test.t1; drop table if exists a1; drop table if exists a2; select currenT_User into a1; -select session_UsEr into a2; +select current_UsEr into a2; --create use some index drop table if exists warehouse_t22; diff --git a/contrib/dolphin/sql/db_b_date_time_functions.sql b/contrib/dolphin/sql/db_b_date_time_functions.sql index 983e9e21e..1898ec822 100644 --- a/contrib/dolphin/sql/db_b_date_time_functions.sql +++ b/contrib/dolphin/sql/db_b_date_time_functions.sql @@ -96,7 +96,6 @@ select now(7); set dolphin.b_db_timestamp = 0.0; -- test sysdate(precision) -select 
sysdate; select sysdate(); select sysdate(0); select sysdate(1); diff --git a/contrib/dolphin/sql/db_b_new_gram_test.sql b/contrib/dolphin/sql/db_b_new_gram_test.sql index 137fbf957..a24742ffd 100644 --- a/contrib/dolphin/sql/db_b_new_gram_test.sql +++ b/contrib/dolphin/sql/db_b_new_gram_test.sql @@ -481,7 +481,6 @@ insert into authid_t1(c1) values(1); select c1 as authid from authid_t1 as authid; drop table authid; drop table authid_t1; -create table body(body int); CREATE TABLE `Student` ( `Sno` varchar(3) NOT NULL, diff --git a/contrib/dolphin/sql/kwlist.sql b/contrib/dolphin/sql/kwlist.sql new file mode 100644 index 000000000..e05fa815c --- /dev/null +++ b/contrib/dolphin/sql/kwlist.sql @@ -0,0 +1,195 @@ +create schema keyword_test; +set search_path to keyword_test; +/* cast */ +create table cast(cast int); +insert into cast values(1); +select cast from cast; + +/* last_day */ +create table last_day(last_day int); +insert into last_day values(1); +select last_day from last_day; + +/* less */ +create table less(less int); +insert into less values(1); +select less from less; + +/* modify */ +create table modify(modify int); +insert into modify values(1); +select modify from modify; + +/* modify */ +create table modify(modify int); +insert into modify values(1); +select modify from modify; + +/* notnull */ +create table notnull(notnull int); +insert into notnull values(1); +select notnull from notnull; + +/* recyclebin */ +create table recyclebin(recyclebin int); +insert into recyclebin values(1); +select recyclebin from recyclebin; + +/* analyse */ +create table analyse(analyse int); +insert into analyse values(1); +select analyse from analyse; +-- analyse unsupported +analyse; +explain analyse select 1; +analyse verbose; +alter table analyse analyse /* unsupported analyse keyword*/ verbose partition all; + +/* buckets */ +create table buckets(buckets int); +insert into buckets values(1); +select buckets from buckets; +select * from buckets buckets(1,2,3,4) /* 
unsupported buckets keyword*/; +alter node group test copy buckets from test2 /* unsupported */; +create node group mergegroup1 with (datanode1, datanode3, datanode5); /* unsupported */ + +/* compact */ +create table compact(compact int); +insert into compact values(1); +select compact from compact; +analyze compact test/* table name */; +-- compact unsupported +create table test(id int) with (row_format=compact); + +/* rownum */ +create table rownum(rownum int); +insert into rownum values(1); +select rownum from rownum; +select rownum rownum from rownum; /* rownum can be alias name */ + +/* user */ +create table user(user int); +insert into user values(1); +select user from user; + +select user(); --success +select user; --failed +create user user identified by 'xxxx@xx1xx'; -- username user can be created +create user mapping for user server mot_server; -- username: user +create user mapping for current_user server mot_server; -- username: current_user +select count(distinct usename) from pg_user_mappings where usename in ('user', current_user()); -- expected: 2 +alter table user disable trigger user; -- success: disable user trigger +alter table user disable trigger "user"; -- failed: cant not find trigger(named user) + +/* sysdate */ +create table sysdate(sysdate int); +insert into sysdate values(1); +select sysdate from sysdate; + +/* body */ +create table body(body int); +insert into body values(1); +select body from body; + +/* collation */ +create table collation(collation int); +insert into collation values(1); +select collation from collation; + +/* minus */ +create table minus(minus int); +insert into minus values(1); +select minus from minus; + +/* any */ +create table any(any int); +insert into any values(1); +select any from any; + +/* do */ +create table do(do int); +insert into do values(1); +select do from do; +-- binary/prior do +create rule "test" as on select to t1 where binary do instead select * from t2; -- unsupported +create rule "test" as 
on select to t1 where prior do instead select * from t2; -- unsupported +create rule "test" as on select to t1 where binary do do instead select * from t2; -- t1 no exists +create rule "test" as on select to t1 where prior do do instead select * from t2; -- t1 no exists +create rule "test" as on select to t1 where (binary) do instead select * from t2; -- t1 no exists +create rule "test" as on select to t1 where (prior) do instead select * from t2; -- t1 no exists +create rule "test" as on select to t1 where test do instead select * from t2; -- t1 no exists +create rule "test" as on select to t1 where 1=1 do instead select * from t2; -- t1 no exists + +/* end */ +create table end(end int); +insert into end values(1); +select end from end; +select case when end then binary else binary end from end; -- unsupported +select case when end then binary else (binary) end from end; -- binary un exists +select case when end then binary end else binary end end from end; + +/* only */ +create table only(only int); +insert into only values(1); +select only from only; +select * from only test; -- scan only(alias: test) table +select * from only (test); -- scan test(scan InhOption: INH_NO) table + +/* verbose */ +create table verbose(verbose int); +insert into verbose values(1); +select verbose from verbose; +-- cluster +cluster verbose verbose; --unsupported +cluster verbose; --supported +cluster (verbose) verbose; --supported +cluster verbose partition (test); --supported +cluster (verbose) verbose partition (test); --supported +cluster; --supported +cluster (verbose); --supported +cluster (verbose) verbose on verbose; --supported +cluster verbose on verbose; --supported +-- vacuum +vacuum full verbose partition (test); --supported +vacuum full verbose subpartition (test); --supported +VACUUM full analyze verbose; --supported +vacuum (full,freeze,verbose) verbose; --supported +vacuum verbose; +vacuum full verbose; +vacuum full freeze verbose; --unsupported +vacuum full freeze 
verbose compact; --unsupported +-- analyse +analyze verbose; --supported +analyze verbose verbose; --unsupported +analyze (verbose) verbose; --supported +analyze verbose; --supported +analyze (verbose) verbose; --supported +analyze foreign tables; --supported +analyze (verbose) foreign tables; --supported +analyze verbose foreign tables; --unsupported + +create index verbose_index on verbose(verbose); + +/* excluded */ +create table excluded(excluded int); +insert into excluded values(1); +select excluded from excluded; +insert into excluded values (1) on duplicate key update excluded = excluded.excluded; +insert into excluded values (1) as excluded on duplicate key update excluded = excluded.excluded; -- ERROR +insert into excluded values (1) as t on duplicate key update excluded = excluded.excluded; + +-- name test +CREATE TABLE x (id int); +CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS ' + BEGIN + NEW.e := ''before trigger fired''::text; + return NEW; + END; +' LANGUAGE plpgsql; +CREATE TRIGGER "user" AFTER INSERT ON x FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); -- unsupported name +CREATE TRIGGER test AFTER INSERT ON x FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); +ALTER TRIGGER test ON x RENAME TO user; -- unsupported name + +reset search_path; +drop schema keyword_test cascade; +drop user user cascade; \ No newline at end of file diff --git a/contrib/dolphin/sql/show_create.sql b/contrib/dolphin/sql/show_create.sql index 583bd3b38..dced4334e 100644 --- a/contrib/dolphin/sql/show_create.sql +++ b/contrib/dolphin/sql/show_create.sql @@ -190,7 +190,7 @@ drop view tt1; create view tt20v as select * from coalesce(1,2) as c, - collation for ('x'::text) col, + pg_collation_for ('x'::text) col, current_date as d, cast(1+2 as int4) as i4, cast(1+2 as int8) as i8; diff --git a/contrib/dolphin/sql/test_system_user.sql b/contrib/dolphin/sql/test_system_user.sql index a5bb529bd..58600e20b 100644 --- a/contrib/dolphin/sql/test_system_user.sql +++ 
b/contrib/dolphin/sql/test_system_user.sql @@ -1,8 +1,6 @@ create schema test_system_user; set current_schema to 'test_system_user'; -select session_user; select session_user(); -select user; select user(); select system_user(); drop schema test_system_user cascade; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 614eb844a..c7b3c8bd8 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -232,4 +232,6 @@ RETURNS timestamptz LANGUAGE SQL IMMUTABLE STRICT as CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_time(bit) RETURNS time without time zone LANGUAGE SQL IMMUTABLE STRICT as -'select cast(cast($1 as text) as time without time zone)'; \ No newline at end of file +'select cast(cast($1 as text) as time without time zone)'; + +create or replace function pg_catalog."user"() returns name as 'select current_user' LANGUAGE 'sql' IMMUTABLE; \ No newline at end of file -- Gitee From d95b109285896f12ff4969c0b962777a7cdcb513 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 5 Dec 2023 11:40:05 +0800 Subject: [PATCH 099/434] Fix internal function bug. 
--- contrib/dolphin/Makefile | 2 +- contrib/dolphin/checklist/checklist.md | 1 + contrib/dolphin/plugin_postgres.cpp | 9 +- contrib/dolphin/plugin_utils/CMakeLists.txt | 2 +- contrib/dolphin/plugin_utils/Gen_fmgrtab.pl | 275 ++++++++++++++++++++ contrib/dolphin/plugin_utils/Makefile | 4 +- contrib/dolphin/plugin_utils/fmgr/fmgr.cpp | 33 ++- contrib/filelist.txt | 1 + 8 files changed, 319 insertions(+), 8 deletions(-) create mode 100644 contrib/dolphin/plugin_utils/Gen_fmgrtab.pl diff --git a/contrib/dolphin/Makefile b/contrib/dolphin/Makefile index 309b93d6b..28e39fd45 100644 --- a/contrib/dolphin/Makefile +++ b/contrib/dolphin/Makefile @@ -56,7 +56,7 @@ OBJS += $(vector)/vecfuncache.o OBJS += $(mb)/mbutils.o -OBJS += $(fmgr)/fmgr.o +OBJS += $(fmgr)/fmgr.o $(utils)/fmgrtab.o OBJS += $(executor)/execQual.o $(executor)/functions.o diff --git a/contrib/dolphin/checklist/checklist.md b/contrib/dolphin/checklist/checklist.md index 49945626c..ff6a0de79 100644 --- a/contrib/dolphin/checklist/checklist.md +++ b/contrib/dolphin/checklist/checklist.md @@ -98,6 +98,7 @@ |plugin_utils\adt|windowfuncs.cpp |src\common\backend\utils\adt\windowfuncs.cpp | |plugin_utils\fmgr|fmgr.cpp |src\common\backend\utils\fmgr\fmgr.cpp| |plugin_utils\mb|mbutils.cpp |src\common\backend\utils\mb\mbutils.cpp | +|plugin_utils|Gen_fmgrtab.pl |src\common\backend\utils\Gen_fmgrtab.pl | |plugin_vector|date.inl |src\gausskernel\runtime\vecexecutor\vecprimitive\date.inl | |plugin_vector|float.inl |src\gausskernel\runtime\vecexecutor\vecprimitive\float.inl | |plugin_vector|int4.inl |src\gausskernel\runtime\vecexecutor\vecprimitive\int4.inl | diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index 9c0f4b575..bda3cf292 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -140,6 +140,9 @@ extern void initBSQLBuiltinFuncs(); extern struct HTAB* b_nameHash; extern struct HTAB* b_oidHash; extern RegExternFunc 
b_plpgsql_function_table[3]; +extern int tmp_b_fmgr_nbuiltins; +extern FmgrBuiltin tmp_b_fmgr_builtins[]; + extern bool isAllTempObjects(Node* parse_tree, const char* query_string, bool sent_to_remote); extern void ts_check_feature_disable(); extern void ExecAlterDatabaseSetStmt(Node* parse_tree, const char* query_string, bool sent_to_remote); @@ -329,7 +332,11 @@ void _PG_init(void) if (b_oidHash == NULL || b_nameHash == NULL) { initBSQLBuiltinFuncs(); } - + if (b_fmgr_builtins == NULL) { + b_fmgr_builtins = tmp_b_fmgr_builtins; + pg_memory_barrier(); /* make sure b_fmgr_builtins has been assigned before b_fmgr_nbuiltins */ + b_fmgr_nbuiltins = tmp_b_fmgr_nbuiltins; + } AutoMutexLock nameHashLock(&gNameHashLock); nameHashLock.lock(); if (lockNameHash == NULL) diff --git a/contrib/dolphin/plugin_utils/CMakeLists.txt b/contrib/dolphin/plugin_utils/CMakeLists.txt index 2caf9bdfb..fa19c2f90 100644 --- a/contrib/dolphin/plugin_utils/CMakeLists.txt +++ b/contrib/dolphin/plugin_utils/CMakeLists.txt @@ -17,7 +17,7 @@ add_subdirectory(fmgr) add_subdirectory(mb) execute_process( - COMMAND perl ${PROJECT_SRC_DIR}/common/backend/utils/Gen_fmgrtab.pl ${CMAKE_CURRENT_SOURCE_DIR}/../include/builtin_funcs.ini + COMMAND perl ${CMAKE_CURRENT_SOURCE_DIR}/Gen_fmgrtab.pl ${CMAKE_CURRENT_SOURCE_DIR}/../include/builtin_funcs.ini WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) diff --git a/contrib/dolphin/plugin_utils/Gen_fmgrtab.pl b/contrib/dolphin/plugin_utils/Gen_fmgrtab.pl new file mode 100644 index 000000000..83ecdc145 --- /dev/null +++ b/contrib/dolphin/plugin_utils/Gen_fmgrtab.pl @@ -0,0 +1,275 @@ +#! 
/usr/bin/perl -w +#------------------------------------------------------------------------- +# +# Gen_fmgrtab.pl +# Perl script that generates fmgroids.h and fmgrtab.c from pg_builtin_proc.cpp +# +# Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# src/backend/utils/Gen_fmgrtab.pl +# +#------------------------------------------------------------------------- +use strict; +use warnings; + +# Collect arguments +my @infile; # builtin_funcs.ini +my $output_path = ''; +my %catalog; +$catalog{builtindata} = []; +my $nfuncs = 0; +my $nfmgrfuncs = 0; + +while (@ARGV) +{ + my $arg = shift @ARGV; + if ($arg !~ /^-/) + { + push @infile, $arg; + } + elsif ($arg =~ /^-o/) + { + $output_path = length($arg) > 2 ? substr($arg, 2) : shift @ARGV; + } + else + { + usage(); + } +} + +# Make sure output_path ends in a slash. +if ($output_path ne '' && substr($output_path, -1) ne '/') +{ + $output_path .= '/'; +} +foreach my $singlefile (@infile) +{ + open(INPUT_FILE, '<', $singlefile) || die "$singlefile: $!"; + + while () + { + s;/\*(.|\n)*\*/;;g; + if (m;/\*;) + { + # handle multi-line comments properly. + my $next_line = ; + die "$singlefile: ends within C-style comment\n" + if !defined $next_line; + $_ .= $next_line; + redo; + } + # Strip useless whitespace and trailing semicolons. 
+ chomp; + s/^\s+//; + s/;\s*$//; + s/\s+/ /g; + if (/AddBuiltinFunc\((.+?)\)\)/) + { + push @{ $catalog{builtindata} }, $1; + $nfuncs = $nfuncs + 1; + } + } +} + +#parse all builtin functions +my @fmgr = (); +my $foid; #[0] +my $funcName; #[1] +my $nargs; #[2] +my $strict; #[3] +my $retset; #[4] +my $prosrc; #[25] +my $prorettype; #[6] + +foreach my $row (@{ $catalog{builtindata} }) +{ + if ($row =~ /_0\(([0-9A-Z]+)\),\s+_1\(\"(\S+)\"\),\s+_2\((\d+)\),\s+_3\((\w+)\),\s+_4\((\w+)\),\s+.+?_6\((\d+)\),.+?_25\(\"(\w+)\"\),/) + { + $foid = $1; + $funcName = $2; + $nargs = $3; + $strict = $4; + $retset = $5; + $prosrc = $7; + $prorettype = $6; + + push @fmgr, + { oid => $foid, + proname =>$funcName, + strict => $strict, + retset => $retset, + nargs => $nargs, + prosrc => $prosrc, + prorettype => $prorettype + }; + + } + + # Hack to work around memory leak in some versions of Perl + $row = undef; +} +# Emit headers for both files +my $tmpext = ".tmp$$"; +my $oidsfile = $output_path . 'fmgroids.h'; +my $tabfile = $output_path . 'fmgrtab.cpp'; +my $builtinfile = $output_path . 'pg_builtin_proc.h'; + +open H, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!"; +open T, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!"; +open B, '>', $builtinfile . $tmpext or die "Could not open $builtinfile$tmpext: $!"; + +print H +qq|/*------------------------------------------------------------------------- + * + * fmgroids.h + * Macros that define the OIDs of built-in functions. + * + * These macros can be used to avoid a catalog lookup when a specific + * fmgr-callable function needs to be referenced. + * + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * NOTES + * ****************************** + * *** DO NOT EDIT THIS FILE! 
*** + * ****************************** + * + * It has been GENERATED by $0 + * from $infile[0] + * + *------------------------------------------------------------------------- + */ +#ifndef FMGROIDS_H +#define FMGROIDS_H + +/* + * Constant macros for the OIDs of entries in pg_proc. + * + * NOTE: macros are named after the prosrc value, ie the actual C name + * of the implementing function, not the proname which may be overloaded. + * For example, we want to be able to assign different macro names to both + * char_text() and name_text() even though these both appear with proname + * 'text'. If the same C function appears in more than one pg_proc entry, + * its equivalent macro will be defined with the lowest OID among those + * entries. + */ +|; + +print T +qq|/*------------------------------------------------------------------------- + * + * fmgrtab.c + * The function manager's table of internal functions. + * + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * NOTES + * + * ****************************** + * *** DO NOT EDIT THIS FILE! *** + * ****************************** + * + * It has been GENERATED by $0 + * from $infile[0] + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "utils/fmgrtab.h" + +|; + +print B +qq|/*------------------------------------------------------------------------- + * + * pg_builtin_proc.h + * Macros that define the declares of built-in functions. + * + * NOTES + * ****************************** + * *** DO NOT EDIT THIS FILE! 
*** + * ****************************** + * + * It has been GENERATED by $0 + * from $infile[0] + * + *------------------------------------------------------------------------- + */ +|; + +# Emit #define's and extern's -- only one per prosrc value +my %seenit; +foreach my $s (sort { $a->{proname} cmp $b->{proname} } @fmgr) +{ + next if $seenit{ $s->{prosrc} }; + $seenit{ $s->{prosrc} } = 1; + print H "#define F_" . uc $s->{prosrc} . " $s->{oid}\n"; + print T "extern Datum $s->{prosrc} (PG_FUNCTION_ARGS);\n"; + print B "extern Datum $s->{prosrc} (PG_FUNCTION_ARGS);\n"; +} + +# Create the fmgr_builtins table +print T "\nFmgrBuiltin tmp_b_fmgr_builtins[] = {\n"; +foreach my $s (sort { $a->{prosrc} cmp $b->{prosrc} } @fmgr) +{ + print T +" { $s->{oid}, \"$s->{prosrc}\", $s->{nargs}, $s->{strict}, $s->{retset}, $s->{prosrc}, $s->{prorettype} },\n"; + $nfmgrfuncs = $nfmgrfuncs + 1; +} + +print H "\n#define nBuiltinFuncs $nfuncs\n"; +print H "\n#define NFMGRFUNCS $nfmgrfuncs\n"; + + +# And add the file footers. +print H "\n#endif /* FMGROIDS_H */\n"; + +print T +qq| /* dummy entry is easier than getting rid of comma after last real one */ + /* (not that there has ever been anything wrong with *having* a + comma after the last field in an array initializer) */ + { 0, NULL, 0, false, false, NULL, InvalidOid} +}; + +/* Note fmgr_nbuiltins excludes the dummy entry */ +int tmp_b_fmgr_nbuiltins = (sizeof(tmp_b_fmgr_builtins) / sizeof(FmgrBuiltin)) - 1; +|; + +close(H); +close(T); +close(B); + +# Finally, rename the completed files into place. +RenameTempFile($oidsfile, $tmpext); +RenameTempFile($tabfile, $tmpext); +RenameTempFile($builtinfile, $tmpext); + +sub RenameTempFile +{ + my $final_name = shift; + my $extension = shift; + my $temp_name = $final_name . $extension; + print "Writing $final_name\n"; + rename($temp_name, $final_name) || die "rename: $temp_name: $!"; +} + +sub usage +{ + die <. 
+EOM +} + +exit 0; diff --git a/contrib/dolphin/plugin_utils/Makefile b/contrib/dolphin/plugin_utils/Makefile index 5965d217d..343933ffd 100644 --- a/contrib/dolphin/plugin_utils/Makefile +++ b/contrib/dolphin/plugin_utils/Makefile @@ -31,9 +31,9 @@ $(SUBDIRS:%=%-recursive): fmgroids.h # see explanation in ../parser/Makefile fmgroids.h: fmgrtab.cpp - fmgrtab.cpp: $(top_builddir)/src/common/backend/utils/Gen_fmgrtab.pl $(top_builddir)/$(subdir)/../include/builtin_funcs.ini + fmgrtab.cpp: $(top_builddir)/$(subdir)/Gen_fmgrtab.pl $(top_builddir)/$(subdir)/../include/builtin_funcs.ini $(PERL) $< $(top_builddir)/$(subdir)/../include/builtin_funcs.ini - pg_builtin_proc.h: $(top_builddir)/src/common/backend/utils/Gen_fmgrtab.pl $(top_builddir)/$(subdir)/../include/builtin_funcs.ini + pg_builtin_proc.h: $(top_builddir)/$(subdir)/Gen_fmgrtab.pl $(top_builddir)/$(subdir)/../include/builtin_funcs.ini $(PERL) $< $(top_builddir)/$(subdir)/../include/builtin_funcs.ini clean maintainer-clean: diff --git a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp index d99f0150b..1b9cf7511 100644 --- a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp +++ b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp @@ -48,6 +48,17 @@ THR_LOCAL PGDLLIMPORT needs_fmgr_hook_type needs_fmgr_hook = NULL; THR_LOCAL PGDLLIMPORT fmgr_hook_type fmgr_hook = NULL; extern void InitFuncCallUDFInfo(FunctionCallInfoData* fcinfo, int argN, bool setFuncPtr); +#ifdef DOLPHIN +extern int tmp_b_fmgr_nbuiltins; +extern FmgrBuiltin tmp_b_fmgr_builtins[]; +#else +/* for dolphin and whale plugin */ +int a_fmgr_nbuiltins = -1; +int b_fmgr_nbuiltins = -1; +FmgrBuiltin *a_fmgr_builtins = NULL; +FmgrBuiltin *b_fmgr_builtins = NULL; +#endif + /* * Declaration for old-style function pointer type. This is now used only * in fmgr_oldstyle() and is no longer exported. 
@@ -202,14 +213,30 @@ const FmgrBuiltin* fmgr_isbuiltin(Oid id) */ static const FmgrBuiltin* fmgr_lookupByName(const char* name) { +#ifdef DOLPHIN + int nbuiltins = tmp_b_fmgr_nbuiltins; + const FmgrBuiltin *builtinfunc = tmp_b_fmgr_builtins; +#else + int nbuiltins = fmgr_nbuiltins; + const FmgrBuiltin *builtinfunc = fmgr_builtins; + if (CUR_THR_IS_WORKER() && IsNormalProcessingMode()) { + if (a_fmgr_nbuiltins > 0 && DB_IS_CMPT(A_FORMAT)) { + nbuiltins = a_fmgr_nbuiltins; + builtinfunc = a_fmgr_builtins; + } else if (b_fmgr_nbuiltins > 0 && DB_IS_CMPT(B_FORMAT)) { + nbuiltins = b_fmgr_nbuiltins; + builtinfunc = b_fmgr_builtins; + } + } +#endif int low = 0; - int high = fmgr_nbuiltins - 1; + int high = nbuiltins - 1; int ret; while (low <= high) { int i = (high + low) / 2; - ret = strcmp(name, fmgr_builtins[i].funcName); + ret = strcmp(name, builtinfunc[i].funcName); if (ret == 0) { - return fmgr_builtins + i; + return builtinfunc + i; } else if (ret > 0) { low = i + 1; } else { diff --git a/contrib/filelist.txt b/contrib/filelist.txt index c74440bb0..96fc9f307 100644 --- a/contrib/filelist.txt +++ b/contrib/filelist.txt @@ -253,3 +253,4 @@ windowfuncs.cpp year.cpp year.h alter.cpp +Gen_fmgrtab.pl -- Gitee From ea9271d19f49cb0a27bba201ef1168f7f8caf0c9 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 5 Dec 2023 18:35:49 +0800 Subject: [PATCH 100/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=95=B0=E5=80=BC?= =?UTF-8?q?=E8=BD=ACtime=E6=98=BE=E5=BC=8F=E8=A7=84=E5=88=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/conv_cast_test.out | 46 +++-- contrib/dolphin/include/plugin_utils/date.h | 1 + .../dolphin/plugin_parser/parse_coerce.cpp | 62 ++++++ contrib/dolphin/plugin_utils/adt/date.cpp | 191 +++++++++++++++++- .../dolphin/plugin_utils/adt/timestamp.cpp | 4 +- .../rollback_script/dolphin--3.0--2.0.sql | 14 ++ .../upgrade_script/dolphin--2.0--3.0.sql | 27 ++- 7 files changed, 320 insertions(+), 25 
deletions(-) diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 9255c2660..90b62f7a6 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -261,17 +261,15 @@ select '65535'::uint2::time; select '4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time - time --------------- - 429497:13:35 + time +------ + (1 row) select '18446744073709551615'::uint8::time; -WARNING: invalid input syntax for type time: "18446744073709551615" -CONTEXT: referenced column: time - time ----------- - 00:00:00 + time +----------- + -00:00:01 (1 row) select '4294967295'::uint1::time; @@ -293,17 +291,17 @@ CONTEXT: referenced column: time select '4294967295'::uint4::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time - time --------------- - 429497:13:35 + time +------ + (1 row) select '4294967295'::uint8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time - time --------------- - 429497:13:35 + time +------ + (1 row) select '4294967295'::int1::time; @@ -321,21 +319,31 @@ WARNING: value "4294967295" is out of range for type smallint LINE 1: select '4294967295'::int2::time; ^ CONTEXT: referenced column: time -ERROR: time out of range +WARNING: invalid input syntax for type time: "32767" CONTEXT: referenced column: time + time +------ + +(1 row) + select '4294967295'::int4::time; WARNING: value "4294967295" is out of range for type integer LINE 1: select '4294967295'::int4::time; ^ CONTEXT: referenced column: time -ERROR: time out of range +WARNING: invalid input syntax for type time: "2147483647" CONTEXT: referenced column: time + time +----------- + 838:59:59 +(1 row) + select '4294967295'::int8::time; WARNING: invalid input syntax for type time: "4294967295" CONTEXT: referenced column: time - time --------------- - 429497:13:35 + 
time +------ + (1 row) select conv('-211111111111111111111111111111111111111111111111111111111177777',10,8); diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 40b73919e..2f834b67d 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -122,6 +122,7 @@ typedef enum extern Datum time_internal(PG_FUNCTION_ARGS, char* str, int is_time_sconst, TimeErrorType* time_error_type); +extern Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull); char* parser_function_input(Datum txt, Oid oid); #endif diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index eb628c823..56c01e46e 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -87,6 +87,7 @@ static const doConvert convertFunctions[convertFunctionsCount] = {&String2Others #define CAST_ENUM_IDX 22 #define ENUM_CAST_IDX 19 #define CAST_SIGNED_IDX 16 +#define NUM_CAST_TIME_IDX 11 static const char* castFunction[CAST_FUNCTION_ROW][CAST_FUNCTION_COLUMN] = {{"i1_cast_ui1", "i1_cast_ui2", "i1_cast_ui4", "i1_cast_ui8"}, {"i2_cast_ui1", "i2_cast_ui2", "i2_cast_ui4", "i2_cast_ui8"}, @@ -119,6 +120,11 @@ static const char* enumCastFunction[ENUM_CAST_IDX] = {"enum_bit", "enum_int1", " "enum_set", "enum_uint1", "enum_uint2", "enum_uint4", "enum_uint8", "enum_year", "enum_varlena"}; +static const char* numCastTimeFunction[NUM_CAST_TIME_IDX] = {"int8_cast_time", "int16_cast_time", "int32_cast_time", + "int64_cast_time", "uint8_cast_time", "uint16_cast_time", + "uint32_cast_time", "uint64_cast_time", "float4_cast_time", + "float8_cast_time", "numeric_cast_time"}; + typedef enum { INVALID_COLUMN = -1, UINT1, @@ -188,6 +194,21 @@ typedef enum { S_TEXT, S_VARLENA } CastSignedIdx; + +typedef enum { + N_INVALID_IDX = -1, + N_INT1, + N_INT2, + N_INT4, + N_INT8, + N_UINT1, + N_UINT2, + N_UINT4, + 
N_UINT8, + N_FLOAT4, + N_FLOAT8, + N_NUMERIC +} NumCastIdx; #endif /* * @Description: same as get_element_type() except this reports error @@ -3314,6 +3335,45 @@ Oid findSignedExplicitCastFunction(Oid sourceTypeId, Oid funcid) return (idx == INVALID_IDX) ? funcid : get_func_oid(castSignedFunction[idx], PG_CATALOG_NAMESPACE, NULL); } +int findNumTimeFunctionIdx(Oid typeId) +{ + switch (typeId) { + case INT1OID: + return N_INT1; + case INT2OID: + return N_INT2; + case INT4OID: + return N_INT4; + case INT8OID: + return N_INT8; + case FLOAT4OID: + return N_FLOAT4; + case FLOAT8OID: + return N_FLOAT8; + case NUMERICOID: + return N_NUMERIC; + default: + break; + } + if (typeId == get_typeoid(PG_CATALOG_NAMESPACE, "uint1")) { + return N_UINT1; + } else if (typeId == get_typeoid(PG_CATALOG_NAMESPACE, "uint2")) { + return N_UINT2; + } else if (typeId == get_typeoid(PG_CATALOG_NAMESPACE, "uint4")) { + return N_UINT4; + } else if (typeId == get_typeoid(PG_CATALOG_NAMESPACE, "uint8")) { + return N_UINT8; + } else { + return N_INVALID_IDX; + } +} + +Oid findNumTimeExplicitCastFunction(Oid sourceTypeId, Oid funcid) +{ + int idx = findNumTimeFunctionIdx(sourceTypeId); + return (idx == INVALID_IDX) ? 
funcid : get_func_oid(numCastTimeFunction[idx], PG_CATALOG_NAMESPACE, NULL); +} + int findEnumFunctionIdx(Oid typeId) { switch (typeId) { @@ -3482,6 +3542,8 @@ void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId *funcId = findSignedExplicitCastFunction(sourceTypeId, defaultFuncId); } else if (sourceTypeId == BITOID) { *funcId = findBitCastTimeFunction(targetTypeId, defaultFuncId); + } else if (targetTypeId == TIMEOID) { + *funcId = findNumTimeExplicitCastFunction(sourceTypeId, defaultFuncId); } else { *funcId = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, defaultFuncId); } diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 73feea1b8..d52cd2887 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -89,6 +89,31 @@ extern "C" DLL_PUBLIC Datum numeric_b_format_time(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(float8_b_format_time); extern "C" DLL_PUBLIC Datum float8_b_format_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int8_cast_time); +extern "C" DLL_PUBLIC Datum int8_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int16_cast_time); +extern "C" DLL_PUBLIC Datum int16_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int32_cast_time); +extern "C" DLL_PUBLIC Datum int32_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int64_cast_time); +extern "C" DLL_PUBLIC Datum int64_cast_time(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(uint8_cast_time); +extern "C" DLL_PUBLIC Datum uint8_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint16_cast_time); +extern "C" DLL_PUBLIC Datum uint16_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint32_cast_time); +extern "C" DLL_PUBLIC Datum uint32_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint64_cast_time); +extern "C" DLL_PUBLIC Datum uint64_cast_time(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(float4_cast_time); 
+extern "C" DLL_PUBLIC Datum float4_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(float8_cast_time); +extern "C" DLL_PUBLIC Datum float8_cast_time(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(numeric_cast_time); +extern "C" DLL_PUBLIC Datum numeric_cast_time(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1_PUBLIC(int8_b_format_date); extern "C" DLL_PUBLIC Datum int8_b_format_date(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(int16_b_format_date); @@ -1706,14 +1731,15 @@ char* parser_function_input(Datum txt, Oid oid) return DatumGetCString(OidOutputFunctionCall(typeOutput, txt)); } - /* * text_time_explicit, such as select cast('23:65:66' as time) * */ Datum text_time_explicit(PG_FUNCTION_ARGS) { - char* input_str = parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]); + char* input_str = fcinfo->argTypes[0] ? + parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]) : + PG_GETARG_CSTRING(0); TimeErrorType time_error_type = TIME_CORRECT; Datum datum_internal = time_internal(fcinfo, input_str, TEXT_TIME_EXPLICIT, &time_error_type); if (time_error_type == TIME_INCORRECT) { @@ -1988,6 +2014,42 @@ Datum float8_b_format_time(PG_FUNCTION_ARGS) return DirectFunctionCall3(time_in, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); } +Datum numeric_cast_time(PG_FUNCTION_ARGS) +{ + Numeric n = PG_GETARG_NUMERIC(0); + char *str = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(n))); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; +} + +Datum float8_cast_time(PG_FUNCTION_ARGS) +{ + float8 n = PG_GETARG_FLOAT8(0); + char *str = DatumGetCString(DirectFunctionCall1(float8out, Float8GetDatum(n))); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if 
(time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; +} + +Datum float4_cast_time(PG_FUNCTION_ARGS) +{ + float4 n = PG_GETARG_FLOAT4(0); + char *str = DatumGetCString(DirectFunctionCall1(float4out, Float4GetDatum(n))); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; +} + Datum uint8_b_format_time(PG_FUNCTION_ARGS) { return DirectFunctionCall1(uint64_b_format_time, UInt64GetDatum((uint64)PG_GETARG_UINT8(0))); @@ -2010,6 +2072,65 @@ Datum uint64_b_format_time(PG_FUNCTION_ARGS) return DirectFunctionCall3(time_in, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); } +Datum uint8_cast_time(PG_FUNCTION_ARGS) +{ + uint64 number = (uint64)PG_GETARG_UINT8(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum uint16_cast_time(PG_FUNCTION_ARGS) +{ + uint64 number = (uint64)PG_GETARG_UINT16(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum uint32_cast_time(PG_FUNCTION_ARGS) +{ + uint64 number = (uint64)PG_GETARG_UINT32(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum uint64_cast_time(PG_FUNCTION_ARGS) +{ + uint64 number = (uint64)PG_GETARG_UINT64(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum uint_cast_time_internal(PG_FUNCTION_ARGS, uint64 number, bool* isnull) +{ + char *str = DatumGetCString(DirectFunctionCall1(uint8out, UInt64GetDatum(number))); + 
TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + *isnull = true; + } + return datum_internal; +} + Datum int8_b_format_time(PG_FUNCTION_ARGS) { return DirectFunctionCall1(int32_b_format_time, Int32GetDatum((int32)PG_GETARG_INT8(0))); @@ -2024,6 +2145,7 @@ Datum int16_b_format_time(PG_FUNCTION_ARGS) Datum int32_b_format_time(PG_FUNCTION_ARGS) { int4 time = PG_GETARG_INT32(0); + int errlevel = (SQL_MODE_STRICT() && !fcinfo->can_ignore) ? ERROR : WARNING; TimeADT result; fsec_t fsec = 0; int dterr; @@ -2032,7 +2154,7 @@ Datum int32_b_format_time(PG_FUNCTION_ARGS) time *= sign; dterr = int32_b_format_time_internal(tm, false, time, &fsec); if (dterr) { - ereport(ERROR, + ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("time out of range"))); } tm2time(tm, 0, &result); @@ -2051,6 +2173,69 @@ Datum int64_b_format_time(PG_FUNCTION_ARGS) return DirectFunctionCall3(time_in, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); } +Datum int8_cast_time(PG_FUNCTION_ARGS) +{ + int64 number = (int64)PG_GETARG_INT8(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum int16_cast_time(PG_FUNCTION_ARGS) +{ + int64 number = (int64)PG_GETARG_INT16(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum int32_cast_time(PG_FUNCTION_ARGS) +{ + int64 number = (int64)PG_GETARG_INT32(0); + bool isnull = false; + Datum result = int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum int64_cast_time(PG_FUNCTION_ARGS) +{ + int64 number = PG_GETARG_INT64(0); + bool isnull = false; + Datum result = 
int_cast_time_internal(fcinfo, number, &isnull); + if (isnull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull) +{ + if (number >= (int64)pow_of_10[10]) { /* datetime: 0001-00-00 00-00-00 */ + Datum datetime = DirectFunctionCall1(int64_b_format_datetime, Int64GetDatum(number)); + return DirectFunctionCall1(timestamp_time, datetime); + } + char *str = DatumGetCString(DirectFunctionCall1(int8out, Int64GetDatum(number))); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + *isnull = true; + } + return datum_internal; +} + static char* adjust_b_format_time(char *str, int *timeSign, int *D, bool *hasD) { *timeSign = 1; diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 339afcd80..ec3b55b83 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -1593,7 +1593,7 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr #endif { char* str = PG_GETARG_CSTRING(0); - + int errlevel = (SQL_MODE_STRICT() && !fcinfo->can_ignore) ? 
ERROR : WARNING; #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif @@ -1655,7 +1655,7 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr switch (dtype) { case DTK_DATE: if (tm2timestamp(tm, fsec, &tz, &result) != 0) - ereport(ERROR, + ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range: \"%s\"", str))); break; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index da442bcb0..8414fb4c1 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -1,3 +1,17 @@ +DROP FUNCTION IF EXISTS pg_catalog.int8_cast_time(int1); +DROP FUNCTION IF EXISTS pg_catalog.int16_cast_time(int2); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_time(int4); +DROP FUNCTION IF EXISTS pg_catalog.int64_cast_time(int8); + +DROP FUNCTION IF EXISTS pg_catalog.uint8_cast_time(uint1); +DROP FUNCTION IF EXISTS pg_catalog.uint16_cast_time(uint2); +DROP FUNCTION IF EXISTS pg_catalog.uint32_cast_time(uint4); +DROP FUNCTION IF EXISTS pg_catalog.uint64_cast_time(uint8); + +DROP FUNCTION IF EXISTS pg_catalog.float4_cast_time(float4); +DROP FUNCTION IF EXISTS pg_catalog.float8_cast_time(float8); +DROP FUNCTION IF EXISTS pg_catalog.numeric_cast_time(numeric); + DROP FUNCTION IF EXISTS pg_catalog.bit_cast_time(bit); DROP FUNCTION IF EXISTS pg_catalog.bit_cast_timestamp(bit); DROP FUNCTION IF EXISTS pg_catalog.bit_cast_datetime(bit); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 614eb844a..bb7c3ce57 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -232,4 +232,29 @@ RETURNS timestamptz LANGUAGE SQL IMMUTABLE STRICT as CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_time(bit) RETURNS time without time zone LANGUAGE SQL IMMUTABLE STRICT 
as -'select cast(cast($1 as text) as time without time zone)'; \ No newline at end of file +'select cast(cast($1 as text) as time without time zone)'; + +CREATE OR REPLACE FUNCTION pg_catalog.int8_cast_time(int1) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int8_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.int16_cast_time(int2) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int16_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.int32_cast_time(int4) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int32_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.int64_cast_time(int8) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int64_cast_time'; + +CREATE OR REPLACE FUNCTION pg_catalog.uint8_cast_time(uint1) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint8_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.uint16_cast_time(uint2) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint16_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.uint32_cast_time(uint4) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint32_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.uint64_cast_time(uint8) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint64_cast_time'; + +CREATE OR REPLACE FUNCTION pg_catalog.float4_cast_time(float4) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float4_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.float8_cast_time(float8) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float8_cast_time'; +CREATE OR REPLACE FUNCTION pg_catalog.numeric_cast_time(numeric) +RETURNS time without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_cast_time'; \ No newline at end of file -- Gitee From 
cc0a45529ec0e67d35ea1b9aad6dbc4a207774dd Mon Sep 17 00:00:00 2001 From: Julong-Li <584147810@qq.com> Date: Wed, 6 Dec 2023 14:37:53 +0800 Subject: [PATCH 101/434] =?UTF-8?q?issue=E4=BF=AE=E6=94=B9=EF=BC=9A?= =?UTF-8?q?=E5=85=BC=E5=AE=B9mysql=20convert('str'=20USING=20encoding)?= =?UTF-8?q?=E7=9A=84=E8=A1=A8=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/convert.out | 12 +++++ .../dolphin/expected/charset_utf8mb4_b_db.out | 24 +++++----- contrib/dolphin/plugin_utils/mb/mbutils.cpp | 46 +++++++++++++++---- contrib/dolphin/sql/builtin_funcs/convert.sql | 2 + 4 files changed, 62 insertions(+), 22 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/convert.out b/contrib/dolphin/expected/builtin_funcs/convert.out index 29b4022f9..48bd0e9bf 100644 --- a/contrib/dolphin/expected/builtin_funcs/convert.out +++ b/contrib/dolphin/expected/builtin_funcs/convert.out @@ -64,6 +64,18 @@ select convert('测试' using utf8); 测试 (1 row) +select convert('测试' using gbk); + convert +--------- + 测试 +(1 row) + +select convert('测试' using latin1); + convert +--------- + ?? +(1 row) + select convert(11.1, decimal(10,3)); numeric --------- diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index 33d4b3ddb..47851e472 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -3747,15 +3747,15 @@ LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result... 
^ -- -- -- for convert function SELECT CONVERT(futf8_uni USING 'GBK') result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------+------------------ - 高斯db | gbk_chinese_ci + result | pg_collation_for +----------+------------------ + 楂樻柉db | gbk_chinese_ci (1 row) SELECT CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin result FROM t_diff_charset_columns; - result --------- - 高斯db + result +---------- + 楂樻柉db (1 row) SELECT CONVERT(futf8_uni USING 'GBK') COLLATE "binary" result FROM t_diff_charset_columns; -- ERROR @@ -3763,15 +3763,15 @@ ERROR: COLLATION "binary" is not valid for CHARACTER SET "GBK" LINE 1: SELECT CONVERT(futf8_uni USING 'GBK') COLLATE "binary" resul... ^ SELECT CONCAT(CONVERT(futf8_uni USING 'GBK'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------+-------------------- - 高斯db高斯db | utf8mb4_unicode_ci + result | pg_collation_for +----------------+-------------------- + 楂樻柉db高斯db | utf8mb4_unicode_ci (1 row) SELECT CONCAT(CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin, futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------+------------------ - 高斯db高斯db | gbk_bin + result | pg_collation_for +----------------+------------------ + 楂樻柉db高斯db | gbk_bin (1 row) DROP TABLE t_diff_charset_columns; diff --git a/contrib/dolphin/plugin_utils/mb/mbutils.cpp b/contrib/dolphin/plugin_utils/mb/mbutils.cpp index b1093a4f7..770300277 100644 --- a/contrib/dolphin/plugin_utils/mb/mbutils.cpp +++ b/contrib/dolphin/plugin_utils/mb/mbutils.cpp @@ -429,11 +429,25 @@ Datum pg_convert_to(PG_FUNCTION_ARGS) PG_RETURN_DATUM(result); } +/* + * pg_convert_to_text is designed to be compatible with the function convert('str' using 'encoding') in Mysql + * TEXT pg_convert_to_text(TEXT string, NAME dest_encoding_name) + * + * If a character in the input string has a corresponding encoding in both the original and + * 
destination encodings, the character remains unchanged. If there is no corresponding encoding + * in the destination encoding, the character is replaced with '?'. +*/ Datum pg_convert_to_text(PG_FUNCTION_ARGS) { char* string = PG_GETARG_CSTRING(0); char* dest_encoding_name = NameStr(*PG_GETARG_NAME(1)); int dest_encoding = pg_char_to_encoding(dest_encoding_name); + bool old_bulkload_compatible_illegal_chars = u_sess->cmd_cxt.bulkload_compatible_illegal_chars; + char* dest_str; + char* result_str; + int rc; + Datum result; + if (dest_encoding < 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -442,16 +456,28 @@ Datum pg_convert_to_text(PG_FUNCTION_ARGS) int src_encoding = GetDatabaseEncoding(); const char* src_str = VARDATA_ANY(string); - int len_src = VARSIZE_ANY_EXHDR(string);; - - char* dest_str = (char*)pg_do_encoding_conversion((unsigned char*)src_str, len_src, src_encoding, dest_encoding); - char* result = (char*)palloc(len_src + 1); - int rc = memcpy_s(result, len_src + 1, dest_str, len_src); - securec_check_c(rc, "\0", "\0"); - result[len_src] = '\0'; - text* ret = cstring_to_text(result); - if (NULL != ret) { - PG_RETURN_TEXT_P(ret); + int len_src = VARSIZE_ANY_EXHDR(string); + + u_sess->cmd_cxt.bulkload_compatible_illegal_chars = true; + PG_TRY(); + { + dest_str = (char*)pg_do_encoding_conversion((unsigned char*)src_str, len_src, src_encoding, dest_encoding); + result_str = (char*)palloc(len_src + 1); + rc = memcpy_s(result_str, len_src + 1, dest_str, len_src); + securec_check_c(rc, "\0", "\0"); + result_str[len_src] = '\0'; + result = DirectFunctionCall2(pg_convert_from, + CStringGetTextDatum((const char*)result_str), CStringGetDatum(dest_encoding_name)); + } + PG_CATCH(); + { + u_sess->cmd_cxt.bulkload_compatible_illegal_chars = old_bulkload_compatible_illegal_chars; + } + PG_END_TRY(); + u_sess->cmd_cxt.bulkload_compatible_illegal_chars = old_bulkload_compatible_illegal_chars; + + if (NULL != result) { + PG_RETURN_TEXT_P(result); } else 
{ PG_RETURN_NULL(); diff --git a/contrib/dolphin/sql/builtin_funcs/convert.sql b/contrib/dolphin/sql/builtin_funcs/convert.sql index c899480bc..2f3ef1225 100644 --- a/contrib/dolphin/sql/builtin_funcs/convert.sql +++ b/contrib/dolphin/sql/builtin_funcs/convert.sql @@ -11,6 +11,8 @@ select convert(1 using 'gbk'); select convert(1 using 'utf8'); select convert('测试' using 'utf8'); select convert('测试' using utf8); +select convert('测试' using gbk); +select convert('测试' using latin1); select convert(11.1, decimal(10,3)); select convert(1 using decimal(10,3)); drop schema db_convert cascade; -- Gitee From 3c74fb96dc10878a11a485c52e142c57dad76f16 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 6 Dec 2023 15:41:16 +0800 Subject: [PATCH 102/434] sync server code 492ab0e6567a29d849934e0ccfbf35505106a1ed --- contrib/dolphin/expected/perf_schema_test.out | 6 + contrib/dolphin/include/builtin_funcs.ini | 8 + .../include/plugin_parser/parse_expr.h | 1 + .../dolphin/output/test_show_status.source | 15 ++ contrib/dolphin/plugin_parser/gram.y | 1 - .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 179 +++++++++++++++++- 6 files changed, 207 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/perf_schema_test.out b/contrib/dolphin/expected/perf_schema_test.out index b875cd0f2..d8ba0b505 100644 --- a/contrib/dolphin/expected/perf_schema_test.out +++ b/contrib/dolphin/expected/perf_schema_test.out @@ -74,12 +74,14 @@ SELECT * FROM performance_schema.events_waits_current limit 1; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; +WARNING: [SS] dms not init! count ------- --?.* (1 row) SELECT * FROM performance_schema.events_waits_summary_global_by_event_name limit 1; +WARNING: [SS] dms not init! --?.* --?.* --?.* @@ -98,12 +100,14 @@ SELECT * FROM performance_schema.events_waits_current limit 1; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; +WARNING: [SS] dms not init! 
count ------- --?.* (1 row) SELECT * FROM performance_schema.events_waits_summary_global_by_event_name limit 1; +WARNING: [SS] dms not init! --?.* --?.* --?.* @@ -159,6 +163,7 @@ SELECT COUNT(*) FROM performance_schema.events_waits_current; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; +WARNING: [SS] dms not init! count ------- --?.* @@ -171,6 +176,7 @@ SELECT COUNT(*) FROM performance_schema.events_waits_current; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; +WARNING: [SS] dms not init! count ------- --?.* diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index 71287de8a..b4101a0f2 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -12967,3 +12967,11 @@ AddFuncGroup( "query_page_distribution_info", 1, AddBuiltinFunc(_0(2866), _1("query_page_distribution_info"), _2(3), _3(true), _4(true), _5(query_page_distribution_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(64), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, TEXTOID, INT4OID, INT4OID), _21(11, TEXTOID, INT4OID, INT4OID, INT4OID, BOOLOID, BOOLOID, BOOLOID, TEXTOID, OIDOID, OIDOID, BOOLOID), _22(11, 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "relname", "fork", "blockno", "instance_id", "is_master", "is_owner", "is_copy", "lock_mode", "mem_lsn", "disk_lsn", "is_dirty"), _24(NULL), _25("query_page_distribution_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: query page distribution information "), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "query_node_reform_info_from_dms", 1, + AddBuiltinFunc(_0(2869), _1("query_node_reform_info_from_dms"), _2(1), _3(true), _4(true), _5(query_node_reform_info_from_dms), 
_6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(27), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, INT4OID), _21(3, INT4OID, TEXTOID, TEXTOID), _22(3, 'i', 'o', 'o'), _23(3, "TYPE", "NAME", "DESCRIPTION"), _24(NULL), _25("query_node_reform_info_from_dms"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("query node reform info from dms"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "query_all_drc_info", 1, + AddBuiltinFunc(_0(2870), _1("query_all_drc_info"), _2(1), _3(true), _4(true), _5(query_all_drc_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(27), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, INT4OID), _21(19, INT4OID, TEXTOID, INT4OID, INT8OID, INT4OID, INT4OID, INT4OID, INT4OID, INT4OID, INT4OID, INT2OID, INT8OID, INT8OID, INT2OID, INT4OID, INT4OID, INT4OID, INT4OID, INT4OID), _22(19, 'i', 'o', 'o', 'o','o','o','o','o','o','o', 'o', 'o', 'o','o','o','o','o','o','o'), _23(19, "TYPE", "RESOURCE_ID", "MASTER_ID", "COPY_INSTS", "CLAIMED_OWNER", "LOCK_MODE", "LAST_EDP", "TYPE", "IN_RECOVERY", "COPY_PROMOTE", "PART_ID", "EDP_MAP", "LSN", "LEN", "RECOVERY_SKIP", "RECYCLING", "CONVERTING_INST_ID", "CONVERTING_CURR_MODE", "CONVERTING_REQ_MODE"), _24(NULL), _25("query_all_drc_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("query all drc info"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), \ No newline at end of file diff --git a/contrib/dolphin/include/plugin_parser/parse_expr.h b/contrib/dolphin/include/plugin_parser/parse_expr.h index bc972fcaf..120db1188 100644 --- a/contrib/dolphin/include/plugin_parser/parse_expr.h +++ b/contrib/dolphin/include/plugin_parser/parse_expr.h @@ -38,5 +38,6 @@ extern void 
AdaptSWSelectStmt(ParseState *pstate, SelectStmt *stmt); extern bool IsQuerySWCBRewrite(Query *query); extern bool IsSWCBRewriteRTE(RangeTblEntry *rte); extern Datum GetTypeZeroValue(Form_pg_attribute att_tup); +typedef Datum (*getTypeZeroValueFunc)(Form_pg_attribute att_tup); #endif /* PARSE_EXPR_H */ diff --git a/contrib/dolphin/output/test_show_status.source b/contrib/dolphin/output/test_show_status.source index 2f30a50d5..9b7fd2064 100644 --- a/contrib/dolphin/output/test_show_status.source +++ b/contrib/dolphin/output/test_show_status.source @@ -2,18 +2,33 @@ create schema db_show_status; set current_schema to 'db_show_status'; set dolphin.sql_mode=''; \! gsql -d contrib_regression -r -p @portstring@ -c "show status" > /dev/null; echo $? +WARNING: [SS] dms not init! +CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement +PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 \! gsql -d contrib_regression -r -p @portstring@ -c "show session status" > /dev/null; echo $? +WARNING: [SS] dms not init! +CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement +PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 \! gsql -d contrib_regression -r -p @portstring@ -c "show global status" > /dev/null; echo $? +WARNING: [SS] dms not init! +CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement +PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 show session status like 'buffers_backend'; +WARNING: [SS] dms not init! +CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement +PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY Variable_name | Value -----------------+------- --?.* --?.* show global status where variable_name = 'buffers_backend'; +WARNING: [SS] dms not init! 
+CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement +PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY Variable_name | Value -----------------+------- --?.* diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index da9cdb330..31da28f05 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -27463,7 +27463,6 @@ policy_label_name: opt_add_resources_to_label: ADD_P resources_to_label_list { $$ = $2; } - | /* EMPTY */ { $$ = NULL; } ; resources_to_label_list: diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index ffadfad3e..268f3ff91 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -82,6 +82,8 @@ #include "ddes/dms/ss_dms_bufmgr.h" #include "storage/file/fio_device.h" #include "ddes/dms/ss_dms_recovery.h" +#include "utils/json.h" +#include "utils/jsonapi.h" #include "plugin_postgres.h" #define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32*)&(var)))) @@ -4082,10 +4084,12 @@ Datum pg_backend_pid(PG_FUNCTION_ARGS) PG_RETURN_INT64(t_thrd.proc_cxt.MyProcPid); } +#ifdef DOLPHIN Datum connection_id(PG_FUNCTION_ARGS) { PG_RETURN_INT64(IS_THREAD_POOL_WORKER ? 
u_sess->session_id : t_thrd.proc_cxt.MyProcPid); } +#endif Datum pg_current_userid(PG_FUNCTION_ARGS) { @@ -15157,7 +15161,7 @@ int compute_copy_insts_count(uint64 bitmap) /* this struct is used to control the iteration during query_page_distribution_info */ typedef struct st_dms_iterate { - stat_drc_info_t *drc_info; + dv_drc_buf_info *drc_info; uint8 iterate_idx; } dms_iterate_t; @@ -15183,7 +15187,7 @@ Datum query_page_distribution_info_internal(text* relname, ForkNumber fork, Bloc funcctx = SRF_FIRSTCALL_INIT(); MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); - stat_drc_info_t *drc_info = (stat_drc_info_t*)palloc0(sizeof(stat_drc_info_t)); + dv_drc_buf_info *drc_info = (dv_drc_buf_info*)palloc0(sizeof(dv_drc_buf_info)); InitDmsBufContext(&drc_info->dms_ctx, tag); drc_info->claimed_owner = CM_INVALID_ID8; drc_info->buf_info[0] = buf_info; @@ -15277,6 +15281,177 @@ Datum query_page_distribution_info(PG_FUNCTION_ARGS) return query_page_distribution_info_internal(relname, fork, blockno, fcinfo); } +#define REFORM_INFO_ROW_NUM 27 +TupleDesc create_query_node_reform_info_from_dms_tupdesc() +{ + int column = 2; + + TupleDesc tupdesc = CreateTemplateTupleDesc(column, false); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "NAME", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "DESCRIPTION", TEXTOID, -1, 0); + BlessTupleDesc(tupdesc); + return tupdesc; +} + +typedef struct RecordsetState { + JsonLexContext *lex; + Datum row_info[REFORM_INFO_ROW_NUM][2]; + int row_id; + int cur_row_id; + char *saved_scalar; +} RecordsetState; + +static void recordset_object_field_end(void *state, char *fname, bool isnull) +{ + RecordsetState *_state = (RecordsetState *)state; + + if (_state->row_id >= REFORM_INFO_ROW_NUM) { + ereport(ERROR, (errmsg("the row number returned from dms exceeds max row number"))); + } + _state->row_info[_state->row_id][0] = CStringGetTextDatum(fname); + _state->row_info[_state->row_id][1] = 
CStringGetTextDatum(_state->saved_scalar); + _state->row_id++; +} + +static void recordset_scalar(void *state, char *token, JsonTokenType tokentype) +{ + RecordsetState *_state = (RecordsetState *)state; + + if (_state->saved_scalar != NULL) { + pfree(_state->saved_scalar); + } + _state->saved_scalar = token; +} + +Datum query_node_reform_info_from_dms(PG_FUNCTION_ARGS) +{ + dms_info_id_e reform_info_id = + PG_GETARG_INT64(0) == 0 ? dms_info_id_e::DMS_INFO_REFORM_LAST : dms_info_id_e::DMS_INFO_REFORM_CURRENT; + if (!ENABLE_DMS) { + ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info without shared storage deployment!"))); + } + + FuncCallContext *funcctx = NULL; + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + char *json = (char *)palloc0(4096 * sizeof(char)); // 4k is enough + json[0] = '\0'; + if (!dms_info(json, 4096, reform_info_id) == GS_SUCCESS) { + ereport(ERROR, (errmsg("[SS] get reform infomation from dms fail!"))); + } + if (json[0] == '\0') { + ereport(WARNING, (errmsg("[SS] dms not init!"))); + SRF_RETURN_DONE(funcctx); + } + + RecordsetState *state = (RecordsetState *)palloc0(sizeof(RecordsetState)); + state->row_id = 0; + JsonLexContext *lex = makeJsonLexContext(cstring_to_text(json), true); + pfree(json); + JsonSemAction *sem = (JsonSemAction *)palloc0(sizeof(JsonSemAction)); + sem->semstate = (void *)state; + sem->scalar = recordset_scalar; + sem->object_field_end = recordset_object_field_end; + state->lex = lex; + pg_parse_json(lex, sem); + + state->cur_row_id = 0; + funcctx->user_fctx = (void *)state; + funcctx->tuple_desc = create_query_node_reform_info_from_dms_tupdesc(); + MemoryContextSwitchTo(oldcontext); + } + funcctx = SRF_PERCALL_SETUP(); + + RecordsetState *state = (RecordsetState *)funcctx->user_fctx; + if (state->cur_row_id < state->row_id) { + bool nulls[2] = {false}; + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, 
state->row_info[state->cur_row_id], nulls); + state->cur_row_id++; + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + pfree_ext(state); + SRF_RETURN_DONE(funcctx); +} + +TupleDesc create_query_all_drc_info_tupdesc() +{ + int column = 18; + + TupleDesc tupdesc = CreateTemplateTupleDesc(column, false); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "RESOURCE_ID", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "MASTER_ID", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "COPY_INSTS", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "CLAIMED_OWNER", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "LOCK_MODE", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "LAST_EDP", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "TYPE", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "IN_RECOVERY", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "COPY_PROMOTE", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)10, "PART_ID", INT2OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)11, "EDP_MAP", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)12, "LSN", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)13, "LEN", INT2OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)14, "RECOVERY_SKIP", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)15, "RECYCLING", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)16, "CONVERTING_INST_ID", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)17, "CONVERTING_CURR_MODE", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)18, "CONVERTING_REQ_MODE", INT4OID, -1, 0); + BlessTupleDesc(tupdesc); + return tupdesc; +} + +void fill_drc_info_to_values(dv_drc_buf_info *drc_info, Datum *values) +{ + values[0] = CStringGetTextDatum(drc_info->data); + values[1] = UInt32GetDatum((uint32)drc_info->master_id); + values[2] = UInt64GetDatum(drc_info->copy_insts); + values[3] = 
UInt32GetDatum((uint32)drc_info->claimed_owner); + values[4] = UInt32GetDatum((uint32)drc_info->lock_mode); + values[5] = UInt32GetDatum((uint32)drc_info->last_edp); + values[6] = UInt32GetDatum((uint32)drc_info->type); + values[7] = UInt32GetDatum((uint32)drc_info->in_recovery); + values[8] = UInt32GetDatum((uint32)drc_info->copy_promote); + values[9] = UInt16GetDatum(drc_info->part_id); + values[10] = UInt64GetDatum(drc_info->edp_map); + values[11] = UInt64GetDatum(drc_info->lsn); + values[12] = UInt16GetDatum(drc_info->len); + values[13] = UInt32GetDatum((uint32)drc_info->recovery_skip); + values[14] = UInt32GetDatum((uint32)drc_info->recycling); + values[15] = UInt32GetDatum((uint32)drc_info->converting_req_info_inst_id); + values[16] = UInt32GetDatum((uint32)drc_info->converting_req_info_curr_mod); + values[17] = UInt32GetDatum((uint32)drc_info->converting_req_info_req_mod); +} + +Datum query_all_drc_info(PG_FUNCTION_ARGS) +{ + int type = PG_GETARG_INT64(0) == 0 ? en_drc_res_type::DRC_RES_PAGE_TYPE : en_drc_res_type::DRC_RES_LOCK_TYPE; + if (!ENABLE_DMS) { + ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info without shared storage deployment!"))); + } + + FuncCallContext *funcctx = NULL; + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + funcctx->tuple_desc = create_query_all_drc_info_tupdesc(); + MemoryContextSwitchTo(oldcontext); + } + funcctx = SRF_PERCALL_SETUP(); + + dv_drc_buf_info drc_info = {0}; + unsigned long long rowid = funcctx->call_cntr; + dms_get_buf_res(&rowid, &drc_info, type); + Datum values[18]; + bool nulls[18] = {false}; + if (drc_info.is_valid) { + fill_drc_info_to_values(&drc_info, values); + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + SRF_RETURN_DONE(funcctx); +} + #ifdef DOLPHIN PG_FUNCTION_INFO_V1_PUBLIC(gs_master_status); extern "C" 
DLL_PUBLIC Datum gs_master_status(PG_FUNCTION_ARGS); -- Gitee From 841d75728b4ff9c77641210efb7258d16ae81c90 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 6 Dec 2023 11:14:21 +0800 Subject: [PATCH 103/434] Add error message notice for identifier start with digit. --- contrib/dolphin/expected/db_b_parser4.out | 53 +++++++++++++++++++++++ contrib/dolphin/plugin_parser/scan.l | 16 ++++++- contrib/dolphin/sql/db_b_parser4.sql | 15 +++++++ 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/db_b_parser4.out b/contrib/dolphin/expected/db_b_parser4.out index cdf1728c6..43ee7e01e 100644 --- a/contrib/dolphin/expected/db_b_parser4.out +++ b/contrib/dolphin/expected/db_b_parser4.out @@ -52,6 +52,59 @@ drop table if exists tb_db_b_parser_0002; drop table if exists tb_default_float; drop table if exists tb_default_double; drop table if exists tb_real_float; +--syntax error message +create table t1(123abc int); +ERROR: syntax error at or near "123". Please note that database object name should start with a letter or underscore (_). +LINE 1: create table t1(123abc int); + ^ +create table t1(123 int); +ERROR: syntax error at or near "123" +LINE 1: create table t1(123 int); + ^ +create table t1(123 abc int); +ERROR: syntax error at or near "123" +LINE 1: create table t1(123 abc int); + ^ +create table 123(abc int); +ERROR: syntax error at or near "123" +LINE 1: create table 123(abc int); + ^ +create table 123abc(abc int); +ERROR: syntax error at or near "123". Please note that database object name should start with a letter or underscore (_). +LINE 1: create table 123abc(abc int); + ^ +create table 123 abc(abc int); +ERROR: syntax error at or near "123" +LINE 1: create table 123 abc(abc int); + ^ +create table if not exists t1(123abc int); +ERROR: syntax error at or near "123". Please note that database object name should start with a letter or underscore (_). 
+LINE 1: create table if not exists t1(123abc int); + ^ +create table if not exists t1(123 int); +ERROR: syntax error at or near "123" +LINE 1: create table if not exists t1(123 int); + ^ +create table if not exists t1(123 abc int); +ERROR: syntax error at or near "123" +LINE 1: create table if not exists t1(123 abc int); + ^ +create table if not exists 123(abc int); +ERROR: syntax error at or near "123" +LINE 1: create table if not exists 123(abc int); + ^ +create table if not exists 123abc(abc int); +ERROR: syntax error at or near "123". Please note that database object name should start with a letter or underscore (_). +LINE 1: create table if not exists 123abc(abc int); + ^ +create table if not exists 123 abc(abc int); +ERROR: syntax error at or near "123" +LINE 1: create table if not exists 123 abc(abc int); + ^ +123abc; +ERROR: syntax error at or near "123". Please note that database object name should start with a letter or underscore (_). +LINE 1: 123abc; + ^ --bconst select 0b01; ?column? diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index ef51aa42e..f4244148a 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -1564,10 +1564,24 @@ scanner_yyerror(const char *message, core_yyscan_t yyscanner) } else { + /* for syntax error, if the error string is digit + alpha, add notice to user the database object nameing rule */ + int len = strlen(loc); + bool digit = true; + if (t_thrd.postgres_cxt.debug_query_string != NULL && isalpha(t_thrd.postgres_cxt.debug_query_string[*yylloc + len])) { + for (int i = 0; i < len; i++) { + if (!isdigit((unsigned char)(loc[i]))) { + digit = false; + break; + } + } + } else { + digit = false; + } ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), /* translator: first %s is typically the translation of "syntax error" */ - errmsg("%s at or near \"%s\"", _(message), loc), + errmsg("%s at or near \"%s\"%s", _(message), loc, + digit ? ". 
Please note that database object name should start with a letter or underscore (_)." : ""), lexer_errposition())); } } diff --git a/contrib/dolphin/sql/db_b_parser4.sql b/contrib/dolphin/sql/db_b_parser4.sql index 45d9dff26..70982fb1a 100644 --- a/contrib/dolphin/sql/db_b_parser4.sql +++ b/contrib/dolphin/sql/db_b_parser4.sql @@ -28,6 +28,21 @@ drop table if exists tb_default_float; drop table if exists tb_default_double; drop table if exists tb_real_float; +--syntax error message +create table t1(123abc int); +create table t1(123 int); +create table t1(123 abc int); +create table 123(abc int); +create table 123abc(abc int); +create table 123 abc(abc int); +create table if not exists t1(123abc int); +create table if not exists t1(123 int); +create table if not exists t1(123 abc int); +create table if not exists 123(abc int); +create table if not exists 123abc(abc int); +create table if not exists 123 abc(abc int); +123abc; + --bconst select 0b01; select 0b01+1; -- Gitee From d6d87194bfe0973bbfdb936b2f3823712b0d8fe9 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 6 Dec 2023 17:06:52 +0800 Subject: [PATCH 104/434] Fix testcase. 
--- contrib/dolphin/expected/db_b_parser3.out | 74 +++++++++---------- contrib/dolphin/expected/perf_schema_test.out | 6 -- .../dolphin/output/test_show_status.source | 15 ---- contrib/dolphin/sql/db_b_parser3.sql | 50 ++++++------- 4 files changed, 62 insertions(+), 83 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index c56472890..d1eb7e535 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -202,53 +202,53 @@ WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' (1 row) select -length(random_bytes(`int1`)::binary), -length(random_bytes(`uint1`)::binary), -length(random_bytes(`int2`)::binary), -length(random_bytes(`uint2`)::binary), -length(random_bytes(`int4`)::binary), -length(random_bytes(`uint4`)::binary), -length(random_bytes(`int8`)::binary), -length(random_bytes(`uint8`)::binary), -length(random_bytes(`float4`)::binary), -length(random_bytes(`float8`)::binary), -length(random_bytes(`numeric`)::binary), -length(random_bytes(`bit1`)::binary), -length(random_bytes(`bit64`)::binary), -length(random_bytes(`boolean`)::binary), -length(random_bytes(`char`)::binary), -length(random_bytes(`varchar`)::binary), -length(random_bytes(`binary`)::binary), -length(random_bytes(`varbinary`)::binary), -length(random_bytes(`tinyblob`)::binary), -length(random_bytes(`blob`)::binary), -length(random_bytes(`mediumblob`)::binary), -length(random_bytes(`longblob`)::binary), -length(random_bytes(`text`)::binary), -length(random_bytes(`enum_t`)::binary), -length(random_bytes(`set_t`)::binary) +random_bytes(`int1`)::binary, +random_bytes(`uint1`)::binary, +random_bytes(`int2`)::binary, +random_bytes(`uint2`)::binary, +random_bytes(`int4`)::binary, +random_bytes(`uint4`)::binary, +random_bytes(`int8`)::binary, +random_bytes(`uint8`)::binary, +random_bytes(`float4`)::binary, +random_bytes(`float8`)::binary, +random_bytes(`numeric`)::binary, 
+random_bytes(`bit1`)::binary, +random_bytes(`bit64`)::binary, +random_bytes(`boolean`)::binary, +random_bytes(`char`)::binary, +random_bytes(`varchar`)::binary, +random_bytes(`binary`)::binary, +random_bytes(`varbinary`)::binary, +random_bytes(`tinyblob`)::binary, +random_bytes(`blob`)::binary, +random_bytes(`mediumblob`)::binary, +random_bytes(`longblob`)::binary, +random_bytes(`text`)::binary, +random_bytes(`enum_t`)::binary, +random_bytes(`set_t`)::binary from test_type_table; WARNING: invalid input syntax for type integer: "1.23a " -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type integer: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type double precision: "1.23a" -CONTEXT: referenced column: length +CONTEXT: referenced column: random_bytes WARNING: invalid input syntax for type integer: "1.23a" -CONTEXT: referenced column: length - length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length | length 
---------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+-------- - 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 7 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5 +CONTEXT: referenced column: random_bytes +--?.* +--?.* +--?.* (1 row) --error, cause value out of range diff --git a/contrib/dolphin/expected/perf_schema_test.out b/contrib/dolphin/expected/perf_schema_test.out index d8ba0b505..b875cd0f2 100644 --- a/contrib/dolphin/expected/perf_schema_test.out +++ b/contrib/dolphin/expected/perf_schema_test.out @@ -74,14 +74,12 @@ SELECT * FROM performance_schema.events_waits_current limit 1; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; -WARNING: [SS] dms not init! count ------- --?.* (1 row) SELECT * FROM performance_schema.events_waits_summary_global_by_event_name limit 1; -WARNING: [SS] dms not init! --?.* --?.* --?.* @@ -100,14 +98,12 @@ SELECT * FROM performance_schema.events_waits_current limit 1; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; -WARNING: [SS] dms not init! count ------- --?.* (1 row) SELECT * FROM performance_schema.events_waits_summary_global_by_event_name limit 1; -WARNING: [SS] dms not init! --?.* --?.* --?.* @@ -163,7 +159,6 @@ SELECT COUNT(*) FROM performance_schema.events_waits_current; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; -WARNING: [SS] dms not init! count ------- --?.* @@ -176,7 +171,6 @@ SELECT COUNT(*) FROM performance_schema.events_waits_current; (1 row) SELECT COUNT(*) FROM performance_schema.events_waits_summary_global_by_event_name; -WARNING: [SS] dms not init! 
count ------- --?.* diff --git a/contrib/dolphin/output/test_show_status.source b/contrib/dolphin/output/test_show_status.source index 9b7fd2064..2f30a50d5 100644 --- a/contrib/dolphin/output/test_show_status.source +++ b/contrib/dolphin/output/test_show_status.source @@ -2,33 +2,18 @@ create schema db_show_status; set current_schema to 'db_show_status'; set dolphin.sql_mode=''; \! gsql -d contrib_regression -r -p @portstring@ -c "show status" > /dev/null; echo $? -WARNING: [SS] dms not init! -CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement -PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 \! gsql -d contrib_regression -r -p @portstring@ -c "show session status" > /dev/null; echo $? -WARNING: [SS] dms not init! -CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement -PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 \! gsql -d contrib_regression -r -p @portstring@ -c "show global status" > /dev/null; echo $? -WARNING: [SS] dms not init! -CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement -PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY 0 show session status like 'buffers_backend'; -WARNING: [SS] dms not init! -CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement -PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY Variable_name | Value -----------------+------- --?.* --?.* show global status where variable_name = 'buffers_backend'; -WARNING: [SS] dms not init! 
-CONTEXT: PL/pgSQL function dbe_perf.get_global_wait_events() line 12 at FOR over EXECUTE statement -PL/pgSQL function show_status(boolean) line 6 at RETURN QUERY Variable_name | Value -----------------+------- --?.* diff --git a/contrib/dolphin/sql/db_b_parser3.sql b/contrib/dolphin/sql/db_b_parser3.sql index 110201e9f..11e39f8d0 100644 --- a/contrib/dolphin/sql/db_b_parser3.sql +++ b/contrib/dolphin/sql/db_b_parser3.sql @@ -100,31 +100,31 @@ rand(`json`)=rand(cast(`json` as signed)) from test_type_table; select -length(random_bytes(`int1`)::binary), -length(random_bytes(`uint1`)::binary), -length(random_bytes(`int2`)::binary), -length(random_bytes(`uint2`)::binary), -length(random_bytes(`int4`)::binary), -length(random_bytes(`uint4`)::binary), -length(random_bytes(`int8`)::binary), -length(random_bytes(`uint8`)::binary), -length(random_bytes(`float4`)::binary), -length(random_bytes(`float8`)::binary), -length(random_bytes(`numeric`)::binary), -length(random_bytes(`bit1`)::binary), -length(random_bytes(`bit64`)::binary), -length(random_bytes(`boolean`)::binary), -length(random_bytes(`char`)::binary), -length(random_bytes(`varchar`)::binary), -length(random_bytes(`binary`)::binary), -length(random_bytes(`varbinary`)::binary), -length(random_bytes(`tinyblob`)::binary), -length(random_bytes(`blob`)::binary), -length(random_bytes(`mediumblob`)::binary), -length(random_bytes(`longblob`)::binary), -length(random_bytes(`text`)::binary), -length(random_bytes(`enum_t`)::binary), -length(random_bytes(`set_t`)::binary) +random_bytes(`int1`)::binary, +random_bytes(`uint1`)::binary, +random_bytes(`int2`)::binary, +random_bytes(`uint2`)::binary, +random_bytes(`int4`)::binary, +random_bytes(`uint4`)::binary, +random_bytes(`int8`)::binary, +random_bytes(`uint8`)::binary, +random_bytes(`float4`)::binary, +random_bytes(`float8`)::binary, +random_bytes(`numeric`)::binary, +random_bytes(`bit1`)::binary, +random_bytes(`bit64`)::binary, +random_bytes(`boolean`)::binary, 
+random_bytes(`char`)::binary, +random_bytes(`varchar`)::binary, +random_bytes(`binary`)::binary, +random_bytes(`varbinary`)::binary, +random_bytes(`tinyblob`)::binary, +random_bytes(`blob`)::binary, +random_bytes(`mediumblob`)::binary, +random_bytes(`longblob`)::binary, +random_bytes(`text`)::binary, +random_bytes(`enum_t`)::binary, +random_bytes(`set_t`)::binary from test_type_table; --error, cause value out of range -- Gitee From c13f1b0cd57b14f5588944bb706110de50bf7331 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 6 Dec 2023 21:32:36 +0800 Subject: [PATCH 105/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dhour=20minute=20secon?= =?UTF-8?q?d=E5=87=BD=E6=95=B0=E5=9C=A8=E8=BE=93=E5=85=A5=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E6=97=B6=E8=BF=94=E5=9B=9E=E5=80=BC=E4=B8=8Emysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs.out | 200 ++++++++++++++- contrib/dolphin/expected/db_b_parser2.out | 8 +- contrib/dolphin/plugin_utils/adt/date.cpp | 227 ++++++++++++++++-- .../rollback_script/dolphin--3.0--2.0.sql | 17 +- .../b_compatibility_time_funcs.sql | 24 ++ .../upgrade_script/dolphin--2.0--3.0.sql | 17 +- 6 files changed, 463 insertions(+), 30 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 2e98ca833..bb90bde20 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -477,7 +477,7 @@ select * from func_test; subdate(time'838:59:59', interval 1.999 hour) | 836:59:59 subdate('2022-01-01 01:01:01', interval 1.999 minute) | 2022-01-01 00:59:01 subdate('2022-01-01 01:01:01', interval 1.999 second) | 2022-01-01 01:00:59.001 ---?.* +(156 rows) SELECT SUBDATE(time'839:59:59', interval 2 hour); ERROR: date/time field 
value out of range: "839:59:59" @@ -553,7 +553,7 @@ select hour('838:59:59'); (1 row) select hour('-840:59:59'); -WARNING: date/time field value out of range: "-840:59:59" +WARNING: Truncated incorrect time value: "-840:59:59" CONTEXT: referenced column: hour hour ------ @@ -561,7 +561,7 @@ CONTEXT: referenced column: hour (1 row) select hour('840:59:59'); -WARNING: date/time field value out of range: "840:59:59" +WARNING: Truncated incorrect time value: "840:59:59" CONTEXT: referenced column: hour hour ------ @@ -577,7 +577,7 @@ CONTEXT: referenced column: hour (1 row) select hour('abc'); -WARNING: invalid input syntax for type time: "abc" +WARNING: Truncated incorrect time value: "abc" CONTEXT: referenced column: hour hour ------ @@ -585,7 +585,7 @@ CONTEXT: referenced column: hour (1 row) select hour('1234abc'); -WARNING: invalid input syntax for type time: "1234abc" +WARNING: Truncated incorrect time value: "1234abc" CONTEXT: referenced column: hour hour ------ @@ -606,7 +606,7 @@ select hour('838:59:59'); (1 row) select hour('-840:59:59'); -WARNING: date/time field value out of range: "-840:59:59" +WARNING: Truncated incorrect time value: "-840:59:59" CONTEXT: referenced column: hour hour ------ @@ -614,7 +614,7 @@ CONTEXT: referenced column: hour (1 row) select hour('840:59:59'); -WARNING: date/time field value out of range: "840:59:59" +WARNING: Truncated incorrect time value: "840:59:59" CONTEXT: referenced column: hour hour ------ @@ -630,7 +630,7 @@ CONTEXT: referenced column: hour (1 row) select hour('abc'); -WARNING: invalid input syntax for type time: "abc" +WARNING: Truncated incorrect time value: "abc" CONTEXT: referenced column: hour hour ------ @@ -638,7 +638,7 @@ CONTEXT: referenced column: hour (1 row) select hour('1234abc'); -WARNING: invalid input syntax for type time: "1234abc" +WARNING: Truncated incorrect time value: "1234abc" CONTEXT: referenced column: hour hour ------ @@ -940,6 +940,188 @@ CONTEXT: referenced column: timestamp select 
timestamp'2022-05-05 16:60:10'; ERROR: date/time field value out of range: "2022-05-05 16:60:10" CONTEXT: referenced column: timestamp +SET dolphin.sql_mode TO 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length'; +SET dolphin.b_compatibility_mode TO on; +\pset null '' +CREATE TABLE t1 (f1 date); +INSERT INTO t1 values('2007-07-19'), (NULL); +SELECT * FROM t1; + f1 +------------ + 2007-07-19 + +(2 rows) + +SELECT HOUR(f1), MINUTE(f1), SECOND(f1), MICROSECOND(f1) FROM t1; + hour | minute | second | microsecond +--------+--------+--------+------------- + 0 | 0 | 0 | 0 + | | | +(2 rows) + +DROP TABLE t1; +select hour('11:11:11.234'), minute('11:11:11.234'), second('11:11:11.234'), microsecond('11:11:11.234'); + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour('11:11:11.234-7'), minute('11:11:11.234-7'), second('11:11:11.234-7'), microsecond('11:11:11.234-7'); +WARNING: Truncated incorrect time value: "11:11:11.234-7" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "11:11:11.234-7" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "11:11:11.234-7" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "11:11:11.234-7" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour(timetz '11:11:11.234-7'), minute(timetz '11:11:11.234-7'), second(timetz '11:11:11.234-7'), microsecond(timetz '11:11:11.234-7'); + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour('2007-07-19'), minute('2007-07-19'), second('2007-07-19'), microsecond('2007-07-19'); +WARNING: Truncated incorrect time value: "2007-07-19" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "2007-07-19" 
+CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "2007-07-19" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "2007-07-19" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 0 | 20 | 7 | 0 +(1 row) + +select hour('2007-07-19 11:11:11.234'), minute('2007-07-19 11:11:11.234'), second('2007-07-19 11:11:11.234'), microsecond('2007-07-19 11:11:11.234'); + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour('2007-07-19 11:11:11.234-7'), minute('2007-07-19 11:11:11.234-7'), second('2007-07-19 11:11:11.234-7'), microsecond('2007-07-19 11:11:11.234-7'); +WARNING: Truncated incorrect time value: "2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour(timestamptz '2007-07-19 11:11:11.234-7'), minute(timestamptz '2007-07-19 11:11:11.234-7'), second(timestamptz '2007-07-19 11:11:11.234-7'), microsecond(timestamptz '2007-07-19 11:11:11.234-7'); + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour('-2007-07-19'), minute('-2007-07-19'), second('-2007-07-19'), microsecond('-2007-07-19'); +WARNING: Truncated incorrect time value: "-2007-07-19" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "-2007-07-19" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "-2007-07-19" +CONTEXT: referenced column: second 
+WARNING: Truncated incorrect time value: "-2007-07-19" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 0 | 20 | 7 | 0 +(1 row) + +select hour('-2007-07-19 11:11:11.234'), minute('-2007-07-19 11:11:11.234'), second('-2007-07-19 11:11:11.234'), microsecond('-2007-07-19 11:11:11.234'); + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour('-2007-07-19 11:11:11.234-7'), minute('-2007-07-19 11:11:11.234-7'), second('-2007-07-19 11:11:11.234-7'), microsecond('-2007-07-19 11:11:11.234-7'); +WARNING: Truncated incorrect time value: "-2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "-2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "-2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "-2007-07-19 11:11:11.234-7" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 11 | 11 | 11 | 234000 +(1 row) + +select hour(timestamptz '-2007-07-19 11:11:11.234-7'), minute(timestamptz '-2007-07-19 11:11:11.234-7'), second(timestamptz '-2007-07-19 11:11:11.234-7'), microsecond(timestamptz '-2007-07-19 11:11:11.234-7'); +WARNING: time zone displacement out of range: "-2007-07-19 11:11:11.234-7" +LINE 1: select hour(timestamptz '-2007-07-19 11:11:11.234-7'), minut... + ^ +CONTEXT: referenced column: hour +WARNING: time zone displacement out of range: "-2007-07-19 11:11:11.234-7" +LINE 1: ...'-2007-07-19 11:11:11.234-7'), minute(timestamptz '-2007-07-... + ^ +CONTEXT: referenced column: minute +WARNING: time zone displacement out of range: "-2007-07-19 11:11:11.234-7" +LINE 1: ...'-2007-07-19 11:11:11.234-7'), second(timestamptz '-2007-07-... 
+ ^ +CONTEXT: referenced column: second +WARNING: time zone displacement out of range: "-2007-07-19 11:11:11.234-7" +LINE 1: ...7-07-19 11:11:11.234-7'), microsecond(timestamptz '-2007-07-... + ^ +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +--------+--------+--------+------------- + | | | +(1 row) + +select hour('202014'), minute('202014'), second('202014'), microsecond('202014'); + hour | minute | second | microsecond +------+--------+--------+------------- + 20 | 20 | 14 | 0 +(1 row) + +select hour('209614'), minute('209614'), second('209614'), microsecond('209614'); + hour | minute | second | microsecond +--------+--------+--------+------------- + | | | +(1 row) + +select hour('abcdefg'), minute('abcdefg'), second('abcdefg'), microsecond('abcdefg'); +WARNING: Truncated incorrect time value: "abcdefg" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "abcdefg" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "abcdefg" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "abcdefg" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 0 | 0 | 0 | 0 +(1 row) + +select hour('abcdefghijklmnopqrstuv'), minute('abcdefghijklmnopqrstuv'), second('abcdefghijklmnopqrstuv'), microsecond('abcdefghijklmnopqrstuv'); +WARNING: Truncated incorrect time value: "abcdefghijklmnopqrstuv" +CONTEXT: referenced column: hour +WARNING: Truncated incorrect time value: "abcdefghijklmnopqrstuv" +CONTEXT: referenced column: minute +WARNING: Truncated incorrect time value: "abcdefghijklmnopqrstuv" +CONTEXT: referenced column: second +WARNING: Truncated incorrect time value: "abcdefghijklmnopqrstuv" +CONTEXT: referenced column: microsecond + hour | minute | second | microsecond +------+--------+--------+------------- + 0 | 0 | 0 | 0 +(1 row) + reset dolphin.sql_mode; drop schema b_time_funcs cascade; NOTICE: drop 
cascades to table func_test diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index 29c67960e..ec28245af 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -176,7 +176,7 @@ select second('373839'); (1 row) select second('2022-10-09'); -WARNING: invalid input syntax for type time: "2022-10-09" +WARNING: Truncated incorrect time value: "2022-10-09" CONTEXT: referenced column: second second -------- @@ -220,7 +220,7 @@ select minute('373839'); (1 row) select minute('2022-10-09'); -WARNING: invalid input syntax for type time: "2022-10-09" +WARNING: Truncated incorrect time value: "2022-10-09" CONTEXT: referenced column: minute minute -------- @@ -264,7 +264,7 @@ select microsecond('373839'); (1 row) select microsecond('2022-10-09'); -WARNING: invalid input syntax for type time: "2022-10-09" +WARNING: Truncated incorrect time value: "2022-10-09" CONTEXT: referenced column: microsecond microsecond ------------- @@ -308,7 +308,7 @@ select hour('373839'); (1 row) select hour('2022-10-09'); -WARNING: invalid input syntax for type time: "2022-10-09" +WARNING: Truncated incorrect time value: "2022-10-09" CONTEXT: referenced column: hour hour ------ diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 73feea1b8..dd2017e51 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -173,6 +173,30 @@ PG_FUNCTION_INFO_V1_PUBLIC(GetMinute); extern "C" DLL_PUBLIC Datum GetMinute(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(GetSecond); extern "C" DLL_PUBLIC Datum GetSecond(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetHourFromDate); +extern "C" DLL_PUBLIC Datum GetHourFromDate(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMicrosecondFromDate); +extern "C" DLL_PUBLIC Datum GetMicrosecondFromDate(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMinuteFromDate); +extern "C" 
DLL_PUBLIC Datum GetMinuteFromDate(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetSecondFromDate); +extern "C" DLL_PUBLIC Datum GetSecondFromDate(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetHourFromTimeTz); +extern "C" DLL_PUBLIC Datum GetHourFromTimeTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMicrosecondFromTimeTz); +extern "C" DLL_PUBLIC Datum GetMicrosecondFromTimeTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMinuteFromTimeTz); +extern "C" DLL_PUBLIC Datum GetMinuteFromTimeTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetSecondFromTimeTz); +extern "C" DLL_PUBLIC Datum GetSecondFromTimeTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetHourFromTimestampTz); +extern "C" DLL_PUBLIC Datum GetHourFromTimestampTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMicrosecondFromTimestampTz); +extern "C" DLL_PUBLIC Datum GetMicrosecondFromTimestampTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetMinuteFromTimestampTz); +extern "C" DLL_PUBLIC Datum GetMinuteFromTimestampTz(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(GetSecondFromTimestampTz); +extern "C" DLL_PUBLIC Datum GetSecondFromTimestampTz(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(get_format); extern "C" DLL_PUBLIC Datum get_format(PG_FUNCTION_ARGS); @@ -5655,7 +5679,7 @@ Datum adddate_time_interval(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } -static inline Datum GetSepecificPartOfTime(PG_FUNCTION_ARGS, const char *part) +static inline Datum GetSpecificPartOfTime(PG_FUNCTION_ARGS, int part) { char *tString = text_to_cstring(PG_GETARG_TEXT_PP(0)); int errlevel = (SQL_MODE_STRICT() ? 
ERROR : WARNING); @@ -5665,37 +5689,210 @@ static inline Datum GetSepecificPartOfTime(PG_FUNCTION_ARGS, const char *part) errmsg("invalid input syntax for type time: \"%s\"", tString))); PG_RETURN_NULL(); } - TimeADT tm; - if (time_in_without_overflow(tString, &tm, fcinfo->can_ignore)) { - if (tm < 0) { - tm *= -1; - } - return DirectFunctionCall2(time_part, CStringGetTextDatum(part), TimeADTGetDatum(tm)); + struct pg_tm tt; + struct pg_tm *tm = &tt; + fsec_t fsec = 0; + int timeSign = 1; + bool warnings; + int tm_type; + bool null_func_result = false; + float8 result = 0; + if (!cstring_to_time(tString, tm, fsec, timeSign, tm_type, warnings, &null_func_result) || null_func_result) { + PG_RETURN_NULL(); + } + if (warnings) { + int errlevel = (SQL_MODE_STRICT() || null_func_result) ? ERROR : WARNING; + ereport(errlevel, + (errcode(DTERR_BAD_FORMAT), errmsg("Truncated incorrect time value: \"%s\"", tString))); + } + switch (part) { + case HOUR: + result = tm->tm_hour; + break; + case MINUTE: + result = tm->tm_min; + break; + case SECOND: + result = tm->tm_sec; + break; + case MICROSECOND: + result = fsec; + break; + default: + break; } - Timestamp ts = DatumGetTimestamp( - DirectFunctionCall3(timestamp_in, CStringGetDatum(tString), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); - pfree(tString); - return DirectFunctionCall2(timestamp_part, CStringGetTextDatum(part), TimestampGetDatum(ts)); + PG_RETURN_FLOAT8(result); } Datum GetHour(PG_FUNCTION_ARGS) { - return GetSepecificPartOfTime(fcinfo, "hour"); + return GetSpecificPartOfTime(fcinfo, HOUR); } Datum GetMicrosecond(PG_FUNCTION_ARGS) { - return GetSepecificPartOfTime(fcinfo, "microsecond"); + return GetSpecificPartOfTime(fcinfo, MICROSECOND); } Datum GetMinute(PG_FUNCTION_ARGS) { - return GetSepecificPartOfTime(fcinfo, "minute"); + return GetSpecificPartOfTime(fcinfo, MINUTE); } Datum GetSecond(PG_FUNCTION_ARGS) { - return GetSepecificPartOfTime(fcinfo, "second"); + return GetSpecificPartOfTime(fcinfo, SECOND); 
+} + +static Datum GetSpecificPartOfTimeInDate(PG_FUNCTION_ARGS, int part) +{ + DateADT dateVal = PG_GETARG_DATEADT(0); + fsec_t fsec; + pg_tm tt; + pg_tm* tm = &tt; + float8 result = 0; + + if (timestamp2tm(date2timestamp(dateVal), NULL, tm, &fsec, NULL, NULL) == 0) { + switch (part) { + case HOUR: + result = tm->tm_hour; + break; + case MINUTE: + result = tm->tm_min; + break; + case SECOND: + result = tm->tm_sec; + break; + case MICROSECOND: + result = fsec; + break; + default: + break; + } + } else { + ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + } + PG_RETURN_FLOAT8(result); +} + +Datum GetHourFromDate(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInDate(fcinfo, HOUR); +} + +Datum GetMicrosecondFromDate(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInDate(fcinfo, MICROSECOND); +} + +Datum GetMinuteFromDate(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInDate(fcinfo, MINUTE); +} + +Datum GetSecondFromDate(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInDate(fcinfo, SECOND); +} + +static Datum GetSpecificPartOfTimeInTimeTz(PG_FUNCTION_ARGS, int part) +{ + TimeTzADT* time = PG_GETARG_TIMETZADT_P(0); + pg_tm tt; + pg_tm* tm = &tt; + fsec_t fsec; + int tz; + timetz2tm(time, tm, &fsec, &tz); + float8 result = 0; + + switch (part) { + case HOUR: + result = tm->tm_hour; + break; + case MINUTE: + result = tm->tm_min; + break; + case SECOND: + result = tm->tm_sec; + break; + case MICROSECOND: + result = fsec; + break; + default: + break; + } + PG_RETURN_FLOAT8(result); +} + +Datum GetHourFromTimeTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimeTz(fcinfo, HOUR); +} + +Datum GetMicrosecondFromTimeTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimeTz(fcinfo, MICROSECOND); +} + +Datum GetMinuteFromTimeTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimeTz(fcinfo, MINUTE); +} + +Datum GetSecondFromTimeTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimeTz(fcinfo, 
SECOND); +} + +static Datum GetSpecificPartOfTimeInTimestampTz(PG_FUNCTION_ARGS, int part) +{ + TimestampTz time = PG_GETARG_TIMESTAMPTZ(0); + pg_tm tt; + pg_tm* tm = &tt; + fsec_t fsec; + int tz; + float8 result = 0; + if (timestamp2tm(time, &tz, tm, &fsec, NULL, NULL) == 0) { + switch (part) { + case HOUR: + result = tm->tm_hour; + break; + case MINUTE: + result = tm->tm_min; + break; + case SECOND: + result = tm->tm_sec; + break; + case MICROSECOND: + result = fsec; + break; + default: + break; + } + } else { + ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + } + PG_RETURN_FLOAT8(result); +} + +Datum GetHourFromTimestampTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimestampTz(fcinfo, HOUR); +} + +Datum GetMicrosecondFromTimestampTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimestampTz(fcinfo, MICROSECOND); +} + +Datum GetMinuteFromTimestampTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimestampTz(fcinfo, MINUTE); +} + +Datum GetSecondFromTimestampTz(PG_FUNCTION_ARGS) +{ + return GetSpecificPartOfTimeInTimestampTz(fcinfo, SECOND); } bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag, bool vertify_time) diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 59daea8ae..a51ad37da 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -142,4 +142,19 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamp_bool(timestamp(0) without time zone DROP FUNCTION IF EXISTS pg_catalog.date_format (time without time zone, text); DROP FUNCTION IF EXISTS pg_catalog.to_char(time without time zone, text); -drop function IF EXISTS pg_catalog."user"(); \ No newline at end of file +drop function IF EXISTS pg_catalog."user"(); + +DROP FUNCTION IF EXISTS pg_catalog.hour(date); +DROP FUNCTION IF EXISTS pg_catalog.microsecond(date); +DROP 
FUNCTION IF EXISTS pg_catalog.minute(date); +DROP FUNCTION IF EXISTS pg_catalog.second(date); + +DROP FUNCTION IF EXISTS pg_catalog.hour(timetz); +DROP FUNCTION IF EXISTS pg_catalog.microsecond(timetz); +DROP FUNCTION IF EXISTS pg_catalog.minute(timetz); +DROP FUNCTION IF EXISTS pg_catalog.second(timetz); + +DROP FUNCTION IF EXISTS pg_catalog.hour(timestamptz); +DROP FUNCTION IF EXISTS pg_catalog.microsecond(timestamptz); +DROP FUNCTION IF EXISTS pg_catalog.minute(timestamptz); +DROP FUNCTION IF EXISTS pg_catalog.second(timestamptz); \ No newline at end of file diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql index 157d71ff7..5f40f3ff6 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs.sql @@ -316,6 +316,30 @@ select timestamp'2022-05'; select timestamp'2022-15-05 16:20:10'; select timestamp'2022-05-05 16:60:10'; +SET dolphin.sql_mode TO 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length'; +SET dolphin.b_compatibility_mode TO on; +\pset null '' +CREATE TABLE t1 (f1 date); +INSERT INTO t1 values('2007-07-19'), (NULL); +SELECT * FROM t1; +SELECT HOUR(f1), MINUTE(f1), SECOND(f1), MICROSECOND(f1) FROM t1; +DROP TABLE t1; +select hour('11:11:11.234'), minute('11:11:11.234'), second('11:11:11.234'), microsecond('11:11:11.234'); +select hour('11:11:11.234-7'), minute('11:11:11.234-7'), second('11:11:11.234-7'), microsecond('11:11:11.234-7'); +select hour(timetz '11:11:11.234-7'), minute(timetz '11:11:11.234-7'), second(timetz '11:11:11.234-7'), microsecond(timetz '11:11:11.234-7'); +select hour('2007-07-19'), minute('2007-07-19'), second('2007-07-19'), microsecond('2007-07-19'); +select hour('2007-07-19 11:11:11.234'), minute('2007-07-19 11:11:11.234'), second('2007-07-19 11:11:11.234'), microsecond('2007-07-19 11:11:11.234'); +select 
hour('2007-07-19 11:11:11.234-7'), minute('2007-07-19 11:11:11.234-7'), second('2007-07-19 11:11:11.234-7'), microsecond('2007-07-19 11:11:11.234-7'); +select hour(timestamptz '2007-07-19 11:11:11.234-7'), minute(timestamptz '2007-07-19 11:11:11.234-7'), second(timestamptz '2007-07-19 11:11:11.234-7'), microsecond(timestamptz '2007-07-19 11:11:11.234-7'); +select hour('-2007-07-19'), minute('-2007-07-19'), second('-2007-07-19'), microsecond('-2007-07-19'); +select hour('-2007-07-19 11:11:11.234'), minute('-2007-07-19 11:11:11.234'), second('-2007-07-19 11:11:11.234'), microsecond('-2007-07-19 11:11:11.234'); +select hour('-2007-07-19 11:11:11.234-7'), minute('-2007-07-19 11:11:11.234-7'), second('-2007-07-19 11:11:11.234-7'), microsecond('-2007-07-19 11:11:11.234-7'); +select hour(timestamptz '-2007-07-19 11:11:11.234-7'), minute(timestamptz '-2007-07-19 11:11:11.234-7'), second(timestamptz '-2007-07-19 11:11:11.234-7'), microsecond(timestamptz '-2007-07-19 11:11:11.234-7'); +select hour('202014'), minute('202014'), second('202014'), microsecond('202014'); +select hour('209614'), minute('209614'), second('209614'), microsecond('209614'); +select hour('abcdefg'), minute('abcdefg'), second('abcdefg'), microsecond('abcdefg'); +select hour('abcdefghijklmnopqrstuv'), minute('abcdefghijklmnopqrstuv'), second('abcdefghijklmnopqrstuv'), microsecond('abcdefghijklmnopqrstuv'); + reset dolphin.sql_mode; drop schema b_time_funcs cascade; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index c7b3c8bd8..9f906c4d4 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -234,4 +234,19 @@ CREATE OR REPLACE FUNCTION pg_catalog.bit_cast_time(bit) RETURNS time without time zone LANGUAGE SQL IMMUTABLE STRICT as 'select cast(cast($1 as text) as time without time zone)'; -create or replace function pg_catalog."user"() returns name as 'select 
current_user' LANGUAGE 'sql' IMMUTABLE; \ No newline at end of file +create or replace function pg_catalog."user"() returns name as 'select current_user' LANGUAGE 'sql' IMMUTABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.hour (date) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetHourFromDate'; +CREATE OR REPLACE FUNCTION pg_catalog.microsecond (date) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMicrosecondFromDate'; +CREATE OR REPLACE FUNCTION pg_catalog.minute (date) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMinuteFromDate'; +CREATE OR REPLACE FUNCTION pg_catalog.second (date) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetSecondFromDate'; + +CREATE OR REPLACE FUNCTION pg_catalog.hour (timetz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetHourFromTimeTz'; +CREATE OR REPLACE FUNCTION pg_catalog.microsecond (timetz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMicrosecondFromTimeTz'; +CREATE OR REPLACE FUNCTION pg_catalog.minute (timetz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMinuteFromTimeTz'; +CREATE OR REPLACE FUNCTION pg_catalog.second (timetz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetSecondFromTimeTz'; + +CREATE OR REPLACE FUNCTION pg_catalog.hour (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetHourFromTimestampTz'; +CREATE OR REPLACE FUNCTION pg_catalog.microsecond (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMicrosecondFromTimestampTz'; +CREATE OR REPLACE FUNCTION pg_catalog.minute (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMinuteFromTimestampTz'; +CREATE OR REPLACE FUNCTION pg_catalog.second (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetSecondFromTimestampTz'; \ No newline at end of file -- Gitee From dde85f9ea7103f66d0f97e12476be4b5eb457cb3 Mon Sep 
17 00:00:00 2001 From: Mijamind Date: Thu, 7 Dec 2023 21:11:03 +0800 Subject: [PATCH 106/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E4=BF=AE=E5=A4=8Dmdcache=E5=A4=B1?= =?UTF-8?q?=E6=95=88=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/spq_optimizer_util/spq_wrappers.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index da9257f6e..af147a82c 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -2588,20 +2588,17 @@ spqdb::CountLeafPartTables(Oid rel_oid) * anything fetched via the wrapper functions in this file can end up in the * metadata cache and hence need to have an invalidation callback registered. */ -static bool mdcache_invalidation_counter_registered = false; -static int64 mdcache_invalidation_counter = 0; -static int64 last_mdcache_invalidation_counter = 0; static void mdsyscache_invalidation_counter_callback(Datum arg, int cacheid, uint32 hashvalue) { - mdcache_invalidation_counter++; + u_sess->spq_cxt.mdcache_invalidation_counter++; } static void mdrelcache_invalidation_counter_callback(Datum arg, Oid relid) { - mdcache_invalidation_counter++; + u_sess->spq_cxt.mdcache_invalidation_counter++; } static void @@ -2676,16 +2673,16 @@ spqdb::MDCacheNeedsReset(void) { SPQ_WRAP_START; { - if (!mdcache_invalidation_counter_registered) + if (!u_sess->spq_cxt.mdcache_invalidation_counter_registered) { register_mdcache_invalidation_callbacks(); - mdcache_invalidation_counter_registered = true; + u_sess->spq_cxt.mdcache_invalidation_counter_registered = true; } - if (last_mdcache_invalidation_counter == mdcache_invalidation_counter) + if (u_sess->spq_cxt.last_mdcache_invalidation_counter == 
u_sess->spq_cxt.mdcache_invalidation_counter) return false; else { - last_mdcache_invalidation_counter = mdcache_invalidation_counter; + u_sess->spq_cxt.last_mdcache_invalidation_counter = u_sess->spq_cxt.mdcache_invalidation_counter; return true; } } -- Gitee From 13f22d0c8d1ded2cd61c509e8f724c1ef8ab894b Mon Sep 17 00:00:00 2001 From: he-shaoyu Date: Tue, 5 Dec 2023 19:14:10 +0800 Subject: [PATCH 107/434] =?UTF-8?q?=E4=BF=AE=E6=AD=A3bin(bit)=E7=9A=84?= =?UTF-8?q?=E8=A1=A8=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/builtin_funcs/bin.out | 19 ++++++++++++++++++ contrib/dolphin/plugin_utils/adt/numeric.cpp | 20 ++++++++++++++++++- contrib/dolphin/sql/builtin_funcs/bin.sql | 6 ++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/builtin_funcs/bin.out b/contrib/dolphin/expected/builtin_funcs/bin.out index c979b8b22..19793875a 100644 --- a/contrib/dolphin/expected/builtin_funcs/bin.out +++ b/contrib/dolphin/expected/builtin_funcs/bin.out @@ -140,5 +140,24 @@ select BIN(-13/4); 1111111111111111111111111111111111111111111111111111111111111101 (1 row) +-- 测试bit类型,b''默认为bit类型 +select bin(b'101'); + bin +----- + 101 +(1 row) + +select bin(b'0001101'); + bin +------ + 1101 +(1 row) + +select bin(b'0000000'); + bin +----- + 0 +(1 row) + drop schema db_bin cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index 6b552c663..a56b4f3f4 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -21152,7 +21152,25 @@ Datum conv_num(PG_FUNCTION_ARGS) Datum bin_bit(PG_FUNCTION_ARGS) { - PG_RETURN_TEXT_P(cstring_to_text("0")); + VarBit* arg = PG_GETARG_VARBIT_P(0); + int len = VARBITLEN(arg); + char* result = (char*)palloc(len + 9); + int cnt = 0; + for (bits8* r = VARBITS(arg); r < VARBITEND(arg); r++) { + bits8 tmp = *r; + for 
(int i = BITS_PER_BYTE - 1; i >= 0; --i) { + result[cnt++] = ((tmp & (1 << i)) != 0) ? '1' : '0'; + } + } + int cntzero = 0; + while (result[cntzero] == '0') { + cntzero++; + } + if (cntzero >= len) cntzero = len - 1; + result[len] = '\0'; + text* ret = cstring_to_text(result+cntzero); + pfree_ext(result); + PG_RETURN_TEXT_P(ret); } Datum bin_integer(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/sql/builtin_funcs/bin.sql b/contrib/dolphin/sql/builtin_funcs/bin.sql index 51da8b3e8..7af054a70 100644 --- a/contrib/dolphin/sql/builtin_funcs/bin.sql +++ b/contrib/dolphin/sql/builtin_funcs/bin.sql @@ -28,5 +28,11 @@ SELECT BIN(9.6); SELECT BIN(-9.2); SELECT BIN(-9.6); select BIN(-13/4); + +-- 测试bit类型,b''默认为bit类型 +select bin(b'101'); +select bin(b'0001101'); +select bin(b'0000000'); + drop schema db_bin cascade; reset current_schema; -- Gitee From d486478aabd0634533fd3f1f77f41cfd8d8413a4 Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 8 Dec 2023 11:59:04 +0800 Subject: [PATCH 108/434] Fix missing keyword in gram.y --- contrib/dolphin/expected/kwlist.out | 17 ++++++++++++++++- contrib/dolphin/expected/pl_debugger_client.out | 2 +- contrib/dolphin/plugin_parser/gram.y | 7 ++++++- contrib/dolphin/sql/kwlist.sql | 10 ++++++++++ 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/kwlist.out b/contrib/dolphin/expected/kwlist.out index 270dc0175..7f9ec1402 100644 --- a/contrib/dolphin/expected/kwlist.out +++ b/contrib/dolphin/expected/kwlist.out @@ -365,6 +365,15 @@ ERROR: syntax error at or near "foreign" LINE 1: analyze verbose foreign tables; ^ create index verbose_index on verbose(verbose); +-- unreserved keyword: ast, PARTITIONING, REPAIR, ROWTYPE_P, +create table ast(ast int); +create table PARTITIONING(PARTITIONING int); +create table REPAIR(REPAIR int); +create table ROWTYPE(ROWTYPE int); +-- col_name_keyword +create table ttt_test(TIMESTAMPADD int); +--type func keyword +create FUNCTION sounds() RETURNS int as 'select 1' language 
sql; /* excluded */ create table excluded(excluded int); insert into excluded values(1); @@ -397,7 +406,7 @@ ERROR: user cannot be trigger name reset search_path; drop schema keyword_test cascade; -NOTICE: drop cascades to 23 other objects +NOTICE: drop cascades to 29 other objects DETAIL: drop cascades to table keyword_test."cast" drop cascades to table keyword_test.last_day drop cascades to table keyword_test."less" @@ -418,6 +427,12 @@ drop cascades to table keyword_test."do" drop cascades to table keyword_test."end" drop cascades to table keyword_test."only" drop cascades to table keyword_test."verbose" +drop cascades to table keyword_test.ast +drop cascades to table keyword_test.partitioning +drop cascades to table keyword_test.repair +drop cascades to table keyword_test.rowtype +drop cascades to table keyword_test.ttt_test +drop cascades to function keyword_test.sounds() drop cascades to table keyword_test."excluded" drop cascades to table keyword_test.x drop cascades to function keyword_test.fn_x_before() diff --git a/contrib/dolphin/expected/pl_debugger_client.out b/contrib/dolphin/expected/pl_debugger_client.out index 6647be62b..582a860fa 100644 --- a/contrib/dolphin/expected/pl_debugger_client.out +++ b/contrib/dolphin/expected/pl_debugger_client.out @@ -1229,7 +1229,7 @@ select * from dbe_pldebugger.set_var('test.c', '$$2021-07-31$$::timestamp'); -- (1 row) select * from dbe_pldebugger.set_var('vrec', '(1,1,1)::test%rowtype'); -- not ok -WARNING: Exception occurs when trying to set variable: syntax error at or near "rowtype" +WARNING: Exception occurs when trying to set variable: column "rowtype" does not exist set_var --------- f diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 31da28f05..609b86100 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -37718,6 +37718,7 @@ unreserved_keyword_without_key: | ASCII | ASSERTION | ASSIGNMENT + | AST | AT | LESS | ATTRIBUTE 
@@ -37923,7 +37924,6 @@ unreserved_keyword_without_key: | INCLUDING | INCREMENT | INCREMENTAL - | INDEXES | INFILE | INHERIT @@ -38038,6 +38038,7 @@ unreserved_keyword_without_key: | PARSER | PARTIAL %prec PARTIAL_EMPTY_PREC | PARTITION + | PARTITIONING | PARTITIONS | PASSING | PASSWORD @@ -38096,6 +38097,7 @@ unreserved_keyword_without_key: | REMOVE | RENAME | REORGANIZE + | REPAIR | REPEAT | REPEATABLE | REPLACE @@ -38119,6 +38121,7 @@ unreserved_keyword_without_key: | ROLLUP | ROTATION | ROW_COUNT + | ROWTYPE_P | ROUTINE | ROWS | RULE @@ -38307,6 +38310,7 @@ col_name_keyword: | TEXT_P | TIME %prec IDENT | TIMESTAMP %prec IDENT + | TIMESTAMPADD | TIMESTAMPDIFF | TREAT | TRIM @@ -38404,6 +38408,7 @@ type_func_name_keyword_without_current_schema: | RIGHT | RLIKE | SIMILAR + | SOUNDS | TABLESAMPLE | TIMECAPSULE | XOR diff --git a/contrib/dolphin/sql/kwlist.sql b/contrib/dolphin/sql/kwlist.sql index e05fa815c..ce93f5a59 100644 --- a/contrib/dolphin/sql/kwlist.sql +++ b/contrib/dolphin/sql/kwlist.sql @@ -170,6 +170,16 @@ analyze verbose foreign tables; --unsupported create index verbose_index on verbose(verbose); +-- unreserved keyword: ast, PARTITIONING, REPAIR, ROWTYPE_P, +create table ast(ast int); +create table PARTITIONING(PARTITIONING int); +create table REPAIR(REPAIR int); +create table ROWTYPE(ROWTYPE int); +-- col_name_keyword +create table ttt_test(TIMESTAMPADD int); +--type func keyword +create FUNCTION sounds() RETURNS int as 'select 1' language sql; + /* excluded */ create table excluded(excluded int); insert into excluded values(1); -- Gitee From 6ff6082304d0ca166a8c096720fdf0b8af1cd679 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Fri, 8 Dec 2023 10:29:57 +0800 Subject: [PATCH 109/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=20bugfix=201.=E8=A1=A5=E9=BD=90gaussdb?= =?UTF-8?q?=E8=A2=ABkill=E5=9C=BA=E6=99=AF=E4=B8=8B,=E5=8D=8F=E8=B0=83?= =?UTF-8?q?=E7=BA=BF=E7=A8=8B=E7=9A=84=E9=87=8A=E6=94=BE=E9=80=BB=E8=BE=91?= 
=?UTF-8?q?=202.=E4=BF=AE=E5=A4=8DI8MQKM?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/include/spq/spq_mutate.h | 2 +- .../spq_plugin/src/executor/spq_seqscan.cpp | 47 +++---------------- contrib/spq_plugin/src/guc_spq.cpp | 1 - contrib/spq_plugin/src/spq/spq_mutate.cpp | 29 +++++++++--- contrib/spq_plugin/src/spq/spq_plan.cpp | 43 +++++++---------- contrib/spq_plugin/src/spq_opt.cpp | 2 +- .../translate/CTranslatorDXLToPlStmt.cpp | 28 +++++++++-- .../translate/CTranslatorQueryToDXL.cpp | 6 --- .../spq_optimizer_util/utils/COptTasks.cpp | 2 +- contrib/spq_plugin/src/spqplugin.cpp | 2 - 10 files changed, 76 insertions(+), 86 deletions(-) diff --git a/contrib/spq_plugin/include/spq/spq_mutate.h b/contrib/spq_plugin/include/spq/spq_mutate.h index 32770c2a1..366f3346e 100644 --- a/contrib/spq_plugin/include/spq/spq_mutate.h +++ b/contrib/spq_plugin/include/spq/spq_mutate.h @@ -21,7 +21,7 @@ extern void collect_shareinput_producers(PlannerInfo *root, Plan *plan); extern Plan *replace_shareinput_targetlists(PlannerInfo *root, Plan *plan); -extern Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root); +extern Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root, PlanSlice *slices); extern void remove_subquery_in_RTEs(Node *node); extern bool is_plan_node(Node *node); extern void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal *glob); diff --git a/contrib/spq_plugin/src/executor/spq_seqscan.cpp b/contrib/spq_plugin/src/executor/spq_seqscan.cpp index f69ca0063..5f42afc3d 100644 --- a/contrib/spq_plugin/src/executor/spq_seqscan.cpp +++ b/contrib/spq_plugin/src/executor/spq_seqscan.cpp @@ -364,50 +364,17 @@ public: .type = SPQ_QC_CONNECTION, }; - constexpr int MAX_RETRY_TIME = 100000; bool found = false; QCConnEntry* entry; - int retry = 0; - while (!found && retry < MAX_RETRY_TIME) { - pthread_rwlock_wrlock(&g_instance.spq_cxt.adp_connects_lock); - entry = 
(QCConnEntry*)hash_search(g_instance.spq_cxt.adp_connects, (void*)&key, HASH_FIND, &found); - if (!found) { - pthread_rwlock_unlock(&g_instance.spq_cxt.adp_connects_lock); - pg_usleep(100); - ++retry; - continue; - } - backward_conn = entry->backward; - BackConnInfo fcmsg; - if (entry->forward.idx == 0) { - fcmsg.node_idx = backward_conn.idx; - fcmsg.version = backward_conn.ver; - fcmsg.streamcap = entry->streamcap; - fcmsg.query_id = u_sess->debug_query_id; - fcmsg.stream_key = { - .queryId = entry->key.query_id, - .planNodeId = entry->key.plan_node_id, - .producerSmpId = 0, - .consumerSmpId = 0, - }; - fcmsg.backward = &backward_conn; - int error = gs_r_build_reply_connection(&fcmsg, backward_conn.ver, &entry->forward.sid); - if (error != 0) { - gs_close_gsocket(&entry->forward); - ereport(ERROR, ((errmsg("spq try build dual channel backward direction failed")))); - } - entry->forward.idx = backward_conn.idx; - entry->forward.ver = backward_conn.ver; - entry->forward.type = GSOCK_PRODUCER; - } - forward_conn = entry->forward; + pthread_rwlock_rdlock(&g_instance.spq_cxt.adp_connects_lock); + entry = (QCConnEntry*)hash_search(g_instance.spq_cxt.adp_connects, (void*)&key, HASH_FIND, &found); + if (!found) { pthread_rwlock_unlock(&g_instance.spq_cxt.adp_connects_lock); - break; - } - if (backward_conn.idx == 0) { - gs_close_gsocket(&backward_conn); - ereport(ERROR, ((errmsg("spq try build dual channel forward direction failed")))); + ereport(ERROR, (errmsg("spq seq scan: can not found adaptive connection"))); } + backward_conn = entry->backward; + forward_conn = entry->forward; + pthread_rwlock_unlock(&g_instance.spq_cxt.adp_connects_lock); } SpqAdpScanPagesRes adps_get_adps_response(uint32 nblocks, int64_t iter_no) diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index d8b63a429..76c80eab8 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -56,7 +56,6 @@ static bool 
spq_check_scan_unit_size(int *newval, void **extra, GucSource source static const char* spq_show_scan_unit_size(void); static bool check_spq_cluster_map(char **newval, void **extra, GucSource source); static const char *show_spq_cluster_map(void); -static bool check_spq_nodes(char **newval, void **extra, GucSource source); void assign_spq_nodes(const char *newval, void *extra); static const struct config_enum_entry spq_optimizer_minidump_options[] = { diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index e8cbe9f30..46962f8dd 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -453,6 +453,19 @@ static bool shareinput_mutator_xslice_1(Node *node, PlannerInfo *root, bool fPop if (shared) { ctxt->shared_inputs[sisc->share_id].producer_slice_id = motId; sisc->is_producer = true; + } else { + int producer_slice_id = ctxt->shared_inputs[sisc->share_id].producer_slice_id; + PlanSlice *producer_slice = &(ctxt->slices[producer_slice_id]); + PlanSlice *consumer_slice = &(ctxt->slices[motId]); + if (producer_slice->numsegments != consumer_slice->numsegments) { + ereport(ERROR, (errmsg("ShareInputScan check dop fail share_id[%d] producer(%d, %d) consumer(%d, %d) ", + sisc->share_id, producer_slice_id, producer_slice->numsegments, + motId, consumer_slice->numsegments))); + } else { + ereport(DEBUG2, (errmsg("ShareInputScan check dop SUCCESS share_id[%d] producer(%d, %d) consumer(%d, %d) ", + sisc->share_id, producer_slice_id, producer_slice->numsegments, + motId, consumer_slice->numsegments))); + } } share_info->participant_slices = bms_add_member(share_info->participant_slices, motId); @@ -506,19 +519,17 @@ static bool shareinput_mutator_xslice_2(Node *node, PlannerInfo *root, bool fPop * Scan through the plan tree and make note of which Share Input Scans * are cross-slice. 
*/ -Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root) +Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root, PlanSlice *slices) { PlannerGlobal *glob = root->glob; ApplyShareInputContext *ctxt = &glob->share; - ListCell *lp, *lr; - int subplan_id; ctxt->motStack = NULL; ctxt->qdShares = NULL; ctxt->shared_inputs = (ApplyShareInputContextPerShare *)palloc0(ctxt->shared_input_count * sizeof(ApplyShareInputContextPerShare)); - + ctxt->slices = slices; shareinput_pushmot(ctxt, 0); /* @@ -664,14 +675,14 @@ Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion) stream->consumer_nodes = ng_convert_to_exec_nodes(distribution, LOCATOR_TYPE_REPLICATED, RELATION_ACCESS_READ); } else if (motion->motionType == MOTIONTYPE_HASH || motion->motionType == MOTIONTYPE_EXPLICIT) { - stream->smpDesc.distriType = plan->dop == 1 ? REMOTE_DISTRIBUTE : REMOTE_SPLIT_DISTRIBUTE; + stream->smpDesc.distriType = REMOTE_SPLIT_DISTRIBUTE; stream->type = STREAM_REDISTRIBUTE; stream->consumer_nodes = ng_convert_to_exec_nodes(distribution, LOCATOR_TYPE_HASH, RELATION_ACCESS_READ); if (stream->distribute_keys == nullptr) { stream->smpDesc.distriType = REMOTE_ROUNDROBIN; } } else { - ereport(LOG,(errmsg("unknown motion type [%d]", motion->motionType))); + ereport(ERROR,(errmsg("unknown motion type [%d]", motion->motionType))); } stream->streamID = motion->motionID; return (Plan*)stream; @@ -805,6 +816,9 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) if (plan == NULL) return NULL; + if (plan->exec_nodes == NULL || plan->exec_nodes->nodeList == NULL) { + ereport(ERROR, (errmsg("exec_nodes check fail plan type[%d]", plan->type))); + } /* replace motion stream for subplan */ List* subplans = root->glob->subplans; @@ -839,6 +853,9 @@ Plan *replace_motion_stream_recurse(PlannerInfo* root, Plan *plan, bool &top) } if (IsA(plan, Sequence)) { + if (top == true) { + ereport(ERROR, (errmsg("There's no gather on sequence curentIndex[%d]", 
cxt->curentIndex))); + } Sequence* node = (Sequence*)plan; foreach(lc, node->subplans) { Plan* subplan = (Plan*)lfirst(lc); diff --git a/contrib/spq_plugin/src/spq/spq_plan.cpp b/contrib/spq_plugin/src/spq/spq_plan.cpp index dbd3fec12..383622e25 100644 --- a/contrib/spq_plugin/src/spq/spq_plan.cpp +++ b/contrib/spq_plugin/src/spq/spq_plan.cpp @@ -767,7 +767,6 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont case RTE_RELATION: /* ordinary relation reference */ case RTE_VOID: /* deleted entry */ case RTE_RESULT: - case RTE_NAMEDTUPLESTORE: /* No extras. */ break; @@ -787,20 +786,14 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont newrte->joinaliasvars = (List *)copyObject(rte->joinaliasvars); break; - case RTE_FUNCTION: /* functions in FROM */ - // MUTATE(newrte->funcexpr, rte->funcexpr, List *); + case RTE_FUNCTION: + MUTATE(newrte->funcexpr, rte->funcexpr, Node*); break; case RTE_TABLEFUNCTION: - // newrte->subquery = (Query*)copyObject(rte->subquery); - // MUTATE(newrte->funcexpr, rte->funcexpr, List *); + newrte->subquery = (Query*)copyObject(rte->subquery); + MUTATE(newrte->funcexpr, rte->funcexpr, Node*); break; - - case RTE_TABLEFUNC: - // newrte->tablefunc = copyObject(rte->tablefunc); - // MUTATE(newrte->tablefunc, rte->tablefunc, TableFunc *); - break; - case RTE_VALUES: MUTATE(newrte->values_lists, rte->values_lists, List *); break; @@ -812,20 +805,20 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont } break; - // case T_RangeTblFunction: - // { - // RangeTblFunction *rtfunc = (RangeTblFunction *) node; - // RangeTblFunction *newrtfunc; - // - // FLATCOPY(newrtfunc, rtfunc, RangeTblFunction); - // MUTATE(newrtfunc->funcexpr, rtfunc->funcexpr, Node *); - // - // /* - // * TODO is this right? 
//newrte->coldeflist = (List *) - // * copyObject(rte->coldeflist); - // */ - // } - // break; + case T_RangeTblFunction: + { + RangeTblFunction *rtfunc = (RangeTblFunction *) node; + RangeTblFunction *newrtfunc; + + FLATCOPY(newrtfunc, rtfunc, RangeTblFunction); + MUTATE(newrtfunc->funcexpr, rtfunc->funcexpr, Node *); + + /* + * TODO is this right? //newrte->coldeflist = (List *) + * copyObject(rte->coldeflist); + */ + } + break; case T_ForeignScan: { ForeignScan *fdwscan = (ForeignScan *)node; diff --git a/contrib/spq_plugin/src/spq_opt.cpp b/contrib/spq_plugin/src/spq_opt.cpp index 7491b0f4c..d3956d30e 100644 --- a/contrib/spq_plugin/src/spq_opt.cpp +++ b/contrib/spq_plugin/src/spq_opt.cpp @@ -244,7 +244,7 @@ PlannedStmt *spq_planner(Query *parse, ParamListInfo boundParams) collect_shareinput_producers(root, result->planTree); /* Post-process ShareInputScan nodes */ - (void)apply_shareinput_xslice(result->planTree, root); + (void)apply_shareinput_xslice(result->planTree, root, result->slices); /* * Fix ShareInputScans for EXPLAIN, like in standard_planner(). 
For all diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index a24fc78ef..b9cfa4280 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -142,6 +142,10 @@ static void add_distribute_info(Plan* scanPlan, List* scanClauses, RangeTblEntry execNodes = makeNode(ExecNodes); Distribution* distribution = ng_get_installation_group_distribution(); + if (distribution->bms_data_nodeids == NULL) { + List* nodeid_list = lappend_int(NIL, 0); + distribution->bms_data_nodeids = ng_convert_to_nodeids(nodeid_list); + } ng_set_distribution(&execNodes->distribution, distribution); execNodes->nodeList = ng_convert_to_nodeid_list(execNodes->distribution.bms_data_nodeids); } @@ -156,7 +160,11 @@ static void add_distribute_info(Plan* scanPlan, List* scanClauses, RangeTblEntry } scanPlan->exec_type = EXEC_ON_DATANODES; } - + + if (execNodes == NULL || list_length(execNodes->nodeList) == 0) { + SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, + SPQOS_WSZ_LIT("add_distribute_info exec_nodes cannot be NULL")); + } scanPlan->exec_nodes = execNodes; // not support(build_baserel_distributekey return null) //scanPlan->distributed_keys = bestPath->parent->distribute_keys; @@ -3531,7 +3539,7 @@ CTranslatorDXLToPlStmt::TranslateDXLAppend( SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, SPQOS_WSZ_LIT("exec_nodes cannot be NULL")); } - if (max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { + if (plan->exec_nodes == NULL || max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { plan->exec_nodes = ng_get_dest_execnodes(child_plan); max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); } @@ -3971,6 +3979,10 @@ CTranslatorDXLToPlStmt::TranslateDXLCTEConsumerToSharedScan( /* SPQ: 
add exec_nodes for plan */ ExecNodes* execNodes = makeNode(ExecNodes); Distribution* distribution = ng_get_installation_group_distribution(); + if (distribution->bms_data_nodeids == NULL) { + List* nodeid_list = lappend_int(NIL, 0); + distribution->bms_data_nodeids = ng_convert_to_nodeids(nodeid_list); + } ng_set_distribution(&execNodes->distribution, distribution); execNodes->nodeList = ng_convert_to_nodeid_list(execNodes->distribution.bms_data_nodeids); execNodes->baselocatortype = LOCATOR_TYPE_HASH; @@ -4058,7 +4070,7 @@ CTranslatorDXLToPlStmt::TranslateDXLSequence( SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiExpr2DXLUnsupportedFeature, SPQOS_WSZ_LIT("exec_nodes cannot be NULL in Sequence")); } - if (max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { + if (plan->exec_nodes == NULL || max_num_exec_nodes < list_length(child_plan->exec_nodes->nodeList)) { plan->exec_nodes = ng_get_dest_execnodes(child_plan); max_num_exec_nodes = list_length(plan->exec_nodes->nodeList); } @@ -6095,6 +6107,16 @@ CTranslatorDXLToPlStmt::TranslateDXLBitmapIndexProbe( * As of 8.4, the indexstrategy and indexsubtype fields are no longer * available or needed in IndexScan. Ignore them. 
*/ + ExecNodes* execNodes = makeNode(ExecNodes); + Distribution* distribution = ng_get_installation_group_distribution(); + if (distribution->bms_data_nodeids == NULL) { + List* nodeid_list = lappend_int(NIL, 0); + distribution->bms_data_nodeids = ng_convert_to_nodeids(nodeid_list); + } + ng_set_distribution(&execNodes->distribution, distribution); + execNodes->nodeList = ng_convert_to_nodeid_list(execNodes->distribution.bms_data_nodeids); + execNodes->baselocatortype = LOCATOR_TYPE_HASH; + plan->exec_nodes = execNodes; SetParamIds(plan); /* diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp index b51304e9c..73b462e1f 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp @@ -3085,12 +3085,6 @@ CTranslatorQueryToDXL::TranslateFromClauseToDXL(Node *node) (RangeTblEntry *) spqdb::ListNth(m_query->rtable, rt_index - 1); SPQOS_ASSERT(NULL != rte); - if (rte->forceDistRandom) - { - SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiQuery2DXLUnsupportedFeature, - SPQOS_WSZ_LIT("spq_dist_random")); - } - if (rte->lateral) { SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiQuery2DXLUnsupportedFeature, diff --git a/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp b/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp index 6c3052315..90f7318db 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp @@ -794,7 +794,7 @@ COptTasks::PrintMissingStatsWarning(CMemoryPool *mp, CMDAccessor *md_accessor, "One or more columns in the following table(s) do not have statistics: %s", CreateMultiByteCharStringFromWCString(wcstr.GetBuffer())); GpdbEreport( - ERRCODE_SUCCESSFUL_COMPLETION, NOTICE, msgbuf, + ERRCODE_SUCCESSFUL_COMPLETION, LOG, msgbuf, "For 
non-partitioned tables, run analyze ()." " For partitioned tables, run analyze rootpartition ()." " See log for columns missing statistics."); diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index c6dc6d936..4157704b1 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -116,11 +116,9 @@ static bool check_disable_spq_planner_walker(Node *node, void *context) case RTE_JOIN: case RTE_CTE: break; - case RTE_TABLEFUNC: case RTE_VALUES: case RTE_TABLEFUNCTION: case RTE_VOID: - case RTE_NAMEDTUPLESTORE: default: CHECK_RETURN_HELP_LOG(true, "the RTEKIND is not supported"); } -- Gitee From c69caea59d7484ad228511322357fc63ce97b8c8 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 5 Dec 2023 15:10:26 +0800 Subject: [PATCH 110/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9stddev=E3=80=81varian?= =?UTF-8?q?ce=E5=87=BD=E6=95=B0=E4=B8=BA=E6=A0=B7=E6=9C=AC(pop)=E7=BB=9F?= =?UTF-8?q?=E8=AE=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/default_function.out | 36 +++++++++++++ contrib/dolphin/expected/tinyint_agg.out | 28 +++++----- contrib/dolphin/expected/uint_agg.out | 48 ++++++++--------- contrib/dolphin/plugin_parser/parse_expr.cpp | 51 ++++++++++++++++--- contrib/dolphin/sql/default_function.sql | 19 +++++++ 5 files changed, 136 insertions(+), 46 deletions(-) diff --git a/contrib/dolphin/expected/default_function.out b/contrib/dolphin/expected/default_function.out index 3ed2c21f2..04295923f 100644 --- a/contrib/dolphin/expected/default_function.out +++ b/contrib/dolphin/expected/default_function.out @@ -165,5 +165,41 @@ drop table t4; drop table t5; drop table t6; drop table t7; +CREATE FUNCTION tt.mode_b_default(i integer) RETURNS integer AS +$$ +BEGIN + RETURN i + 1; +END; +$$ LANGUAGE plpgsql; +set current_schema = 'tt'; +select mode_b_default(b) from public.a; +ERROR: Invalid default value. 
+DETAIL: the 2th column of a doesn't have a default value +CONTEXT: referenced column: mode_b_default +select tt.mode_b_default(b) from public.a; + mode_b_default +---------------- + 2 +(1 row) + +select pg_catalog.mode_b_default(b) from public.a; +ERROR: Invalid default value. +DETAIL: the 2th column of a doesn't have a default value +CONTEXT: referenced column: mode_b_default +reset current_schema; +select mode_b_default(b) from a; +ERROR: Invalid default value. +DETAIL: the 2th column of a doesn't have a default value +CONTEXT: referenced column: mode_b_default +select tt.mode_b_default(b) from a; + mode_b_default +---------------- + 2 +(1 row) + +select pg_catalog.mode_b_default(b) from a; +ERROR: Invalid default value. +DETAIL: the 2th column of a doesn't have a default value +CONTEXT: referenced column: mode_b_default \c postgres drop database if exists db_default; diff --git a/contrib/dolphin/expected/tinyint_agg.out b/contrib/dolphin/expected/tinyint_agg.out index 5a949de4c..a9ddf4dcc 100644 --- a/contrib/dolphin/expected/tinyint_agg.out +++ b/contrib/dolphin/expected/tinyint_agg.out @@ -45,9 +45,9 @@ select min(a), min(b) from u1; (1 row) select stddev(a), stddev(b) from u1; - stddev | stddev -------------------+------------------ - 127.500326796967 | 127.500326796967 + stddev | stddev +----------------------+---------------------- + 104.1035808968911288 | 104.1035808968911288 (1 row) select stddev_pop(a), stddev_pop(b) from u1; @@ -81,9 +81,9 @@ select var_samp(a), var_samp(b) from u1; (1 row) select variance(a), variance(b) from u1; - variance | variance ---------------------+-------------------- - 16256.333333333333 | 16256.333333333333 + variance | variance +------------------------+------------------------ + 10837.5555555555555556 | 10837.5555555555555556 (1 row) select listagg(a) within group(order by a) from u1; @@ -208,14 +208,14 @@ explain(costs off, verbose) select min(a)from smp_test; (8 rows) explain(costs off, verbose) select stddev(a) from 
smp_test; - QUERY PLAN --------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------- Aggregate - Output: pg_catalog.stddev((stddev((a)::double precision))) + Output: pg_catalog.stddev_pop((stddev_pop(a))) -> Streaming(type: LOCAL GATHER dop: 1/2) - Output: (stddev((a)::double precision)) + Output: (stddev_pop(a)) -> Aggregate - Output: stddev((a)::double precision) + Output: stddev_pop(a) -> Seq Scan on tinyint_agg.smp_test Output: a (8 rows) @@ -289,11 +289,11 @@ explain(costs off, verbose) select variance(a)from smp_test; QUERY PLAN ---------------------------------------------------- Aggregate - Output: pg_catalog.variance((variance(a))) + Output: pg_catalog.var_pop((var_pop(a))) -> Streaming(type: LOCAL GATHER dop: 1/2) - Output: (variance(a)) + Output: (var_pop(a)) -> Aggregate - Output: variance(a) + Output: var_pop(a) -> Seq Scan on tinyint_agg.smp_test Output: a (8 rows) diff --git a/contrib/dolphin/expected/uint_agg.out b/contrib/dolphin/expected/uint_agg.out index 73e514136..a82f62313 100644 --- a/contrib/dolphin/expected/uint_agg.out +++ b/contrib/dolphin/expected/uint_agg.out @@ -48,7 +48,7 @@ select min(a), min(b) from u1; select stddev(a), stddev(b) from u1; stddev | stddev ------------------+------------------ - 147.224318643355 | 147.224318643355 + 120.208152801713 | 120.208152801713 (1 row) select stddev_pop(a), stddev_pop(b) from u1; @@ -84,7 +84,7 @@ select var_samp(a), var_samp(b) from u1; select variance(a), variance(b) from u1; variance | variance --------------------+-------------------- - 21675.000000000000 | 21675.000000000000 + 14450.000000000000 | 14450.000000000000 (1 row) select listagg(a) within group(order by a) from u1; @@ -147,7 +147,7 @@ select min(a), min(b) from u2; select stddev(a), stddev(b) from u2; stddev | stddev ----------------+---------------- - 37836.64989134 | 37836.64989134 + 30893.49527004 | 30893.49527004 (1 row) select stddev_pop(a), stddev_pop(b) 
from u2; @@ -181,9 +181,9 @@ select var_samp(a), var_samp(b) from u2; (1 row) select variance(a), variance(b) from u2; - variance | variance ----------------------+--------------------- - 1431612075.00000000 | 1431612075.00000000 + variance | variance +--------------------+-------------------- + 954408050.00000000 | 954408050.00000000 (1 row) select listagg(a) within group(order by a) from u2; @@ -246,7 +246,7 @@ select min(a), min(b) from u4; select stddev(a), stddev(b) from u4; stddev | stddev ------------+------------ - 2479700524 | 2479700524 + 2024667000 | 2024667000 (1 row) select stddev_pop(a), stddev_pop(b) from u4; @@ -282,7 +282,7 @@ select var_samp(a), var_samp(b) from u4; select variance(a), variance(b) from u4; variance | variance ---------------------+--------------------- - 6148914688373205675 | 6148914688373205675 + 4099276458915470450 | 4099276458915470450 (1 row) select listagg(a) within group(order by a) from u4; @@ -343,9 +343,9 @@ select min(a), min(b) from u8; (1 row) select stddev(a), stddev(b) from u8; - stddev | stddev -----------------------+---------------------- - 10650232656628343400 | 10650232656628343400 + stddev | stddev +---------------------+--------------------- + 8695878550221854808 | 8695878550221854808 (1 row) select stddev_pop(a), stddev_pop(b) from u8; @@ -379,9 +379,9 @@ select var_samp(a), var_samp(b) from u8; (1 row) select variance(a), variance(b) from u8; - variance | variance ------------------------------------------+----------------------------------------- - 113427455640312821142160373094783036075 | 113427455640312821142160373094783036075 + variance | variance +----------------------------------------+---------------------------------------- + 75618303760208547428106915396522024050 | 75618303760208547428106915396522024050 (1 row) select listagg(a) within group(order by a) from u8; @@ -488,14 +488,14 @@ explain(costs off, verbose) select min(a), min(b) from smp_test; (8 rows) explain(costs off, verbose) select 
stddev(a), stddev(b) from smp_test; - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------ Aggregate - Output: pg_catalog.stddev((stddev(a))), pg_catalog.stddev((stddev(b))) + Output: pg_catalog.stddev_pop((stddev_pop(a))), pg_catalog.stddev_pop((stddev_pop(b))) -> Streaming(type: LOCAL GATHER dop: 1/2) - Output: (stddev(a)), (stddev(b)) + Output: (stddev_pop(a)), (stddev_pop(b)) -> Aggregate - Output: stddev(a), stddev(b) + Output: stddev_pop(a), stddev_pop(b) -> Seq Scan on uint_agg.smp_test Output: a, b (8 rows) @@ -566,14 +566,14 @@ explain(costs off, verbose) select var_samp(a), var_samp(b) from smp_test; (8 rows) explain(costs off, verbose) select variance(a), variance(b) from smp_test; - QUERY PLAN ----------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------ Aggregate - Output: pg_catalog.variance((variance(a))), pg_catalog.variance((variance(b))) + Output: pg_catalog.var_pop((var_pop(a))), pg_catalog.var_pop((var_pop(b))) -> Streaming(type: LOCAL GATHER dop: 1/2) - Output: (variance(a)), (variance(b)) + Output: (var_pop(a)), (var_pop(b)) -> Aggregate - Output: variance(a), variance(b) + Output: var_pop(a), var_pop(b) -> Seq Scan on uint_agg.smp_test Output: a, b (8 rows) diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 4f40872d8..d2cfc26b4 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -132,6 +132,8 @@ typedef struct DefaultFuncType { Oid tableOid = InvalidOid; int colNumber = 0; } DefaultFuncType; + +#define SYSTEM_SCHEMA_NAME(schemaname) ((schemaname) == NULL || strcmp((schemaname), "pg_catalog") == 0) #endif #define OrientedIsCOLorPAX(rte) ((rte)->orientation == REL_COL_ORIENTED 
|| (rte)->orientation == REL_PAX_ORIENTED) @@ -2126,14 +2128,13 @@ static inline bool IsSameCategory(Oid type1, Oid type2) * make aexpr and do transform, return the result. * 2. otherwise, all arg's type is common, use logical in between_and function, just like MySQL. return NULL in this case */ -static Node* HandleBetweenAnd(ParseState* pstate, FuncCall* fn, List* targs) +static Node* HandleBetweenAnd(ParseState* pstate, FuncCall* fn, List* targs, const char* funcname) { /* sanity check, between args muse be 3 */ - if (list_length(fn->funcname) > 2 || list_length(targs) != 3) { + if (list_length(fn->funcname) > 3 || list_length(targs) != 3) { return NULL; } - const char* funcname = list_length(fn->funcname) == 1 ? strVal(linitial(fn->funcname)) : strVal(lsecond(fn->funcname)); bool b_a = strcmp(funcname, "b_between_and") == 0; bool b_n_a = strcmp(funcname, "b_not_between_and") == 0; bool b_s_a = strcmp(funcname, "b_sym_between_and") == 0; @@ -2207,6 +2208,32 @@ static Node* HandleBetweenAnd(ParseState* pstate, FuncCall* fn, List* targs) return transformExprRecurse(pstate, node); } + +void ReplaceBCmptFuncName(List* names, char* objname, char* defaultname, char* replacename) +{ + int length = list_length(names); + Assert(length <= 3); + + if (strcmp(objname, defaultname) == 0) { + /* + * 1. obj + * 2. pkg.obj/schema.pkg/schema.obj + * 3. schema.pkg.obj/catalog.schema.obj + * 4. 
catalog.schema.pkg.obj + * Package only allowed create in A compatibility + */ + if (length == 1) { + strVal(linitial(names)) = replacename; + } else if (length == 2) { + strVal(lsecond(names)) = replacename; + } else if (length == 3) { + strVal(lthird(names)) = replacename; + } else { + /* should not happen */ + } + pfree(objname); + } +} #endif static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) @@ -2216,8 +2243,12 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) ListCell* args = NULL; Node* result = NULL; #ifdef DOLPHIN + char* schemaname = NULL; + char* objname = NULL; + char* pkgname = NULL; + DeconstructQualifiedName(fn->funcname, &schemaname, &objname, &pkgname); /* For DEFAULT function, while transform, replace it to the default expr for col*/ - if (strcmp(strVal(linitial(fn->funcname)), "mode_b_default") == 0) { + if (strcmp(objname, "mode_b_default") == 0 && SYSTEM_SCHEMA_NAME(schemaname)) { return HandleDefaultFunction(pstate, fn); } #endif @@ -2236,12 +2267,16 @@ static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) } } #ifdef DOLPHIN - result = HandleBetweenAnd(pstate, fn, targs); - if (PointerIsValid(result)) { - return result; + if (SYSTEM_SCHEMA_NAME(schemaname)) { + result = HandleBetweenAnd(pstate, fn, targs, objname); + if (PointerIsValid(result)) { + return result; + } + ReplaceBCmptFuncName(fn->funcname, objname, "stddev", "stddev_pop"); + ReplaceBCmptFuncName(fn->funcname, objname, "variance", "var_pop"); } - if (strcmp(strVal(linitial(fn->funcname)), "name_const") == 0 && list_length(targs) == 2) { + if (strcmp(objname, "name_const") == 0 && list_length(targs) == 2 && SYSTEM_SCHEMA_NAME(schemaname)) { Node *name_arg = (Node *)linitial(targs); Node *const_arg = (Node *)llast(targs); diff --git a/contrib/dolphin/sql/default_function.sql b/contrib/dolphin/sql/default_function.sql index feaf5f6e9..b95b247f3 100644 --- a/contrib/dolphin/sql/default_function.sql +++ 
b/contrib/dolphin/sql/default_function.sql @@ -70,5 +70,24 @@ drop table t5; drop table t6; drop table t7; +CREATE FUNCTION tt.mode_b_default(i integer) RETURNS integer AS +$$ +BEGIN + RETURN i + 1; +END; +$$ LANGUAGE plpgsql; + +set current_schema = 'tt'; + +select mode_b_default(b) from public.a; +select tt.mode_b_default(b) from public.a; +select pg_catalog.mode_b_default(b) from public.a; + +reset current_schema; + +select mode_b_default(b) from a; +select tt.mode_b_default(b) from a; +select pg_catalog.mode_b_default(b) from a; + \c postgres drop database if exists db_default; -- Gitee From 34edf4f22e8effbc545696115ba9d9b82814e3cb Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Mon, 11 Dec 2023 19:30:55 +0800 Subject: [PATCH 111/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgms=5Fpipe.unpack=5Fm?= =?UTF-8?q?essage=5Fdate()=E5=9C=A8=E6=9C=AC=E5=9C=B0=E7=BC=93=E5=86=B2?= =?UTF-8?q?=E5=8C=BA=E4=B8=AD=E6=8E=A5=E6=94=B6date=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E6=95=B0=E6=8D=AE=E6=9C=89=E8=AF=AF=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/expected/gms_pipe_session_A.out | 25 +++++++++++++++++++ contrib/whale/expected/gms_pipe_session_B.out | 20 +++++++-------- contrib/whale/expected/orafce.out | 14 +++++------ contrib/whale/plugin_orafce/pipe.cpp | 10 +++----- contrib/whale/sql/gms_pipe_session_A.sql | 6 +++++ .../sql_script/plugin_orafce_functions.sql | 4 +-- 6 files changed, 52 insertions(+), 27 deletions(-) diff --git a/contrib/whale/expected/gms_pipe_session_A.out b/contrib/whale/expected/gms_pipe_session_A.out index 5892a2ed8..038d27599 100644 --- a/contrib/whale/expected/gms_pipe_session_A.out +++ b/contrib/whale/expected/gms_pipe_session_A.out @@ -101,6 +101,31 @@ SELECT checkUniqueSessionNameA(); (1 row) +-- test about date +select gms_pipe.pack_message(date '2023-12-6 00:00:00'); + pack_message +-------------- + +(1 row) + +select 
gms_pipe.send_message('PipeTest'); + send_message +-------------- + 0 +(1 row) + +select gms_pipe.receive_message('PipeTest'); + receive_message +----------------- + 0 +(1 row) + +select gms_pipe.unpack_message_date(); + unpack_message_date +-------------------------- + Wed Dec 06 00:00:00 2023 +(1 row) + DROP FUNCTION createImplicitPipe(); DROP FUNCTION createExplicitPipe(text,integer); DROP FUNCTION createPipe(text,integer); diff --git a/contrib/whale/expected/gms_pipe_session_B.out b/contrib/whale/expected/gms_pipe_session_B.out index 08cd86865..3658fa7d4 100644 --- a/contrib/whale/expected/gms_pipe_session_B.out +++ b/contrib/whale/expected/gms_pipe_session_B.out @@ -7,8 +7,8 @@ -- Receives messages sent via an implicit pipe SELECT receiveFrom('named_pipe'); NOTICE: RECEIVE 11: Message From Session A -NOTICE: RECEIVE 13: Tue Jan 01 00:00:00 2013 PST -NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST +NOTICE: RECEIVE 12: Tue Jan 01 00:00:00 2013 +NOTICE: RECEIVE 12: Tue Jan 01 09:00:00 2013 NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST NOTICE: RECEIVE 9: 12345.6789 NOTICE: RECEIVE 9: 12345 @@ -23,8 +23,8 @@ NOTICE: RECEIVE 24: (2,rob) -- Bulk receive messages SELECT bulkReceive(); NOTICE: RECEIVE 11: Message From Session A -NOTICE: RECEIVE 13: Tue Jan 01 00:00:00 2013 PST -NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST +NOTICE: RECEIVE 12: Tue Jan 01 00:00:00 2013 +NOTICE: RECEIVE 12: Tue Jan 01 09:00:00 2013 NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST NOTICE: RECEIVE 9: 12345.6789 NOTICE: RECEIVE 9: 12345 @@ -46,8 +46,8 @@ SELECT gms_pipe.receive_message('recv_private1_notifier'); SELECT receiveFrom('private_pipe_1'); NOTICE: RECEIVE 11: Message From Session A -NOTICE: RECEIVE 13: Tue Jan 01 00:00:00 2013 PST -NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST +NOTICE: RECEIVE 12: Tue Jan 01 00:00:00 2013 +NOTICE: RECEIVE 12: Tue Jan 01 09:00:00 2013 NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST NOTICE: RECEIVE 9: 12345.6789 NOTICE: RECEIVE 9: 12345 
@@ -85,8 +85,8 @@ SELECT gms_pipe.receive_message('recv_public1_notifier'); SELECT receiveFrom('public_pipe_3'); NOTICE: RECEIVE 11: Message From Session A -NOTICE: RECEIVE 13: Tue Jan 01 00:00:00 2013 PST -NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST +NOTICE: RECEIVE 12: Tue Jan 01 00:00:00 2013 +NOTICE: RECEIVE 12: Tue Jan 01 09:00:00 2013 NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST NOTICE: RECEIVE 9: 12345.6789 NOTICE: RECEIVE 9: 12345 @@ -106,8 +106,8 @@ SELECT gms_pipe.receive_message('recv_public2_notifier'); SELECT receiveFrom('public_pipe_4'); NOTICE: RECEIVE 11: Message From Session A -NOTICE: RECEIVE 13: Tue Jan 01 00:00:00 2013 PST -NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST +NOTICE: RECEIVE 12: Tue Jan 01 00:00:00 2013 +NOTICE: RECEIVE 12: Tue Jan 01 09:00:00 2013 NOTICE: RECEIVE 13: Tue Jan 01 09:00:00 2013 PST NOTICE: RECEIVE 9: 12345.6789 NOTICE: RECEIVE 9: 12345 diff --git a/contrib/whale/expected/orafce.out b/contrib/whale/expected/orafce.out index c45c97a66..af20721b5 100644 --- a/contrib/whale/expected/orafce.out +++ b/contrib/whale/expected/orafce.out @@ -637,13 +637,13 @@ select gms_pipe.receive_message('test_date'); select gms_pipe.next_item_type(); next_item_type ---------------- - 13 + 12 (1 row) select gms_pipe.unpack_message_date(); unpack_message_date --------------------- - 2006-10-11 07:00:00 + 2006-10-11 00:00:00 (1 row) select gms_pipe.pack_message(to_timestamp('2008-10-30 01:23:45', 'YYYY-MM-DD HH24:MI:SS')); @@ -667,15 +667,13 @@ select gms_pipe.receive_message('test_timestamp'); select gms_pipe.next_item_type(); next_item_type ---------------- - 13 + 12 (1 row) select to_char(gms_pipe.unpack_message_timestamp(), 'YYYY-MM-DD HH24:MI:SS'); - to_char ---------------------- - 2008-10-30 01:23:45 -(1 row) - +ERROR: datatype mismatch +DETAIL: unpack unexpected type: 12 +CONTEXT: referenced column: to_char select gms_pipe.pack_message(6262626262::numeric); pack_message -------------- diff --git 
a/contrib/whale/plugin_orafce/pipe.cpp b/contrib/whale/plugin_orafce/pipe.cpp index 37cf353b3..d36733b43 100644 --- a/contrib/whale/plugin_orafce/pipe.cpp +++ b/contrib/whale/plugin_orafce/pipe.cpp @@ -533,7 +533,7 @@ Datum gms_pipe_pack_message_text(PG_FUNCTION_ARGS) Datum gms_pipe_pack_message_date(PG_FUNCTION_ARGS) { - DateADT dt = PG_GETARG_DATEADT(0); + Timestamp dt = PG_GETARG_TIMESTAMP(0); GetSessionContext()->output_buffer = check_buffer(GetSessionContext()->output_buffer, LOCALMSGSZ); pack_field(GetSessionContext()->output_buffer, IT_DATE, sizeof(dt), &dt, InvalidOid); @@ -661,7 +661,7 @@ static Datum gms_pipe_unpack_message(PG_FUNCTION_ARGS, message_data_type dtype) result = TimestampTzGetDatum(*(TimestampTz *)ptr); break; case IT_DATE: - result = DateADTGetDatum(*(DateADT *)ptr); + result = TimestampGetDatum(*(TimestampTz *)ptr); break; case IT_VARCHAR: case IT_NUMBER: @@ -720,11 +720,7 @@ Datum gms_pipe_unpack_message_text(PG_FUNCTION_ARGS) Datum gms_pipe_unpack_message_date(PG_FUNCTION_ARGS) { - if (DB_IS_CMPT(A_FORMAT)) { - return gms_pipe_unpack_message(fcinfo, IT_TIMESTAMPTZ); - } else { - return gms_pipe_unpack_message(fcinfo, IT_DATE); - } + return gms_pipe_unpack_message(fcinfo, IT_DATE); } Datum gms_pipe_unpack_message_timestamp(PG_FUNCTION_ARGS) diff --git a/contrib/whale/sql/gms_pipe_session_A.sql b/contrib/whale/sql/gms_pipe_session_A.sql index b36cd9dba..8e75943f5 100644 --- a/contrib/whale/sql/gms_pipe_session_A.sql +++ b/contrib/whale/sql/gms_pipe_session_A.sql @@ -194,6 +194,12 @@ SELECT notifyDropTemp(); -- tests unique_session_name() SELECT checkUniqueSessionNameA(); +-- test about date +select gms_pipe.pack_message(date '2023-12-6 00:00:00'); +select gms_pipe.send_message('PipeTest'); +select gms_pipe.receive_message('PipeTest'); +select gms_pipe.unpack_message_date(); + DROP FUNCTION createImplicitPipe(); DROP FUNCTION createExplicitPipe(text,integer); DROP FUNCTION createPipe(text,integer); diff --git 
a/contrib/whale/sql_script/plugin_orafce_functions.sql b/contrib/whale/sql_script/plugin_orafce_functions.sql index d6c91bf8e..2313f0aba 100644 --- a/contrib/whale/sql_script/plugin_orafce_functions.sql +++ b/contrib/whale/sql_script/plugin_orafce_functions.sql @@ -2732,11 +2732,11 @@ AS '$libdir/whale','gms_pipe_remove_pipe' LANGUAGE C VOLATILE STRICT; COMMENT ON FUNCTION gms_pipe.remove_pipe(text) IS 'Destroy pipe'; -CREATE FUNCTION gms_pipe.pack_message(pg_catalog.date) +CREATE FUNCTION gms_pipe.pack_message(date) RETURNS void AS '$libdir/whale','gms_pipe_pack_message_date' LANGUAGE C VOLATILE STRICT; -COMMENT ON FUNCTION gms_pipe.pack_message(pg_catalog.date) IS 'Add date field to message'; +COMMENT ON FUNCTION gms_pipe.pack_message(date) IS 'Add date field to message'; CREATE FUNCTION gms_pipe.unpack_message_date() RETURNS date -- Gitee From 3262ff55de58ad4d70cf76597a6058640081a5e0 Mon Sep 17 00:00:00 2001 From: yuchao Date: Mon, 11 Dec 2023 16:28:10 +0800 Subject: [PATCH 112/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E8=A2=ABtruncate=E5=90=8E=E4=B8=8D=E6=98=BE=E7=A4=BAwarning?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/builtin_funcs/cast.out | 6 + .../dolphin/expected/column_quote_alias.out | 34 +++ .../expected/convert_truncated_warning.out | 223 ++++++++++++++++++ .../dolphin/expected/db_b_new_gram_test.out | 18 ++ contrib/dolphin/expected/db_b_parser3.out | 8 + .../ignore_invalid_input.out | 8 + .../json_operator_test.out | 80 +++++++ .../string_func_test/db_b_compress_test.out | 35 ++- .../string_func_test/db_b_nameconst_test.out | 20 +- contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_utils/adt/numeric.cpp | 87 ++++++- contrib/dolphin/plugin_utils/adt/varchar.cpp | 9 +- .../dolphin/sql/convert_truncated_warning.sql | 28 +++ 13 files changed, 544 insertions(+), 14 deletions(-) create mode 100644 
contrib/dolphin/expected/convert_truncated_warning.out create mode 100644 contrib/dolphin/sql/convert_truncated_warning.sql diff --git a/contrib/dolphin/expected/builtin_funcs/cast.out b/contrib/dolphin/expected/builtin_funcs/cast.out index bc8ece2b3..2c99b2ecb 100644 --- a/contrib/dolphin/expected/builtin_funcs/cast.out +++ b/contrib/dolphin/expected/builtin_funcs/cast.out @@ -37,6 +37,8 @@ select cast(cast('2022-11-10 18:03:20'::timestamp as unsigned) as timestamp); (1 row) SELECT CAST('test' AS CHAR CHARACTER SET utf8); +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- t @@ -44,6 +46,8 @@ SELECT CAST('test' AS CHAR CHARACTER SET utf8); SELECT CAST('test' AS CHAR CHARACTER SET not_exist); WARNING: not_exist is not a valid encoding name. default value set +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- t @@ -51,6 +55,8 @@ WARNING: not_exist is not a valid encoding name. default value set SELECT CAST('test' AS nchar CHARACTER SET not_exist); WARNING: not_exist is not a valid encoding name. 
default value set +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- t diff --git a/contrib/dolphin/expected/column_quote_alias.out b/contrib/dolphin/expected/column_quote_alias.out index e7f0e991b..a1dbbad1f 100644 --- a/contrib/dolphin/expected/column_quote_alias.out +++ b/contrib/dolphin/expected/column_quote_alias.out @@ -11,6 +11,8 @@ select character varying 'keyword_test' from t_alias_case0001_1; (1 row) select character 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k @@ -47,18 +49,24 @@ select VARCHAR2 'keyword_test' from t_alias_case0001_1; (1 row) select NATIONAL CHARACTER 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k (1 row) select NATIONAL CHAR 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k (1 row) select NCHAR 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k @@ -71,6 +79,8 @@ select NCHAR VARYING 'keyword_test' from t_alias_case0001_1; (1 row) select CHAR 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k @@ -89,6 +99,8 @@ select TEXT 'keyword_test' from t_alias_case0001_1; (1 row) select character(2) 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(2) +CONTEXT: referenced column: bpchar bpchar -------- ke @@ -101,6 +113,8 @@ select text(2) 'keyword_test' from t_alias_case0001_1; (1 row) select char(1) 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character(1) +CONTEXT: referenced column: bpchar bpchar -------- k @@ -225,30 +239,50 @@ CONTEXT: referenced column: int4 (1 row) select 
decimal 'keyword_test' from t_alias_case0001_1; +WARNING: invalid input syntax for type numeric: "keyword_test" +LINE 1: select decimal 'keyword_test' from t_alias_case0001_1; + ^ +CONTEXT: referenced column: numeric numeric --------- 0 (1 row) select number 'keyword_test' from t_alias_case0001_1; +WARNING: invalid input syntax for type numeric: "keyword_test" +LINE 1: select number 'keyword_test' from t_alias_case0001_1; + ^ +CONTEXT: referenced column: numeric numeric --------- 0 (1 row) select dec 'keyword_test' from t_alias_case0001_1; +WARNING: invalid input syntax for type numeric: "keyword_test" +LINE 1: select dec 'keyword_test' from t_alias_case0001_1; + ^ +CONTEXT: referenced column: numeric numeric --------- 0 (1 row) select numeric 'keyword_test' from t_alias_case0001_1; +WARNING: invalid input syntax for type numeric: "keyword_test" +LINE 1: select numeric 'keyword_test' from t_alias_case0001_1; + ^ +CONTEXT: referenced column: numeric numeric --------- 0 (1 row) select fixed 'keyword_test' from t_alias_case0001_1; +WARNING: invalid input syntax for type numeric: "keyword_test" +LINE 1: select fixed 'keyword_test' from t_alias_case0001_1; + ^ +CONTEXT: referenced column: numeric numeric --------- 0 diff --git a/contrib/dolphin/expected/convert_truncated_warning.out b/contrib/dolphin/expected/convert_truncated_warning.out new file mode 100644 index 000000000..aa557e1c1 --- /dev/null +++ b/contrib/dolphin/expected/convert_truncated_warning.out @@ -0,0 +1,223 @@ +create schema convert_truncated_warning; +set current_schema = convert_truncated_warning; +set dolphin.sql_mode = default; +set dolphin.b_compatibility_mode = on; +SELECT CONVERT(TIMESTAMP '2004-01-22 21:45:33',CHAR(4)); +WARNING: value too long for type character(4) +CONTEXT: referenced column: bpchar + bpchar +-------- + 2004 +(1 row) + +SELECT CONVERT('1.1', signed); +WARNING: invalid input syntax for type int16: "1.1" +DETAIL: text contain invalid character +CONTEXT: referenced column: int8 + 
int8 +------ + 1 +(1 row) + +SELECT CONVERT('1.5', signed); +WARNING: invalid input syntax for type int16: "1.5" +DETAIL: text contain invalid character +CONTEXT: referenced column: int8 + int8 +------ + 1 +(1 row) + +SELECT CONVERT('abc', signed); +WARNING: Truncated incorrect INTEGER value: 'abc' +CONTEXT: referenced column: int8 + int8 +------ + 0 +(1 row) + +SELECT CONVERT('1111111111111111111111111111111111111111111', signed); +WARNING: value "1111111111111111111111111111111111111111111" is out of range for type int16 +DETAIL: text exceeds the length of int16 +CONTEXT: referenced column: int8 +WARNING: bigint out of range +CONTEXT: referenced column: int8 + int8 +------ + -1 +(1 row) + +SELECT CONVERT(1111111111111111111111111111111111111111111, signed); +WARNING: bigint out of range +CONTEXT: referenced column: int8 + int8 +--------------------- + 9223372036854775807 +(1 row) + +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', signed); +WARNING: bigint out of range +CONTEXT: referenced column: int8 +WARNING: bigint out of range +CONTEXT: referenced column: int8 + int8 +------ + -1 +(1 row) + +SELECT CONVERT('1.1', unsigned); +WARNING: invalid input syntax for type int16: "1.1" +DETAIL: text contain invalid character +CONTEXT: referenced column: uint8 + uint8 +------- + 1 +(1 row) + +SELECT CONVERT('1.5', unsigned); +WARNING: invalid input syntax for type int16: "1.5" +DETAIL: text contain invalid character +CONTEXT: referenced column: uint8 + uint8 +------- + 1 +(1 row) + +SELECT CONVERT('abc', unsigned); +WARNING: Truncated incorrect INTEGER value: 'abc' +CONTEXT: referenced column: uint8 + uint8 +------- + 0 +(1 row) + +SELECT CONVERT('1111111111111111111111111111111111111111111', unsigned); +WARNING: value "1111111111111111111111111111111111111111111" is out of range for type int16 +DETAIL: text exceeds the length of int16 +CONTEXT: referenced column: uint8 +WARNING: bigint unsigned out of range 
+CONTEXT: referenced column: uint8 + uint8 +---------------------- + 18446744073709551615 +(1 row) + +SELECT CONVERT(1111111111111111111111111111111111111111111, unsigned); +WARNING: bigint unsigned out of range +CONTEXT: referenced column: uint8 + uint8 +---------------------- + 18446744073709551615 +(1 row) + +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', unsigned); +WARNING: bigint unsigned out of range +CONTEXT: referenced column: uint8 + uint8 +---------------------- + 18446744073709551615 +(1 row) + +SELECT CONVERT('1111111111111111111111111111111111111111111', decimal); +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 0 must round to an absolute value less than 10^10. +CONTEXT: referenced column: numeric + numeric +------------ + 9999999999 +(1 row) + +SELECT CONVERT('1111111111111111111111111111111111111111111', decimal(10, 2)); +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 2 must round to an absolute value less than 10^8. +CONTEXT: referenced column: numeric + numeric +------------- + 99999999.99 +(1 row) + +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal); +WARNING: bigint unsigned out of range +CONTEXT: referenced column: bittouint8 +referenced column: bittouint8 +SQL statement "SELECT (SELECT bittouint8($1))" +PL/pgSQL function bit2numeric(bit) line 3 at RETURN +referenced column: numeric +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 0 must round to an absolute value less than 10^10. 
+CONTEXT: referenced column: numeric + numeric +------------ + 9999999999 +(1 row) + +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal(10, 2)); +WARNING: bigint unsigned out of range +CONTEXT: referenced column: bittouint8 +referenced column: bittouint8 +SQL statement "SELECT (SELECT bittouint8($1))" +PL/pgSQL function bit2numeric(bit) line 3 at RETURN +referenced column: numeric +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 2 must round to an absolute value less than 10^8. +CONTEXT: referenced column: numeric + numeric +------------- + 99999999.99 +(1 row) + +SELECT CONVERT('.1.', decimal); +WARNING: invalid input syntax for type numeric: ".1." +LINE 1: SELECT CONVERT('.1.', decimal); + ^ +CONTEXT: referenced column: numeric + numeric +--------- + 0 +(1 row) + +SELECT CONVERT('1e', decimal); +WARNING: invalid input syntax for type numeric: "1e" +LINE 1: SELECT CONVERT('1e', decimal); + ^ +CONTEXT: referenced column: numeric + numeric +--------- + 1 +(1 row) + +SELECT CONVERT('1e1001', decimal); +WARNING: invalid input syntax for type numeric: "1e1001" +LINE 1: SELECT CONVERT('1e1001', decimal); + ^ +CONTEXT: referenced column: numeric +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 0 must round to an absolute value less than 10^10. 
+CONTEXT: referenced column: numeric + numeric +------------ + 9999999999 +(1 row) + +SELECT CONVERT('1e-1001', decimal); +WARNING: invalid input syntax for type numeric: "1e-1001" +LINE 1: SELECT CONVERT('1e-1001', decimal); + ^ +CONTEXT: referenced column: numeric + numeric +--------- + 0 +(1 row) + +SELECT CONVERT('abc', decimal); +WARNING: invalid input syntax for type numeric: "abc" +LINE 1: SELECT CONVERT('abc', decimal); + ^ +CONTEXT: referenced column: numeric + numeric +--------- + 0 +(1 row) + +reset current_schema; +drop schema convert_truncated_warning cascade; diff --git a/contrib/dolphin/expected/db_b_new_gram_test.out b/contrib/dolphin/expected/db_b_new_gram_test.out index ef1a4ff5f..86f45a8c4 100644 --- a/contrib/dolphin/expected/db_b_new_gram_test.out +++ b/contrib/dolphin/expected/db_b_new_gram_test.out @@ -1275,6 +1275,24 @@ select (a.Sno+1) as no,a.Sname as name , (select Degree from Score where Cno= ' --multi var select (a.Sno+a.Sname) as no,a.Sname as name , (select Degree from Score where Cno= '3-105' and S_no=no) from Student as a; +WARNING: invalid input syntax for type numeric: "æŽå†›" +CONTEXT: referenced column: no +WARNING: invalid input syntax for type numeric: "æŽå†›" +CONTEXT: referenced column: Degree +WARNING: invalid input syntax for type numeric: "æŽå†›" +CONTEXT: referenced column: Degree +WARNING: invalid input syntax for type numeric: "陆å›" +CONTEXT: referenced column: no +WARNING: invalid input syntax for type numeric: "陆å›" +CONTEXT: referenced column: Degree +WARNING: invalid input syntax for type numeric: "陆å›" +CONTEXT: referenced column: Degree +WARNING: invalid input syntax for type numeric: "匡明" +CONTEXT: referenced column: no +WARNING: invalid input syntax for type numeric: "匡明" +CONTEXT: referenced column: Degree +WARNING: invalid input syntax for type numeric: "匡明" +CONTEXT: referenced column: Degree no | name | Degree -----+------+-------- 101 | æŽå†› | 64.0 diff --git a/contrib/dolphin/expected/db_b_parser3.out 
b/contrib/dolphin/expected/db_b_parser3.out index d1eb7e535..0ec8a4f7c 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -534,6 +534,10 @@ select '-12.3abc' div 'null'; WARNING: invalid input syntax for type numeric: "-12.3abc" LINE 1: select '-12.3abc' div 'null'; ^ +CONTEXT: referenced column: div +WARNING: invalid input syntax for type numeric: "null" +LINE 1: select '-12.3abc' div 'null'; + ^ CONTEXT: referenced column: div div ----- @@ -2383,6 +2387,10 @@ WARNING: invalid input syntax for type numeric: "-12.3abc" LINE 1: select '-12.3abc' mod 'null'; ^ CONTEXT: referenced column: b_mod +WARNING: invalid input syntax for type numeric: "null" +LINE 1: select '-12.3abc' mod 'null'; + ^ +CONTEXT: referenced column: b_mod WARNING: division by zero CONTEXT: SQL function "b_mod" statement 1 referenced column: b_mod diff --git a/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out b/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out index 629d79e64..82deaa92f 100644 --- a/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out +++ b/contrib/dolphin/expected/keyword_ignore_test/ignore_invalid_input.out @@ -512,7 +512,15 @@ LINE 1: insert ignore into t_numeric values ('-333331892038097432987... 
^ CONTEXT: referenced column: c insert ignore into t_numeric values ('aaa123.12a45'); +WARNING: invalid input syntax for type numeric: "aaa123.12a45" +LINE 1: insert ignore into t_numeric values ('aaa123.12a45'); + ^ +CONTEXT: referenced column: c insert ignore into t_numeric values ('abcde'); +WARNING: invalid input syntax for type numeric: "abcde" +LINE 1: insert ignore into t_numeric values ('abcde'); + ^ +CONTEXT: referenced column: c insert ignore into t_numeric values (''); WARNING: invalid input syntax for numeric: "" LINE 1: insert ignore into t_numeric values (''); diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 956fda7f2..bb95afd30 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -2615,6 +2615,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`float4/json` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: float4/json UPDATE test_json_type, test_json_table SET test_json_type.`float4%json` = test_json_table.`float4` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: float4%json UPDATE test_json_type, test_json_table SET test_json_type.`float4^json` = test_json_table.`float4` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: float4^json @@ -2660,6 +2662,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`float8/json` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: float8/json UPDATE test_json_type, test_json_table SET test_json_type.`float8%json` = test_json_table.`float8` % test_json_table.`json`; +WARNING: 
invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: float8%json UPDATE test_json_type, test_json_table SET test_json_type.`float8^json` = test_json_table.`float8` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: float8^json @@ -2705,6 +2709,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`numeric/json` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: numeric/json UPDATE test_json_type, test_json_table SET test_json_type.`numeric%json` = test_json_table.`numeric` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: numeric%json UPDATE test_json_type, test_json_table SET test_json_type.`numeric^json` = test_json_table.`numeric` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: numeric^json @@ -2750,6 +2756,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`bit1/json` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: bit1/json UPDATE test_json_type, test_json_table SET test_json_type.`bit1%json` = test_json_table.`bit1` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: bit1%json UPDATE test_json_type, test_json_table SET test_json_type.`bit1^json` = test_json_table.`bit1` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: bit1^json @@ -2795,6 +2803,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`bit64/json` = test_js WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: bit64/json UPDATE test_json_type, test_json_table SET 
test_json_type.`bit64%json` = test_json_table.`bit64` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: bit64%json UPDATE test_json_type, test_json_table SET test_json_type.`bit64^json` = test_json_table.`bit64` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: bit64^json @@ -2840,6 +2850,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`boolean/json` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: boolean/json UPDATE test_json_type, test_json_table SET test_json_type.`boolean%json` = test_json_table.`boolean` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: boolean%json UPDATE test_json_type, test_json_table SET test_json_type.`boolean^json` = test_json_table.`boolean` ^ test_json_table.`json`; ERROR: failed to find conversion function from boolean to double precision CONTEXT: referenced column: test_json_type @@ -2881,6 +2893,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`date/json` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: date/json UPDATE test_json_type, test_json_table SET test_json_type.`date%json` = test_json_table.`date` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: date%json UPDATE test_json_type, test_json_table SET test_json_type.`date^json` = test_json_table.`date` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: date^json @@ -2926,6 +2940,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`time/json` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: 
referenced column: time/json UPDATE test_json_type, test_json_table SET test_json_type.`time%json` = test_json_table.`time` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: time%json UPDATE test_json_type, test_json_table SET test_json_type.`time^json` = test_json_table.`time` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: time^json @@ -2971,6 +2987,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`time(4)/json` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: time(4)/json UPDATE test_json_type, test_json_table SET test_json_type.`time(4)%json` = test_json_table.`time(4)` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: time(4)%json UPDATE test_json_type, test_json_table SET test_json_type.`time(4)^json` = test_json_table.`time(4)` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: time(4)^json @@ -3016,6 +3034,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`datetime/json` = test WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: datetime/json UPDATE test_json_type, test_json_table SET test_json_type.`datetime%json` = test_json_table.`datetime` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: datetime%json UPDATE test_json_type, test_json_table SET test_json_type.`datetime^json` = test_json_table.`datetime` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: datetime^json @@ -3067,6 +3087,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`datetime(4)/json` 
= t WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: datetime(4)/json UPDATE test_json_type, test_json_table SET test_json_type.`datetime(4)%json` = test_json_table.`datetime(4)` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: datetime(4)%json UPDATE test_json_type, test_json_table SET test_json_type.`datetime(4)^json` = test_json_table.`datetime(4)` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: datetime(4)^json @@ -3118,6 +3140,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`timestamp/json` = tes WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: timestamp/json UPDATE test_json_type, test_json_table SET test_json_type.`timestamp%json` = test_json_table.`timestamp` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: timestamp%json UPDATE test_json_type, test_json_table SET test_json_type.`timestamp^json` = test_json_table.`timestamp` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: timestamp^json @@ -3169,6 +3193,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`timestamp(4)/json` = WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: timestamp(4)/json UPDATE test_json_type, test_json_table SET test_json_type.`timestamp(4)%json` = test_json_table.`timestamp(4)` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: timestamp(4)%json UPDATE test_json_type, test_json_table SET test_json_type.`timestamp(4)^json` = test_json_table.`timestamp(4)` ^ test_json_table.`json`; WARNING: invalid input syntax for type 
double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: timestamp(4)^json @@ -3220,6 +3246,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`year/json` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: year/json UPDATE test_json_type, test_json_table SET test_json_type.`year%json` = test_json_table.`year` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: year%json UPDATE test_json_type, test_json_table SET test_json_type.`year^json` = test_json_table.`year` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: year^json @@ -3291,6 +3319,8 @@ CONTEXT: referenced column: char/json UPDATE test_json_type, test_json_table SET test_json_type.`char%json` = test_json_table.`char` % test_json_table.`json`; WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: char%json +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: char%json UPDATE test_json_type, test_json_table SET test_json_type.`char^json` = test_json_table.`char` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: char^json @@ -3366,6 +3396,8 @@ CONTEXT: referenced column: varchar/json UPDATE test_json_type, test_json_table SET test_json_type.`varchar%json` = test_json_table.`varchar` % test_json_table.`json`; WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: varchar%json +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: varchar%json UPDATE test_json_type, test_json_table SET test_json_type.`varchar^json` = test_json_table.`varchar` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: varchar^json 
@@ -3939,6 +3971,8 @@ CONTEXT: referenced column: text/json UPDATE test_json_type, test_json_table SET test_json_type.`text%json` = test_json_table.`text` % test_json_table.`json`; WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: text%json +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: text%json UPDATE test_json_type, test_json_table SET test_json_type.`text^json` = test_json_table.`text` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: text^json @@ -4004,6 +4038,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`enum_t/json` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: enum_t/json UPDATE test_json_type, test_json_table SET test_json_type.`enum_t%json` = test_json_table.`enum_t` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: enum_t%json UPDATE test_json_type, test_json_table SET test_json_type.`enum_t^json` = test_json_table.`enum_t` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: enum_t^json @@ -4065,6 +4101,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`set_t/json` = test_js WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: set_t/json UPDATE test_json_type, test_json_table SET test_json_type.`set_t%json` = test_json_table.`set_t` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: set_t%json UPDATE test_json_type, test_json_table SET test_json_type.`set_t^json` = test_json_table.`set_t` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: set_t^json @@ -4502,6 
+4540,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/float4` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/float4 UPDATE test_json_type, test_json_table SET test_json_type.`json%float4` = test_json_table.`json` % test_json_table.`float4`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%float4 UPDATE test_json_type, test_json_table SET test_json_type.`json^float4` = test_json_table.`json` ^ test_json_table.`float4`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^float4 @@ -4547,6 +4587,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/float8` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/float8 UPDATE test_json_type, test_json_table SET test_json_type.`json%float8` = test_json_table.`json` % test_json_table.`float8`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%float8 UPDATE test_json_type, test_json_table SET test_json_type.`json^float8` = test_json_table.`json` ^ test_json_table.`float8`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^float8 @@ -4592,6 +4634,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/numeric` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/numeric UPDATE test_json_type, test_json_table SET test_json_type.`json%numeric` = test_json_table.`json` % test_json_table.`numeric`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%numeric UPDATE test_json_type, test_json_table SET test_json_type.`json^numeric` = test_json_table.`json` ^ test_json_table.`numeric`; WARNING: invalid input syntax for type 
double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^numeric @@ -4637,6 +4681,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/bit1` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/bit1 UPDATE test_json_type, test_json_table SET test_json_type.`json%bit1` = test_json_table.`json` % test_json_table.`bit1`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%bit1 UPDATE test_json_type, test_json_table SET test_json_type.`json^bit1` = test_json_table.`json` ^ test_json_table.`bit1`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^bit1 @@ -4688,6 +4734,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/bit64` = test_js WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/bit64 UPDATE test_json_type, test_json_table SET test_json_type.`json%bit64` = test_json_table.`json` % test_json_table.`bit64`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%bit64 UPDATE test_json_type, test_json_table SET test_json_type.`json^bit64` = test_json_table.`json` ^ test_json_table.`bit64`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^bit64 @@ -4739,6 +4787,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/boolean` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/boolean UPDATE test_json_type, test_json_table SET test_json_type.`json%boolean` = test_json_table.`json` % test_json_table.`boolean`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%boolean UPDATE test_json_type, test_json_table SET test_json_type.`json^boolean` = test_json_table.`json` 
^ test_json_table.`boolean`; ERROR: failed to find conversion function from boolean to double precision CONTEXT: referenced column: test_json_type @@ -4780,6 +4830,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/date` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/date UPDATE test_json_type, test_json_table SET test_json_type.`json%date` = test_json_table.`json` % test_json_table.`date`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%date UPDATE test_json_type, test_json_table SET test_json_type.`json^date` = test_json_table.`json` ^ test_json_table.`date`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^date @@ -4825,6 +4877,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/time` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/time UPDATE test_json_type, test_json_table SET test_json_type.`json%time` = test_json_table.`json` % test_json_table.`time`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%time UPDATE test_json_type, test_json_table SET test_json_type.`json^time` = test_json_table.`json` ^ test_json_table.`time`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^time @@ -4870,6 +4924,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/time(4)` = test_ WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/time(4) UPDATE test_json_type, test_json_table SET test_json_type.`json%time(4)` = test_json_table.`json` % test_json_table.`time(4)`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%time(4) UPDATE test_json_type, 
test_json_table SET test_json_type.`json^time(4)` = test_json_table.`json` ^ test_json_table.`time(4)`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^time(4) @@ -4915,6 +4971,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/datetime` = test WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/datetime UPDATE test_json_type, test_json_table SET test_json_type.`json%datetime` = test_json_table.`json` % test_json_table.`datetime`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%datetime UPDATE test_json_type, test_json_table SET test_json_type.`json^datetime` = test_json_table.`json` ^ test_json_table.`datetime`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^datetime @@ -4970,6 +5028,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/datetime(4)` = t WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/datetime(4) UPDATE test_json_type, test_json_table SET test_json_type.`json%datetime(4)` = test_json_table.`json` % test_json_table.`datetime(4)`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%datetime(4) UPDATE test_json_type, test_json_table SET test_json_type.`json^datetime(4)` = test_json_table.`json` ^ test_json_table.`datetime(4)`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^datetime(4) @@ -5025,6 +5085,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/timestamp` = tes WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/timestamp UPDATE test_json_type, test_json_table SET test_json_type.`json%timestamp` = test_json_table.`json` % 
test_json_table.`timestamp`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%timestamp UPDATE test_json_type, test_json_table SET test_json_type.`json^timestamp` = test_json_table.`json` ^ test_json_table.`timestamp`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^timestamp @@ -5080,6 +5142,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/timestamp(4)` = WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/timestamp(4) UPDATE test_json_type, test_json_table SET test_json_type.`json%timestamp(4)` = test_json_table.`json` % test_json_table.`timestamp(4)`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%timestamp(4) UPDATE test_json_type, test_json_table SET test_json_type.`json^timestamp(4)` = test_json_table.`json` ^ test_json_table.`timestamp(4)`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^timestamp(4) @@ -5135,6 +5199,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/year` = test_jso WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/year UPDATE test_json_type, test_json_table SET test_json_type.`json%year` = test_json_table.`json` % test_json_table.`year`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%year UPDATE test_json_type, test_json_table SET test_json_type.`json^year` = test_json_table.`json` ^ test_json_table.`year`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^year @@ -5204,6 +5270,8 @@ CONTEXT: referenced column: json/char WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: json/char UPDATE test_json_type, 
test_json_table SET test_json_type.`json%char` = test_json_table.`json` % test_json_table.`char`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%char WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: json%char UPDATE test_json_type, test_json_table SET test_json_type.`json^char` = test_json_table.`json` ^ test_json_table.`char`; @@ -5279,6 +5347,8 @@ CONTEXT: referenced column: json/varchar WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: json/varchar UPDATE test_json_type, test_json_table SET test_json_type.`json%varchar` = test_json_table.`json` % test_json_table.`varchar`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%varchar WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: json%varchar UPDATE test_json_type, test_json_table SET test_json_type.`json^varchar` = test_json_table.`json` ^ test_json_table.`varchar`; @@ -5852,6 +5922,8 @@ CONTEXT: referenced column: json/text WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: json/text UPDATE test_json_type, test_json_table SET test_json_type.`json%text` = test_json_table.`json` % test_json_table.`text`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%text WARNING: invalid input syntax for type numeric: "1.23a" CONTEXT: referenced column: json%text UPDATE test_json_type, test_json_table SET test_json_type.`json^text` = test_json_table.`json` ^ test_json_table.`text`; @@ -5919,6 +5991,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/enum_t` = test_j WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/enum_t UPDATE test_json_type, test_json_table SET test_json_type.`json%enum_t` = test_json_table.`json` % test_json_table.`enum_t`; 
+WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%enum_t UPDATE test_json_type, test_json_table SET test_json_type.`json^enum_t` = test_json_table.`json` ^ test_json_table.`enum_t`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^enum_t @@ -5980,6 +6054,8 @@ UPDATE test_json_type, test_json_table SET test_json_type.`json/set_t` = test_js WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/set_t UPDATE test_json_type, test_json_table SET test_json_type.`json%set_t` = test_json_table.`json` % test_json_table.`set_t`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%set_t UPDATE test_json_type, test_json_table SET test_json_type.`json^set_t` = test_json_table.`json` ^ test_json_table.`set_t`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^set_t @@ -6049,6 +6125,10 @@ CONTEXT: referenced column: json/json WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json/json UPDATE test_json_type, test_json_table SET test_json_type.`json%json` = test_json_table.`json` % test_json_table.`json`; +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%json +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: json%json UPDATE test_json_type, test_json_table SET test_json_type.`json^json` = test_json_table.`json` ^ test_json_table.`json`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json^json diff --git a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out index 40547c257..4dbf3f114 100644 --- 
a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out @@ -115,12 +115,16 @@ SELECT HEX(COMPRESS('string for test compress function '::name)); (1 row) SELECT HEX(COMPRESS('string for test compress function '::character(15))); +WARNING: value too long for type character(15) +CONTEXT: referenced column: hex hex -------------------------------------------------------- 0f000000789c2b2e29cacc4b5748cb2f5228492d2e01002f9805df (1 row) SELECT HEX(COMPRESS('string for test compress function '::char(10))); +WARNING: value too long for type character(10) +CONTEXT: referenced column: hex hex ---------------------------------------------- 0a000000789c2b2e29cacc4b5748cb2f020016a403ff @@ -236,10 +240,15 @@ SELECT HEX(COMPRESS(1E+1000)); (1 row) SELECT HEX(COMPRESS(1E+1001)); --ERROR -ERROR: invalid input syntax for type numeric: "1E+1001" +WARNING: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT HEX(COMPRESS(1E+1001)); ^ CONTEXT: referenced column: hex + hex +-------------------------------------------- + ea030000789c33341805a360140c7f000028b4bbe2 +(1 row) + SELECT HEX(COMPRESS(RPAD('a', 1024, 'a'))); hex -------------------------------------------- @@ -395,12 +404,16 @@ SELECT UNCOMPRESS(COMPRESS('string for test compress function '::name)); (1 row) SELECT UNCOMPRESS(COMPRESS('string for test compress function '::character(15))); +WARNING: value too long for type character(15) +CONTEXT: referenced column: uncompress uncompress ----------------- string for test (1 row) SELECT UNCOMPRESS(COMPRESS('string for test compress function '::char(10))); +WARNING: value too long for type character(10) +CONTEXT: referenced column: uncompress uncompress ------------ string for @@ -560,10 +573,17 @@ CONTEXT: referenced column: uncompress (1 row) SELECT UNCOMPRESS(1E+1001); --ERROR -ERROR: invalid input syntax for type numeric: "1E+1001" +WARNING: invalid input syntax for type numeric: "1E+1001" 
LINE 1: SELECT UNCOMPRESS(1E+1001); ^ CONTEXT: referenced column: uncompress +WARNING: ZLIB: Input data corrupted +CONTEXT: referenced column: uncompress + uncompress +------------ + +(1 row) + SELECT UNCOMPRESS(COMPRESS(RPAD('a', 1024, 'a'))); uncompress ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -729,12 +749,16 @@ SELECT UNCOMPRESSED_LENGTH(COMPRESS('string for test compress function '::name)) (1 row) SELECT UNCOMPRESSED_LENGTH(COMPRESS('string for test compress function '::character(15))); +WARNING: value too long for type character(15) +CONTEXT: referenced column: uncompressed_length uncompressed_length --------------------- 15 (1 row) SELECT UNCOMPRESSED_LENGTH(COMPRESS('string for test compress function '::char(10))); +WARNING: value too long for type character(10) +CONTEXT: referenced column: uncompressed_length uncompressed_length --------------------- 10 @@ -875,10 +899,15 @@ SELECT UNCOMPRESSED_LENGTH(COMPRESS(RPAD('a', 1000000, 'a'))); (1 row) SELECT UNCOMPRESSED_LENGTH(1E+1001); 
--ERROR -ERROR: invalid input syntax for type numeric: "1E+1001" +WARNING: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT UNCOMPRESSED_LENGTH(1E+1001); ^ CONTEXT: referenced column: uncompressed_length + uncompressed_length +--------------------- + 808464433 +(1 row) + SELECT UNCOMPRESSED_LENGTH(COMPRESS(RPAD('a', 1024, 'a'))); uncompressed_length --------------------- diff --git a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out index 4a564033e..4039b4c9b 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out @@ -237,18 +237,24 @@ SELECT NAME_CONST('test'::clob, 123); (1 row) SELECT NAME_CONST('test', '2023-06-01 00:23:59'::character(15)); +WARNING: value too long for type character(15) +CONTEXT: referenced column: name_const test ----------------- 2023-06-01 00:2 (1 row) SELECT NAME_CONST('2023-06-01 00:23:59'::character(15), 'test'); +WARNING: value too long for type character(15) +CONTEXT: referenced column: name_const 2023-06-01 00:2 ----------------- test (1 row) SELECT NAME_CONST('2023-06-01 00:23:59'::char(10), 'test'); +WARNING: value too long for type character(10) +CONTEXT: referenced column: name_const 2023-06-01 ------------ test @@ -350,10 +356,15 @@ SELECT NAME_CONST(1E+1000, 'test'); (1 row) SELECT NAME_CONST(1E+1001, 'test'); --ERROR -ERROR: invalid input syntax for type numeric: "1E+1001" +WARNING: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT NAME_CONST(1E+1001, 'test'); ^ CONTEXT: referenced column: name_const + 100000000000000000000000000000000000000000000000000000000000000 +----------------------------------------------------------------- + test +(1 row) + SELECT NAME_CONST('test', 1E+1000); test 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -361,10 +372,15 @@ SELECT NAME_CONST('test', 1E+1000); (1 row) SELECT NAME_CONST('test', 1E+1001); --ERROR -ERROR: invalid input syntax for type numeric: "1E+1001" +WARNING: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT NAME_CONST('test', 1E+1001); ^ CONTEXT: referenced column: name_const + test 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + SELECT NAME_CONST('test', '9999-99-99 99:99:99'::time); --ERROR WARNING: date/time field value out of range: "9999-99-99 99:99:99" LINE 1: SELECT NAME_CONST('test', '9999-99-99 99:99:99'::time); diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index e5f8cc622..fd70da736 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -9,7 +9,7 @@ test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond tes test: db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn kwlist -test: empty_value_list empty_value_lists empty_value_support_value create_index test_guc_select_and_set test_copy_year2 test_default +test: empty_value_list empty_value_lists empty_value_support_value create_index test_guc_select_and_set test_copy_year2 test_default convert_truncated_warning test: greatest_least join_without_on 
mysqlmode_fullgroup mysqlmode_strict mysqlmode_strict2 none_strict_warning test_alter_table test_dolphin_catalog diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index a56b4f3f4..a715ac577 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -265,17 +265,22 @@ static void alloc_var(NumericVar* var, int ndigits); static void zero_var(NumericVar* var); static void init_ro_var_from_var(const NumericVar* value, NumericVar* dest); - +#ifdef DOLPHIN +static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dest, bool can_ignore); +#else static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dest); +#endif static void set_var_from_num(Numeric value, NumericVar* dest); static void set_var_from_var(const NumericVar* value, NumericVar* dest); static void init_var_from_var(const NumericVar *value, NumericVar *dest); static char* get_str_from_var(NumericVar* var); static char* output_get_str_from_var(NumericVar* var); static char* get_str_from_var_sci(NumericVar* var, int rscale); - +#ifdef DOLPHIN +static void apply_typmod(NumericVar* var, int32 typmod, bool can_ignore); +#else static void apply_typmod(NumericVar* var, int32 typmod); - +#endif static int32 numericvar_to_int32(const NumericVar* var, bool can_ignore = false); static double numericvar_to_double_no_overflow(NumericVar* var); @@ -504,8 +509,11 @@ Datum numeric_in(PG_FUNCTION_ARGS) NumericVar value; init_var(&value); - +#ifdef DOLPHIN + cp = set_var_from_str(str, cp, &value, fcinfo->can_ignore); +#else cp = set_var_from_str(str, cp, &value); +#endif /* * We duplicate a few lines of code here because we would like to @@ -525,8 +533,11 @@ Datum numeric_in(PG_FUNCTION_ARGS) } cp++; } - +#ifdef DOLPHIN + apply_typmod(&value, typmod, fcinfo->can_ignore); +#else apply_typmod(&value, typmod); +#endif res = make_result(&value); free_var(&value); @@ -803,8 
+814,11 @@ Datum numeric_recv(PG_FUNCTION_ARGS) errmsg("invalid digit in external \"numeric\" value"))); value.digits[i] = d; } - +#ifdef DOLPHIN + apply_typmod(&value, typmod, fcinfo->can_ignore); +#else apply_typmod(&value, typmod); +#endif res = make_result(&value); free_var(&value); @@ -963,7 +977,11 @@ Datum numeric(PG_FUNCTION_ARGS) init_var(&var); set_var_from_num(num, &var); +#ifdef DOLPHIN + apply_typmod(&var, typmod, fcinfo->can_ignore); +#else apply_typmod(&var, typmod); +#endif newm = make_result(&var); free_var(&var); @@ -3572,7 +3590,11 @@ Datum float8_numeric(PG_FUNCTION_ARGS) init_var(&result); /* Assume we need not worry about leading/trailing spaces */ +#ifdef DOLPHIN + (void)set_var_from_str(buf, buf, &result, fcinfo->can_ignore); +#else (void)set_var_from_str(buf, buf, &result); +#endif res = make_result(&result); @@ -3649,7 +3671,11 @@ Datum float4_numeric(PG_FUNCTION_ARGS) init_var(&result); /* Assume we need not worry about leading/trailing spaces */ +#ifdef DOLPHIN + (void)set_var_from_str(buf, buf, &result, fcinfo->can_ignore); +#else (void)set_var_from_str(buf, buf, &result); +#endif res = make_result(&result); @@ -4784,7 +4810,11 @@ static void zero_var(NumericVar* var) * cp is the place to actually start parsing; str is what to use in error * reports. (Typically cp would be the same except advanced over spaces.) */ +#ifdef DOLPHIN +static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dest, bool can_ignore) +#else static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dest) +#endif { bool have_dp = FALSE; int i; @@ -4822,6 +4852,11 @@ static const char* set_var_from_str(const char* str, const char* cp, NumericVar* } if (!isdigit((unsigned char)*cp) && u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { +#ifdef DOLPHIN + ereport((can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type numeric: \"%s\"", str))); +#endif char* cp = (char*)palloc0(sizeof(char)); return cp; } @@ -4847,7 +4882,11 @@ static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dscale++; } else if (*cp == '.') { if (have_dp) +#ifdef DOLPHIN + ereport((can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR, +#else ereport(ERROR, +#endif (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type numeric: \"%s\"", str))); have_dp = TRUE; @@ -4869,12 +4908,20 @@ static const char* set_var_from_str(const char* str, const char* cp, NumericVar* cp++; exponent = strtol(cp, &endptr, 10); if (endptr == cp) +#ifdef DOLPHIN + ereport((can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR, +#else ereport(ERROR, +#endif (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type numeric: \"%s\"", str))); cp = endptr; if (exponent > NUMERIC_MAX_PRECISION || exponent < -NUMERIC_MAX_PRECISION) +#ifdef DOLPHIN + ereport((can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR, +#else ereport(ERROR, +#endif (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type numeric: \"%s\"", str))); dweight += (int)exponent; @@ -5538,7 +5585,11 @@ Numeric makeNumeric(NumericVar* var) * Do bounds checking and rounding according to the attributes * typmod field. */ +#ifdef DOLPHIN +static void apply_typmod(NumericVar* var, int32 typmod, bool can_ignore) +#else static void apply_typmod(NumericVar* var, int32 typmod) +#endif { int precision; int scale; @@ -5588,8 +5639,13 @@ static void apply_typmod(NumericVar* var, int32 typmod) #else #error unsupported NBASE #endif +#ifdef DOLPHIN + if (ddigits > maxdigits) { + ereport((can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR, +#else if (ddigits > maxdigits) ereport(ERROR, +#endif (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("numeric field overflow"), errdetail( @@ -5599,6 +5655,25 @@ static void apply_typmod(NumericVar* var, int32 typmod) /* Display 10^0 as 1 */ maxdigits ? "10^" : "", maxdigits ? maxdigits : 1))); +#ifdef DOLPHIN + errno_t rc; + size_t maxlen = precision + 2; + char str[maxlen] = {}; + while (maxdigits-- > 0) { + rc = strcat_s(str, maxlen, "9"); + securec_check(rc, "\0", "\0"); + } + if (scale > 0) { + rc = strcat_s(str, maxlen, "."); + securec_check(rc, "\0", "\0"); + while (scale-- > 0) { + rc = strcat_s(str, maxlen, "9"); + securec_check(rc, "\0", "\0"); + } + } + (void)set_var_from_str(str, str, var, can_ignore); + } +#endif break; } ddigits -= DEC_DIGITS; diff --git a/contrib/dolphin/plugin_utils/adt/varchar.cpp b/contrib/dolphin/plugin_utils/adt/varchar.cpp index cd050741e..2d0ac8554 100644 --- a/contrib/dolphin/plugin_utils/adt/varchar.cpp +++ b/contrib/dolphin/plugin_utils/adt/varchar.cpp @@ -331,6 +331,7 @@ Datum bpchar_launch(bool can_ignore, BpChar* source, int32 &maxlen, bool isExpli errmsg("value too long for type character(%d)", maxlen))); break; } + } #else for (i = maxmblen; i < len; i++) { if (s[i] != ' ') { @@ -354,8 +355,12 @@ Datum bpchar_launch(bool can_ignore, BpChar* source, int32 &maxlen, bool isExpli } } } -#endif + } else { + ereport((can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character(%d)", maxlen))); } +#endif len = maxmblen; @@ -2218,4 +2223,4 @@ Datum bpchar_text(PG_FUNCTION_ARGS) PG_RETURN_DATUM(result); } -#endif \ No newline at end of file +#endif diff --git a/contrib/dolphin/sql/convert_truncated_warning.sql b/contrib/dolphin/sql/convert_truncated_warning.sql new file mode 100644 index 000000000..3cd743e68 --- /dev/null +++ b/contrib/dolphin/sql/convert_truncated_warning.sql @@ -0,0 +1,28 @@ +create schema convert_truncated_warning; +set current_schema = convert_truncated_warning; +set dolphin.sql_mode = default; +set dolphin.b_compatibility_mode = on; +SELECT CONVERT(TIMESTAMP '2004-01-22 21:45:33',CHAR(4)); +SELECT CONVERT('1.1', signed); +SELECT CONVERT('1.5', signed); +SELECT CONVERT('abc', signed); +SELECT CONVERT('1111111111111111111111111111111111111111111', signed); +SELECT CONVERT(1111111111111111111111111111111111111111111, signed); +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', signed); +SELECT CONVERT('1.1', unsigned); +SELECT CONVERT('1.5', unsigned); +SELECT CONVERT('abc', unsigned); +SELECT CONVERT('1111111111111111111111111111111111111111111', unsigned); +SELECT CONVERT(1111111111111111111111111111111111111111111, unsigned); +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', unsigned); +SELECT CONVERT('1111111111111111111111111111111111111111111', decimal); +SELECT CONVERT('1111111111111111111111111111111111111111111', decimal(10, 2)); +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal); +SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal(10, 2)); +SELECT CONVERT('.1.', decimal); +SELECT CONVERT('1e', decimal); +SELECT CONVERT('1e1001', decimal); +SELECT CONVERT('1e-1001', decimal); 
+SELECT CONVERT('abc', decimal); +reset current_schema; +drop schema convert_truncated_warning cascade; -- Gitee From e3e05ccdcfa7176753c56b9163b226877a56bb5d Mon Sep 17 00:00:00 2001 From: = Date: Fri, 8 Dec 2023 23:14:40 +0800 Subject: [PATCH 113/434] fix I8MPVW rtrim enum value --- contrib/dolphin/Makefile | 2 +- contrib/dolphin/checklist/checklist.md | 1 + .../dolphin/expected/test_enum_collation.out | 8 +- contrib/dolphin/expected/test_mysql_enum.out | 17 + contrib/dolphin/plugin_catalog/Makefile | 2 +- contrib/dolphin/plugin_catalog/pg_enum.cpp | 637 ++++++++++++++++++ contrib/dolphin/sql/test_mysql_enum.sql | 14 + contrib/filelist.txt | 1 + 8 files changed, 676 insertions(+), 6 deletions(-) create mode 100644 contrib/dolphin/plugin_catalog/pg_enum.cpp diff --git a/contrib/dolphin/Makefile b/contrib/dolphin/Makefile index 309b93d6b..919805a5b 100644 --- a/contrib/dolphin/Makefile +++ b/contrib/dolphin/Makefile @@ -72,7 +72,7 @@ OBJS += $(commands)/functioncmds.o $(commands)/foreigncmds.o $(commands)/copy.o OBJS += $(pl)/pl_gram.o $(pl)/pl_scanner.o $(pl)/pl_comp.o $(pl)/pl_handler.o -OBJS += $(catalog)/heap.o +OBJS += $(catalog)/heap.o $(catalog)/pg_enum.o all $(OBJS): write_git_commit protocol parser utils executor vector plan prep optimizer_util commands storage pl catalog; diff --git a/contrib/dolphin/checklist/checklist.md b/contrib/dolphin/checklist/checklist.md index 49945626c..82b0de9da 100644 --- a/contrib/dolphin/checklist/checklist.md +++ b/contrib/dolphin/checklist/checklist.md @@ -130,6 +130,7 @@ |plugin_pl\plpgsql\src\pl_reserved_kwlist.h|src\common\pl\plpgsql\src\pl_reserved_kwlist.h | |plugin_pl\plpgsql\src\pl_unreserved_kwlist.h|src\common\pl\plpgsql\src\pl_unreserved_kwlist.h | |plugin_catalog|heap.cpp |src\common\backend\catalog\heap.cpp | +|plugin_catalog|pg_enum.cpp |src\common\backend\catalog\pg_enum.cpp | |llvmir|openGauss_expr_dolphin_aarch64.ir |src\gausskernel\runtime\codegen\llvmir\GaussDB_expr_aarch64.ir | 
|llvmir|openGauss_expr_dolphin_aarch64.ll |src\gausskernel\runtime\codegen\llvmir\GaussDB_expr_aarch64.ll | |llvmir|openGauss_expr_dolphin_x86_64.ir |src\gausskernel\runtime\codegen\llvmir\GaussDB_expr.ir | diff --git a/contrib/dolphin/expected/test_enum_collation.out b/contrib/dolphin/expected/test_enum_collation.out index c64fe8d08..9b95ee2a0 100644 --- a/contrib/dolphin/expected/test_enum_collation.out +++ b/contrib/dolphin/expected/test_enum_collation.out @@ -5,11 +5,13 @@ set current_schema = 'test_enum_collation'; drop table if exists test_enum_key1; NOTICE: table "test_enum_key1" does not exist, skipping create table test_enum_key1(a enum('', ' ')); +ERROR: duplicate key value violates unique constraint "pg_enum_typid_label_index" +--?.* create table test_enum_key1(a enum('', ' ') collate utf8_bin); ERROR: enum has duplicate key value "" = " " create table test_enum_key1(a enum('a', 'a ')); -ERROR: relation "test_enum_key1" already exists in schema "test_enum_collation" -DETAIL: creating new table with existing name in the same schema +ERROR: duplicate key value violates unique constraint "pg_enum_typid_label_index" +--?.* create table test_enum_key1(a enum('a', 'a ') collate utf8_bin); ERROR: enum has duplicate key value "a" = "a " create table test_enum_key1(a enum('aaa', 'AAA ') collate utf8_general_ci); @@ -17,8 +19,6 @@ ERROR: enum has duplicate key value "aaa" = "AAA " create table test_enum_key1(a enum('高斯sS', '高斯ŠŠ ') collate utf8_general_ci); ERROR: enum has duplicate key value "高斯sS" = "高斯ŠŠ " create table test_enum_key1(a enum('aaa', 'aaA ') charset 'utf8'); -ERROR: relation "test_enum_key1" already exists in schema "test_enum_collation" -DETAIL: creating new table with existing name in the same schema create table test_enum_key1(a enum('aaa', 'aaA ') charset 'gbk'); ERROR: difference between the charset and the database encoding has not supported create table test_enum_key1(a enum('aaa', 'aaA ') charset 'gb18030'); diff --git 
a/contrib/dolphin/expected/test_mysql_enum.out b/contrib/dolphin/expected/test_mysql_enum.out index a98514ad6..55a04b293 100644 --- a/contrib/dolphin/expected/test_mysql_enum.out +++ b/contrib/dolphin/expected/test_mysql_enum.out @@ -541,3 +541,20 @@ drop view my_view; drop table t_drop_view; drop schema db_b_new_gram_test3 cascade; reset current_schema; +-- +--test tailing blank for enum value +-- +--expect failure with with duplicate values +create table tabenum(a enum ('a','a ') ); +ERROR: duplicate key value violates unique constraint "pg_enum_typid_label_index" +--?.* +--expect success +create table tabenum(a enum ('a ',' a')); +drop table tabenum; +--expect failure +CREATE TYPE typenum AS ENUM ( 'a', 'a '); +ERROR: duplicate key value violates unique constraint "pg_enum_typid_label_index" +--?.* +--expect success +CREATE TYPE typenum AS ENUM ( 'a', ' a'); +DROP TYPE typenum; diff --git a/contrib/dolphin/plugin_catalog/Makefile b/contrib/dolphin/plugin_catalog/Makefile index 15436625e..65431e6b8 100644 --- a/contrib/dolphin/plugin_catalog/Makefile +++ b/contrib/dolphin/plugin_catalog/Makefile @@ -35,7 +35,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = heap.o +OBJS = heap.o pg_enum.o override CPPFLAGS += -D__STDC_FORMAT_MACROS diff --git a/contrib/dolphin/plugin_catalog/pg_enum.cpp b/contrib/dolphin/plugin_catalog/pg_enum.cpp new file mode 100644 index 000000000..5a0db00eb --- /dev/null +++ b/contrib/dolphin/plugin_catalog/pg_enum.cpp @@ -0,0 +1,637 @@ +/* ------------------------------------------------------------------------- + * + * pg_enum.cpp + * routines to support manipulation of the pg_enum relation + * + * Copyright (c) 2006-2012, PostgreSQL Global Development Group + * + * + * IDENTIFICATION + * src/common/backend/catalog/pg_enum.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/genam.h" +#include "access/heapam.h" +#include 
"access/xact.h" +#include "catalog/catalog.h" +#include "catalog/indexing.h" +#include "catalog/pg_enum.h" +#include "catalog/pg_type.h" +#include "catalog/gs_dependencies_fn.h" +#include "storage/lmgr.h" +#include "miscadmin.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/syscache.h" +#include "utils/snapmgr.h" +#include "utils/lsyscache.h" + +static void RenumberEnumType(Relation pg_enum, HeapTuple* existing, int nelems); +static int sort_order_cmp(const void* p1, const void* p2); + +#define checkEnumLableValue(val) \ + do { \ + if (NAMEDATALEN - 1 < strlen(val) || (0 == strlen(val) && !u_sess->attr.attr_sql.dolphin)) { \ + ereport(ERROR, \ + (errcode(ERRCODE_INVALID_NAME), \ + errmsg("invalid enum label \"%s\"", val), \ + errdetail("Labels must contain 1 to %d characters.", NAMEDATALEN - 1))); \ + } \ + } while (0) + +/* + * EnumValuesCreate + * Create an entry in pg_enum for each of the supplied enum values. + * + * vals is a list of Value strings. + */ +void EnumValuesCreate(Oid enumTypeOid, List* vals, Oid collation) +{ + Relation pg_enum = NULL; + NameData enumlabel; + Oid* oids = NULL; + int elemno; + int num_elems; + Datum values[Natts_pg_enum]; + bool nulls[Natts_pg_enum]; + ListCell* lc = NULL; + HeapTuple tup = NULL; + + num_elems = list_length(vals); + check_duplicate_value_by_collation(vals, collation, TYPTYPE_ENUM); + + /* + * We do not bother to check the list of values for duplicates --- if you + * have any, you'll get a less-than-friendly unique-index violation. It is + * probably not worth trying harder. + */ + + pg_enum = heap_open(EnumRelationId, RowExclusiveLock); + + /* + * Allocate OIDs for the enum's members. + * + * While this method does not absolutely guarantee that we generate no + * duplicate OIDs (since we haven't entered each oid into the table before + * allocating the next), trouble could only occur if the OID counter wraps + * all the way around before we finish. Which seems unlikely. 
+ */ + oids = (Oid*)palloc(num_elems * sizeof(Oid)); + + for (elemno = 0; elemno < num_elems; elemno++) { + /* + * We assign even-numbered OIDs to all the new enum labels. This + * tells the comparison functions the OIDs are in the correct sort + * order and can be compared directly. + */ + Oid new_oid; + + do { + new_oid = GetNewOid(pg_enum); + } while (new_oid & 1); + oids[elemno] = new_oid; + } + + /* sort them, just in case OID counter wrapped from high to low */ + qsort(oids, num_elems, sizeof(Oid), oid_cmp); + + /* and make the entries */ + errno_t rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); + securec_check(rc, "", ""); + + elemno = 0; + foreach (lc, vals) { + char* lab = strVal(lfirst(lc)); + + /* + * labels are stored in a name field, for easier syscache lookup, so + * check the length to make sure it's within range. + */ + checkEnumLableValue(lab); + values[Anum_pg_enum_enumtypid - 1] = ObjectIdGetDatum(enumTypeOid); + values[Anum_pg_enum_enumsortorder - 1] = Float4GetDatum(elemno + 1); +#ifdef DOLPHIN + /* trim the right space for set label */ + Datum trimtxt = DirectFunctionCall1(rtrim1, CStringGetTextDatum(lab)); + (void)namestrcpy(&enumlabel, TextDatumGetCString(trimtxt)); +#else + (void)namestrcpy(&enumlabel, lab); +#endif + values[Anum_pg_enum_enumlabel - 1] = NameGetDatum(&enumlabel); + + tup = heap_form_tuple(RelationGetDescr(pg_enum), values, nulls); + HeapTupleSetOid(tup, oids[elemno]); + + (void)simple_heap_insert(pg_enum, tup); + CatalogUpdateIndexes(pg_enum, tup); + heap_freetuple_ext(tup); + + elemno++; + } + + /* clean up */ + pfree_ext(oids); + heap_close(pg_enum, RowExclusiveLock); +} + +/* + * EnumValuesDelete + * Remove all the pg_enum entries for the specified enum type. 
+ */ +void EnumValuesDelete(Oid enumTypeOid) +{ + GsDependObjDesc refObj; + if (enable_plpgsql_gsdependency_guc()) { + refObj.name = NULL; + if (!OidIsValid(enumTypeOid)) { + gsplsql_get_depend_obj_by_typ_id(&refObj, enumTypeOid, InvalidOid); + } + } + Relation pg_enum = NULL; + ScanKeyData key[1]; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + + pg_enum = heap_open(EnumRelationId, RowExclusiveLock); + + ScanKeyInit(&key[0], Anum_pg_enum_enumtypid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(enumTypeOid)); + + scan = systable_beginscan(pg_enum, EnumTypIdLabelIndexId, true, NULL, 1, key); + + while (HeapTupleIsValid(tup = systable_getnext(scan))) { + simple_heap_delete(pg_enum, &tup->t_self); + } + + systable_endscan(scan); + + heap_close(pg_enum, RowExclusiveLock); + if (enable_plpgsql_gsdependency_guc() && NULL != refObj.name) { + CommandCounterIncrement(); + (void)gsplsql_remove_ref_dependency(&refObj); + pfree_ext(refObj.schemaName); + pfree_ext(refObj.packageName); + pfree_ext(refObj.name); + } +} + +/* + * AddEnumLabel + * Add a new label to the enum set. By default it goes at + * the end, but the user can choose to place it before or + * after any existing set member. + */ +void AddEnumLabel(Oid enumTypeOid, const char* newVal, const char* neighbor, bool newValIsAfter, bool skipIfExists) +{ + Relation pg_enum = NULL; + Oid newOid; + Datum values[Natts_pg_enum]; + bool nulls[Natts_pg_enum]; + NameData enumlabel; + HeapTuple enum_tup = NULL; + float4 newelemorder; + HeapTuple* existing = NULL; + CatCList* list = NULL; + int nelems; + int i; + bool isExists = false; + + /* check length of new label is ok */ + checkEnumLableValue(newVal); + + /* + * Acquire a lock on the enum type, which we won't release until commit. + * This ensures that two backends aren't concurrently modifying the same + * enum type. Without that, we couldn't be sure to get a consistent view + * of the enum members via the syscache. 
Note that this does not block + * other backends from inspecting the type; see comments for + * RenumberEnumType. + */ + LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock); + + /* + * Check if label is already in use. The unique index on pg_enum would + * catch this anyway, but we prefer a friendlier error message, and + * besides we need a check to support IF NOT EXISTS. + */ + isExists = SearchSysCacheExists2(ENUMTYPOIDNAME, ObjectIdGetDatum(enumTypeOid), CStringGetDatum(newVal)); + if (isExists) { + if (skipIfExists) { + ereport(NOTICE, + (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("enum label \"%s\" already exists, skipping", newVal))); + return; + } else { + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("enum label \"%s\" already exists", newVal))); + } + } + + pg_enum = heap_open(EnumRelationId, RowExclusiveLock); + + /* If we have to renumber the existing members, we restart from here */ +restart: + + /* Get the list of existing members of the enum */ + list = SearchSysCacheList1(ENUMTYPOIDNAME, ObjectIdGetDatum(enumTypeOid)); + nelems = list->n_members; + + /* Sort the existing members by enumsortorder */ + if (0 < nelems) { + existing = (HeapTuple*)palloc(nelems * sizeof(HeapTuple)); + for (i = 0; i < nelems; i++) { + /* Sort the existing enum lable */ + existing[i] = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i); + } + qsort(existing, nelems, sizeof(HeapTuple), sort_order_cmp); + } + + if (neighbor == NULL) { + /* + * Put the new label at the end of the list. No change to existing + * tuples is required. 
+ */ + if (nelems > 0) { + Form_pg_enum en = (Form_pg_enum)GETSTRUCT(existing[nelems - 1]); + + newelemorder = en->enumsortorder + 1; + } else + newelemorder = 1; + } else { + /* BEFORE or AFTER was specified */ + int nbr_index; + int other_nbr_index; + Form_pg_enum nbr_en; + Form_pg_enum other_nbr_en; + + /* Locate the neighbor element */ + for (nbr_index = 0; nbr_index < nelems; nbr_index++) { + Form_pg_enum en = (Form_pg_enum)GETSTRUCT(existing[nbr_index]); + + if (strcmp(NameStr(en->enumlabel), neighbor) == 0) + break; + } + + if (nbr_index >= nelems) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is not an existing enum label", neighbor))); + } + nbr_en = (Form_pg_enum)GETSTRUCT(existing[nbr_index]); + + /* + * Attempt to assign an appropriate enumsortorder value: one less than + * the smallest member, one more than the largest member, or halfway + * between two existing members. + */ + if (newValIsAfter) + other_nbr_index = nbr_index + 1; + else + other_nbr_index = nbr_index - 1; + + if (other_nbr_index < 0) + newelemorder = nbr_en->enumsortorder - 1; + else if (other_nbr_index >= nelems) + newelemorder = nbr_en->enumsortorder + 1; + else { + /* + * The midpoint value computed here has to be rounded to float4 + * precision, else our equality comparisons against the adjacent + * values are meaningless. The most portable way of forcing that + * to happen with non-C-standard-compliant compilers is to store + * it into a volatile variable. + */ + volatile float4 midpoint; + + other_nbr_en = (Form_pg_enum)GETSTRUCT(existing[other_nbr_index]); + midpoint = (nbr_en->enumsortorder + other_nbr_en->enumsortorder) / 2; + + if (midpoint == nbr_en->enumsortorder || midpoint == other_nbr_en->enumsortorder) { + /* + * In the "halfway" case, because of the finite precision of float4, + * we might compute a value that's actually equal to one or the other + * of its neighbors. In that case we renumber the existing members + * and try again. 
+ */ + RenumberEnumType(pg_enum, existing, nelems); + /* Clean up and start over */ + pfree_ext(existing); + ReleaseSysCacheList(list); + goto restart; + } + + newelemorder = midpoint; + } + } + + /* Get a new OID for the new label */ + if (u_sess->proc_cxt.IsBinaryUpgrade) { + if (!OidIsValid(u_sess->upg_cxt.binary_upgrade_next_pg_enum_oid)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_enum OID value not set when in binary upgrade mode"))); + } + + /* + * Use binary-upgrade override for pg_enum.oid, if supplied. During + * binary upgrade, all pg_enum.oid's are set this way so they are + * guaranteed to be consistent. + */ + if (neighbor != NULL) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("ALTER TYPE ADD BEFORE/AFTER is incompatible with binary upgrade"))); + } + + newOid = u_sess->upg_cxt.binary_upgrade_next_pg_enum_oid; + u_sess->upg_cxt.binary_upgrade_next_pg_enum_oid = InvalidOid; + } else { + /* + * Normal case: we need to allocate a new Oid for the value. + * + * We want to give the new element an even-numbered Oid if it's safe, + * which is to say it compares correctly to all pre-existing even + * numbered Oids in the enum. Otherwise, we must give it an odd Oid. + */ + for (;;) { + bool sorts_ok = false; + + /* Get a new OID (different from all existing pg_enum tuples) */ + newOid = GetNewOid(pg_enum); + + /* + * Detect whether it sorts correctly relative to existing + * even-numbered labels of the enum. We can ignore existing + * labels with odd Oids, since a comparison involving one of those + * will not take the fast path anyway. 
+ */ + sorts_ok = true; + for (i = 0; i < nelems; i++) { + HeapTuple exists_tup = existing[i]; + Form_pg_enum exists_en = (Form_pg_enum)GETSTRUCT(exists_tup); + Oid exists_oid = HeapTupleGetOid(exists_tup); + + if (exists_oid & 1) + continue; /* ignore odd Oids */ + + if (exists_en->enumsortorder < newelemorder) { + /* should sort before */ + if (exists_oid >= newOid) { + sorts_ok = false; + break; + } + } else { + /* should sort after */ + if (exists_oid <= newOid) { + sorts_ok = false; + break; + } + } + } + + if (sorts_ok) { + /* If it's even and sorts OK, we're done. */ + if ((newOid & 1) == 0) + break; + + /* + * If it's odd, and sorts OK, loop back to get another OID and + * try again. Probably, the next available even OID will sort + * correctly too, so it's worth trying. + */ + } else { + /* + * If it's odd, and does not sort correctly, we're done. + * (Probably, the next available even OID would sort + * incorrectly too, so no point in trying again.) + */ + if (newOid & 1) + break; + + /* + * If it's even, and does not sort correctly, loop back to get + * another OID and try again. (We *must* reject this case.) 
+ */ + } + } + } + + /* Done with info about existing members */ + pfree_ext(existing); + ReleaseSysCacheList(list); + + /* Create the new pg_enum entry */ + errno_t rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); + securec_check(rc, "", ""); + + values[Anum_pg_enum_enumtypid - 1] = ObjectIdGetDatum(enumTypeOid); + values[Anum_pg_enum_enumsortorder - 1] = Float4GetDatum(newelemorder); + (void)namestrcpy(&enumlabel, newVal); + values[Anum_pg_enum_enumlabel - 1] = NameGetDatum(&enumlabel); + enum_tup = heap_form_tuple(RelationGetDescr(pg_enum), values, nulls); + HeapTupleSetOid(enum_tup, newOid); + (void)simple_heap_insert(pg_enum, enum_tup); + CatalogUpdateIndexes(pg_enum, enum_tup); + heap_freetuple_ext(enum_tup); + + heap_close(pg_enum, RowExclusiveLock); + if (enable_plpgsql_gsdependency_guc()) { + CommandCounterIncrement(); + (void)gsplsql_build_ref_type_dependency(enumTypeOid); + } +} + +/* + * RenameEnumLabel + * Rename a label in an enum set. + */ +void RenameEnumLabel(Oid enumTypeOid, const char* oldVal, const char* newVal) +{ + Relation pg_enum = NULL; + HeapTuple enum_tup = NULL; + Form_pg_enum en = NULL; + CatCList* list = NULL; + int nelems; + HeapTuple old_tup = NULL; + bool found_new = false; + int i; + + /* check length of new label is ok */ + checkEnumLableValue(newVal); + checkEnumLableValue(oldVal); + + if (enable_plpgsql_gsdependency_guc() && gsplsql_is_object_depend(enumTypeOid, GSDEPEND_OBJECT_TYPE_TYPE)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The rename operator on %s is not allowed, " + "because it is dependent on another object.", get_typename(enumTypeOid)))); + } + /* + * Acquire a lock on the enum type, which we won't release until commit. + * This ensures that two backends aren't concurrently modifying the same + * enum type. Since we are not changing the type's sort order, this is + * probably not really necessary, but there seems no reason not to take + * the lock to be sure. 
+ */ + LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock); + pg_enum = heap_open(EnumRelationId, RowExclusiveLock); + + /* Get the list of existing members of the enum */ + list = SearchSysCacheList1(ENUMTYPOIDNAME, ObjectIdGetDatum(enumTypeOid)); + nelems = list->n_members; + + /* + * Locate the element to rename and check if the new label is already in + * use. (The unique index on pg_enum would catch that anyway, but we + * prefer a friendlier error message.) + */ + for (i = 0; i < nelems; i++) { + enum_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i); + en = (Form_pg_enum)GETSTRUCT(enum_tup); + + if (strcmp(NameStr(en->enumlabel), oldVal) == 0) + old_tup = enum_tup; + if (strcmp(NameStr(en->enumlabel), newVal) == 0) + found_new = true; + } + + if (!old_tup) { + ReleaseSysCacheList(list); + heap_close(pg_enum, RowExclusiveLock); + + ereport( + ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is not an existing enum label", oldVal))); + } + + if (found_new) { + ReleaseSysCacheList(list); + heap_close(pg_enum, RowExclusiveLock); + + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("enum label \"%s\" already exists", newVal))); + } + + /* OK, make a writable copy of old tuple */ + enum_tup = heap_copytuple(old_tup); + en = (Form_pg_enum)GETSTRUCT(enum_tup); + + ReleaseSysCacheList(list); + + /* Update the pg_enum entry */ + (void)namestrcpy(&en->enumlabel, newVal); + simple_heap_update(pg_enum, &enum_tup->t_self, enum_tup); + CatalogUpdateIndexes(pg_enum, enum_tup); + heap_freetuple_ext(enum_tup); + + heap_close(pg_enum, RowExclusiveLock); +} + +/* + * RenumberEnumType + * Renumber existing enum elements to have sort positions 1..n. + * + * We avoid doing this unless absolutely necessary; in most installations + * it will never happen. The reason is that updating existing pg_enum + * entries creates hazards for other backends that are concurrently reading + * pg_enum with SnapshotNow semantics. 
A concurrent SnapshotNow scan could + * see both old and new versions of an updated row as valid, or neither of + * them, if the commit happens between scanning the two versions. It's + * also quite likely for a concurrent scan to see an inconsistent set of + * rows (some members updated, some not). + * + * We can avoid these risks by reading pg_enum with an MVCC snapshot + * instead of SnapshotNow, but that forecloses use of the syscaches. + * We therefore make the following choices: + * + * 1. Any code that is interested in the enumsortorder values MUST read + * pg_enum with an MVCC snapshot, or else acquire lock on the enum type + * to prevent concurrent execution of AddEnumLabel(). The risk of + * seeing inconsistent values of enumsortorder is too high otherwise. + * + * 2. Code that is not examining enumsortorder can use a syscache + * (for example, enum_in and enum_out do so). The worst that can happen + * is a transient failure to find any valid value of the row. This is + * judged acceptable in view of the infrequency of use of RenumberEnumType. + */ +static void RenumberEnumType(Relation pg_enum, HeapTuple* existing, int nelems) +{ + int i; + + /* + * We should only need to increase existing elements' enumsortorders, + * never decrease them. Therefore, work from the end backwards, to avoid + * unwanted uniqueness violations. 
+ */ + for (i = nelems - 1; i >= 0; i--) { + HeapTuple newtup; + Form_pg_enum en; + float4 newsortorder; + + newtup = heap_copytuple(existing[i]); + if ((newtup == NULL) || (newtup->t_data == NULL)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for existing[%d]!", i))); + } + en = (Form_pg_enum)GETSTRUCT(newtup); + + newsortorder = i + 1; + if (en->enumsortorder != newsortorder) { + en->enumsortorder = newsortorder; + + simple_heap_update(pg_enum, &newtup->t_self, newtup); + + CatalogUpdateIndexes(pg_enum, newtup); + } + + heap_freetuple_ext(newtup); + } + + /* Make the updates visible */ + CommandCounterIncrement(); +} + +/* qsort comparison function for tuples by sort order */ +static int sort_order_cmp(const void* p1, const void* p2) +{ + HeapTuple v1 = *((const HeapTuple*)p1); + HeapTuple v2 = *((const HeapTuple*)p2); + Form_pg_enum en1 = (Form_pg_enum)GETSTRUCT(v1); + Form_pg_enum en2 = (Form_pg_enum)GETSTRUCT(v2); + + if (en1->enumsortorder < en2->enumsortorder) + return -1; + else if (en1->enumsortorder > en2->enumsortorder) + return 1; + else + return 0; +} + +char* SerializeEnumAttr(Oid enumTypeOid) +{ + if (!type_is_enum(enumTypeOid)) { + return NULL; + } + CatCList* list = SearchSysCacheList1(ENUMTYPOIDNAME, ObjectIdGetDatum(enumTypeOid)); + if (list == NULL) { + return NULL; + } + if (0 == list->n_members) { + ReleaseSysCacheList(list); + return NULL; + } + HeapTuple* enumList = (HeapTuple*)palloc(list->n_members * sizeof(HeapTuple)); + for (int i = 0; i < list->n_members; i++) { + enumList[i] = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i); + } + qsort(enumList, list->n_members, sizeof(HeapTuple), sort_order_cmp); + StringInfoData concatName; + initStringInfo(&concatName); + for (int i = 0; i < list->n_members; i++) { + Form_pg_enum en = (Form_pg_enum)GETSTRUCT(enumList[i]); + appendStringInfoString(&concatName, NameStr(en->enumlabel)); + appendStringInfoString(&concatName, ","); + } + pfree_ext(enumList); + 
ReleaseSysCacheList(list); + char* ret = pstrdup(concatName.data); + FreeStringInfo(&concatName); + return ret; +} diff --git a/contrib/dolphin/sql/test_mysql_enum.sql b/contrib/dolphin/sql/test_mysql_enum.sql index 72f19a232..2a1e1656e 100644 --- a/contrib/dolphin/sql/test_mysql_enum.sql +++ b/contrib/dolphin/sql/test_mysql_enum.sql @@ -262,3 +262,17 @@ drop view my_view; drop table t_drop_view; drop schema db_b_new_gram_test3 cascade; reset current_schema; + +-- +--test tailing blank for enum value +-- +--expect failure with with duplicate values +create table tabenum(a enum ('a','a ') ); +--expect success +create table tabenum(a enum ('a ',' a')); +drop table tabenum; +--expect failure +CREATE TYPE typenum AS ENUM ( 'a', 'a '); +--expect success +CREATE TYPE typenum AS ENUM ( 'a', ' a'); +DROP TYPE typenum; diff --git a/contrib/filelist.txt b/contrib/filelist.txt index c74440bb0..89bb0da6a 100644 --- a/contrib/filelist.txt +++ b/contrib/filelist.txt @@ -171,6 +171,7 @@ partitionfuncs.h password.cpp password.h pg_builtin_proc.cpp +pg_enum.cpp pg_locale.cpp pg_wchar.h pgstatfuncs.cpp -- Gitee From db1becb3ad2317ccc0e9c2972aef3127b15587c5 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 4 Dec 2023 15:49:48 +0800 Subject: [PATCH 114/434] Fix ignore bug. 
--- .../ignore_not_null_constraints.out | 69 ++++--- contrib/dolphin/plugin_postgres.cpp | 187 +++++++++++++++++- contrib/dolphin/plugin_utils/adt/varlena.cpp | 32 ++- .../ignore_not_null_constraints.sql | 16 +- 4 files changed, 268 insertions(+), 36 deletions(-) diff --git a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out index bd908a8d6..88656a7a8 100644 --- a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out +++ b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out @@ -648,27 +648,27 @@ select * from t_jsonb where c::text = 'null'; (2 rows) -- bit -create table t_bit(c bit not null); -insert ignore into t_bit values (null); +create table t_bit(c bit not null, d bit(10) not null); +insert ignore into t_bit values (null, null); WARNING: null value in column "c" violates not-null constraint -DETAIL: Failing row contains (null). +DETAIL: Failing row contains (null, null). select * from t_bit; - c ---- - + c | d +---+------------ + 0 | 0000000000 (1 row) -insert into t_bit values('1'); -update ignore t_bit set c = null; +insert into t_bit values(b'1', b'111'); +update ignore t_bit set c = null, d = null; WARNING: null value in column "c" violates not-null constraint -DETAIL: Failing row contains (null). +DETAIL: Failing row contains (null, null). WARNING: null value in column "c" violates not-null constraint -DETAIL: Failing row contains (null). +DETAIL: Failing row contains (null, null). select * from t_bit; - c ---- - - + c | d +---+------------ + 0 | 0000000000 + 0 | 0000000000 (2 rows) -- tinyint @@ -894,9 +894,9 @@ insert ignore into t_charn values (null); WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). select * from t_charn; - c ---- - + c +-------- + (1 row) insert into t_charn values('abc'); @@ -906,10 +906,10 @@ DETAIL: Failing row contains (null). 
WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). select * from t_charn; - c ---- - - + c +-------- + + (2 rows) -- varchar(n) @@ -960,6 +960,30 @@ select * from t_text; (2 rows) +-- binary +create table t_binaryn(c binary(6) not null); +insert ignore into t_binaryn values (null); +WARNING: null value in column "c" violates not-null constraint +DETAIL: Failing row contains (null). +select *,hex(c) from t_binaryn; + c | hex +----------------+-------------- + \x000000000000 | 000000000000 +(1 row) + +insert into t_binaryn values(b'01'); +update ignore t_binaryn set c = null; +WARNING: null value in column "c" violates not-null constraint +DETAIL: Failing row contains (null). +WARNING: null value in column "c" violates not-null constraint +DETAIL: Failing row contains (null). +select *,hex(c) from t_binaryn; + c | hex +----------------+-------------- + \x000000000000 | 000000000000 + \x000000000000 | 000000000000 +(2 rows) + -- mixture drop table if exists t_mix; NOTICE: table "t_mix" does not exist, skipping @@ -1541,7 +1565,7 @@ drop table if exists t_ignore; reset dolphin.sql_mode; -- restore context drop schema sql_ignore_not_null_test cascade; -NOTICE: drop cascades to 32 other objects +NOTICE: drop cascades to 33 other objects DETAIL: drop cascades to table t_from drop cascades to table t_timestamp drop cascades to table t_timetz @@ -1572,6 +1596,7 @@ drop cascades to table t_bool drop cascades to table t_charn drop cascades to table t_varcharn drop cascades to table t_text +drop cascades to table t_binaryn drop cascades to table t_not_null_key_partition drop cascades to table ignore_range_range reset current_schema; diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index 4db31d0ec..42d42351e 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -43,6 +43,7 @@ #include "catalog/gs_db_privilege.h" #include "catalog/pg_extension.h" 
#include "catalog/pg_operator.h" +#include "catalog/pg_attribute.h" #include "executor/spi_priv.h" #include "tcop/utility.h" #include "gs_ledger/ledger_utils.h" @@ -51,6 +52,13 @@ #include "utils/lsyscache.h" #include "utils/acl.h" #include "utils/knl_catcache.h" +#include "plugin_utils/date.h" +#include "utils/nabstime.h" +#include "utils/geo_decls.h" +#include "utils/varbit.h" +#include "utils/json.h" +#include "utils/jsonb.h" +#include "utils/xml.h" #include "pgxc/groupmgr.h" #include "libpq/pqformat.h" #include "optimizer/nodegroups.h" @@ -191,8 +199,10 @@ extern "C" DLL_PUBLIC void _PG_fini(void); PG_FUNCTION_INFO_V1_PUBLIC(dolphin_types); extern "C" DLL_PUBLIC Datum dolphin_types(); +extern "C" Datum dolphin_binaryin(PG_FUNCTION_ARGS); static void InitDolphinTypeId(BSqlPluginContext* cxt); static void InitDolphinOperator(BSqlPluginContext* cxt); +static Datum DolphinGetTypeZeroValue(Form_pg_attribute att_tup); PG_FUNCTION_INFO_V1_PUBLIC(dolphin_invoke); void dolphin_invoke(void) @@ -303,8 +313,9 @@ void init_plugin_object() u_sess->hook_cxt.coreYYlexHook = (void*)core_yylex; u_sess->hook_cxt.pluginProcDestReciverHook = (void*)CreateSqlProcSpiDestReciver; u_sess->hook_cxt.pluginSpiReciverParamHook = (void*)SetSqlProcSpiStmtParams; - u_sess->hook_cxt.pluginSpiExecuteMultiResHook =(void*)SpiIsExecMultiSelect; - u_sess->hook_cxt.pluginMultiResExceptionHook =(void*)SpiMultiSelectException; + u_sess->hook_cxt.pluginSpiExecuteMultiResHook = (void*)SpiIsExecMultiSelect; + u_sess->hook_cxt.pluginMultiResExceptionHook = (void*)SpiMultiSelectException; + u_sess->hook_cxt.getTypeZeroValueHook = (void*)DolphinGetTypeZeroValue; set_default_guc(); if (g_instance.attr.attr_network.enable_dolphin_proto && u_sess->proc_cxt.MyProcPort && @@ -1424,3 +1435,175 @@ static void InitDolphinOperator(BSqlPluginContext* cxt) cxt->dolphin_oprs[AEXPR_DIV_INT8][UINT_OP] = NUMERICDIVOID; InitUintOprs(cxt); } + +/* copy from openGauss-server's execUtils.cpp GetTypeZeroValue */ +static Datum 
DolphinGetTypeZeroValue(Form_pg_attribute att_tup) +{ + Datum result; + switch (att_tup->atttypid) { + case TIMESTAMPOID: { + result = (Datum)DirectFunctionCall3(timestamp_in, CStringGetDatum("now"), ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1)); + break; + } + case TIMESTAMPTZOID: { + result = clock_timestamp(NULL); + break; + } + case TIMETZOID: { + result = (Datum)DirectFunctionCall3( + timetz_in, CStringGetDatum("00:00:00"), ObjectIdGetDatum(0), Int32GetDatum(-1)); + break; + } + case INTERVALOID: { + result = (Datum)DirectFunctionCall3( + interval_in, CStringGetDatum("00:00:00"), ObjectIdGetDatum(0), Int32GetDatum(-1)); + break; + } + case TINTERVALOID: { + Datum epoch = (Datum)DirectFunctionCall1(timestamp_abstime, (TimestampGetDatum(SetEpochTimestamp()))); + result = (Datum)DirectFunctionCall2(mktinterval, epoch, epoch); + break; + } + case SMALLDATETIMEOID: { + result = (Datum)DirectFunctionCall3( + smalldatetime_in, CStringGetDatum("1970-01-01 00:00:00"), ObjectIdGetDatum(0), Int32GetDatum(-1)); + break; + } + case DATEOID: { + result = timestamp2date(SetEpochTimestamp()); + break; + } + case UUIDOID: { + result = (Datum)DirectFunctionCall3(uuid_in, CStringGetDatum("00000000-0000-0000-0000-000000000000"), + ObjectIdGetDatum(0), Int32GetDatum(-1)); + break; + } + case NAMEOID: { + result = (Datum)DirectFunctionCall1(namein, CStringGetDatum("")); + break; + } + case POINTOID: { + result = (Datum)DirectFunctionCall1(point_in, CStringGetDatum("(0,0)")); + break; + } + case PATHOID: { + result = (Datum)DirectFunctionCall1(path_in, CStringGetDatum("0,0")); + break; + } + case POLYGONOID: { + result = (Datum)DirectFunctionCall1(poly_in, CStringGetDatum("(0,0)")); + break; + } + case CIRCLEOID: { + result = (Datum)DirectFunctionCall1(circle_in, CStringGetDatum("0,0,0")); + break; + } + case LSEGOID: + case BOXOID: { + result = (Datum)DirectFunctionCall1(box_in, CStringGetDatum("0,0,0,0")); + break; + } + case JSONOID: { + result = 
(Datum)DirectFunctionCall1(json_in, CStringGetDatum("null")); + break; + } + case JSONBOID: { + result = (Datum)DirectFunctionCall1(jsonb_in, CStringGetDatum("null")); + break; + } + case XMLOID: { + result = (Datum)DirectFunctionCall1(xml_in, CStringGetDatum("null")); + break; + } + case BITOID: { + result = (Datum)DirectFunctionCall3(bit_in, CStringGetDatum(""), ObjectIdGetDatum(0), Int32GetDatum(-1)); + /* for dolphin, use att_tup's typmod, to do extra padding */ + result = (Datum)DirectFunctionCall2(bit, result, Int32GetDatum(att_tup->atttypmod)); + break; + } + case VARBITOID: { + result = (Datum)DirectFunctionCall3(varbit_in, CStringGetDatum(""), ObjectIdGetDatum(0), Int32GetDatum(-1)); + break; + } + case NUMERICOID: { + result = + (Datum)DirectFunctionCall3(numeric_in, CStringGetDatum("0"), ObjectIdGetDatum(0), Int32GetDatum(0)); + break; + } + case CIDROID: { + result = DirectFunctionCall1(cidr_in, CStringGetDatum("0.0.0.0")); + break; + } + case INETOID: { + result = DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0")); + break; + } + case MACADDROID: { + result = (Datum)DirectFunctionCall1(macaddr_in, CStringGetDatum("00:00:00:00:00:00")); + break; + } + case NUMRANGEOID: + case INT8RANGEOID: + case INT4RANGEOID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "(0,0)", att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case TSRANGEOID: + case TSTZRANGEOID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "(1970-01-01 00:00:00,1970-01-01 00:00:00)", att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case DATERANGEOID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "(1970-01-01,1970-01-01)", att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case HASH16OID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "0", 
att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case HASH32OID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "00000000000000000000000000000000", att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case TSVECTOROID: { + Type targetType = typeidType(att_tup->atttypid); + result = stringTypeDatum(targetType, "", att_tup->atttypmod, true); + ReleaseSysCache(targetType); + break; + } + case BPCHAROID: { + /* for dolphin, bpchar should use att_tup's typmod, to do extea padding */ + result = (Datum)DirectFunctionCall3(bpcharin, CStringGetDatum(""), + ObjectIdGetDatum(0), Int32GetDatum(att_tup->atttypmod)); + break; + } + default: { + if (att_tup->atttypid == BINARYOID) { + /* binary should use att_tup's typmod, to do extra padding */ + result = (Datum)DirectFunctionCall3(dolphin_binaryin, CStringGetDatum(""), + ObjectIdGetDatum(0), Int32GetDatum(att_tup->atttypmod)); + break; + } + bool typeIsVarlena = (!att_tup->attbyval) && (att_tup->attlen == -1); + if (typeIsVarlena) { + result = CStringGetTextDatum(""); + } else { + result = (Datum)0; + } + break; + } + } + return result; +} + diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index c73669dc3..8cfd4b33f 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -684,6 +684,7 @@ extern "C" DLL_PUBLIC Datum dolphin_binaryin(PG_FUNCTION_ARGS); Datum dolphin_binaryin(PG_FUNCTION_ARGS) { char* inputText = PG_GETARG_CSTRING(0); + int32 atttypmod = PG_NARGS() == 3 ? 
PG_GETARG_INT32(2) : -1; char* tp = NULL; char* rp = NULL; int bc; @@ -693,12 +694,19 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) /* Recognize hex input */ if (inputText[0] == '\\' && inputText[1] == 'x') { size_t len = strlen(inputText); + if (atttypmod < VARHDRSZ) { + bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */ + result = (bytea*)palloc(bc); + } else { + if (len > (size_t)(atttypmod - VARHDRSZ)) { + ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); + } + result = (bytea*)palloc0(atttypmod); /* palloc0, pad with zero */ + } - bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */ - result = (bytea*)palloc(bc); bc = hex_decode(inputText + 2, len - 2, VARDATA(result)); - SET_VARSIZE(result, bc + VARHDRSZ); /* actual length */ - + SET_VARSIZE(result, atttypmod < VARHDRSZ ? bc + VARHDRSZ : atttypmod); PG_RETURN_BYTEA_P(result); } @@ -732,10 +740,18 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) } } - bc += VARHDRSZ; - - result = (bytea*)palloc(bc); - SET_VARSIZE(result, bc); + if (atttypmod < VARHDRSZ) { + bc += VARHDRSZ; + result = (bytea*)palloc(bc); + SET_VARSIZE(result, bc); + } else { + if (bc > atttypmod - VARHDRSZ) { + ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); + } + result = (bytea*)palloc0(atttypmod); /* palloc0, pad with zero */ + SET_VARSIZE(result, atttypmod); + } tp = inputText; rp = VARDATA(result); diff --git a/contrib/dolphin/sql/keyword_ignore_test/ignore_not_null_constraints.sql b/contrib/dolphin/sql/keyword_ignore_test/ignore_not_null_constraints.sql index 42833e37c..33144b26e 100644 --- a/contrib/dolphin/sql/keyword_ignore_test/ignore_not_null_constraints.sql +++ b/contrib/dolphin/sql/keyword_ignore_test/ignore_not_null_constraints.sql @@ -210,11 +210,11 @@ select * from t_jsonb; select * from t_jsonb where c::text = 'null'; -- bit -create table t_bit(c 
bit not null); -insert ignore into t_bit values (null); +create table t_bit(c bit not null, d bit(10) not null); +insert ignore into t_bit values (null, null); select * from t_bit; -insert into t_bit values('1'); -update ignore t_bit set c = null; +insert into t_bit values(b'1', b'111'); +update ignore t_bit set c = null, d = null; select * from t_bit; -- tinyint @@ -314,6 +314,14 @@ insert into t_text values('xxxxxx'); update ignore t_text set c = null; select * from t_text; +-- binary +create table t_binaryn(c binary(6) not null); +insert ignore into t_binaryn values (null); +select *,hex(c) from t_binaryn; +insert into t_binaryn values(b'01'); +update ignore t_binaryn set c = null; +select *,hex(c) from t_binaryn; + -- mixture drop table if exists t_mix; create table t_mix -- Gitee From de2e130362b96fcf230ba94bf50e252535c7dcd1 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 12 Dec 2023 11:18:42 +0800 Subject: [PATCH 115/434] Fix json_objectagg bug. --- contrib/dolphin/expected/json_objectagg.out | 49 +++++++++++++++++++ .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 32 +++++++----- contrib/dolphin/sql/json_objectagg.sql | 10 ++++ 3 files changed, 79 insertions(+), 12 deletions(-) diff --git a/contrib/dolphin/expected/json_objectagg.out b/contrib/dolphin/expected/json_objectagg.out index 32cf63b46..5df884b4e 100644 --- a/contrib/dolphin/expected/json_objectagg.out +++ b/contrib/dolphin/expected/json_objectagg.out @@ -141,6 +141,55 @@ select json_objectagg(a, b) from json_table; {"{\"a\": \"b\"}": "Json", "{\"age\": 20, \"name\": \"Jim\"}": "Text", "{\"Number\": 123, \"Bool\": false}": null} (1 row) +-- special type, type typcategory is N(numeric), but out result is string actually +select pg_catalog.json_objectagg(1, cast(1247 as regclass)); + json_objectagg +------------------ + {"1": "pg_type"} +(1 row) + +select pg_catalog.json_objectagg(1, cast(598 as regproc)); + json_objectagg +---------------------------- + {"1": "pg_catalog.abbrev"} +(1 row) + +select 
pg_catalog.json_objectagg(1, cast(15 as regoperator)); + json_objectagg +---------------------------- + {"1": "=(integer,bigint)"} +(1 row) + +select pg_catalog.json_objectagg(1, cast(15 as regoper)); + json_objectagg +----------------------- + {"1": "pg_catalog.="} +(1 row) + +select pg_catalog.json_objectagg(1, cast(598 as regprocedure)); + json_objectagg +----------------------- + {"1": "abbrev(inet)"} +(1 row) + +select pg_catalog.json_objectagg(1, cast(23 as regtype)); + json_objectagg +------------------ + {"1": "integer"} +(1 row) + +select pg_catalog.json_objectagg(1, cast(3748 as regconfig)); + json_objectagg +----------------- + {"1": "simple"} +(1 row) + +select pg_catalog.json_objectagg(1, cast(3765 as regdictionary)); + json_objectagg +----------------- + {"1": "simple"} +(1 row) + drop schema json_objectagg_test cascade; NOTICE: drop cascades to table city reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 2210a347b..4997773b9 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -67,7 +67,6 @@ TYPCATEGORY get_value_type(Oid val_type, Oid typoutput) { - TYPCATEGORY tcategory; Oid castfunc = InvalidOid; if (val_type == InvalidOid) { @@ -87,18 +86,27 @@ TYPCATEGORY get_value_type(Oid val_type, Oid typoutput) } } if (castfunc != InvalidOid) { - tcategory = TYPCATEGORY_JSON_CAST; - } else if (val_type == RECORDARRAYOID) { - tcategory = TYPCATEGORY_ARRAY; - } else if (val_type == RECORDOID) { - tcategory = TYPCATEGORY_COMPOSITE; - } else if (val_type == JSONOID) { - tcategory = TYPCATEGORY_JSON; - } else { - tcategory = TypeCategory(val_type); + return TYPCATEGORY_JSON_CAST; + } + switch (val_type) { + case RECORDARRAYOID: + return TYPCATEGORY_ARRAY; + case RECORDOID: + return TYPCATEGORY_COMPOSITE; + case JSONOID: + return TYPCATEGORY_JSON; + case REGPROCOID: + case REGCLASSOID: + case REGOPERATOROID: + case 
REGOPEROID: + case REGPROCEDUREOID: + case REGTYPEOID: + case REGCONFIGOID: + case REGDICTIONARYOID: + return TYPCATEGORY_STRING; + default: + return TypeCategory(val_type); } - - return tcategory; } typedef enum { diff --git a/contrib/dolphin/sql/json_objectagg.sql b/contrib/dolphin/sql/json_objectagg.sql index e6a93fc12..afd8a9c1b 100644 --- a/contrib/dolphin/sql/json_objectagg.sql +++ b/contrib/dolphin/sql/json_objectagg.sql @@ -61,5 +61,15 @@ insert into json_table values (json_object('name', 'Jim', 'age', 20, 'name', 'Ti insert into json_table values ('{"Number": 123, "Bool": false}', null); select json_objectagg(a, b) from json_table; +-- special type, type typcategory is N(numeric), but out result is string actually +select pg_catalog.json_objectagg(1, cast(1247 as regclass)); +select pg_catalog.json_objectagg(1, cast(598 as regproc)); +select pg_catalog.json_objectagg(1, cast(15 as regoperator)); +select pg_catalog.json_objectagg(1, cast(15 as regoper)); +select pg_catalog.json_objectagg(1, cast(598 as regprocedure)); +select pg_catalog.json_objectagg(1, cast(23 as regtype)); +select pg_catalog.json_objectagg(1, cast(3748 as regconfig)); +select pg_catalog.json_objectagg(1, cast(3765 as regdictionary)); + drop schema json_objectagg_test cascade; reset current_schema; -- Gitee From 028eacb6cd039a16acee68d7faa459dc4a96fac9 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 11 Dec 2023 21:53:38 +0800 Subject: [PATCH 116/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dissue=20I8LYIB=20datetime=E6=AF=94=E8=BE=83?= =?UTF-8?q?=E7=BB=93=E6=9E=9C=E4=B8=8Emysql=E4=B8=8D=E4=B8=80=E8=87=B4?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98.=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E5=86=85=E5=AE=B9=E3=80=91:=20=E9=80=9A=E8=BF=87=E5=88=9B?= =?UTF-8?q?=E5=BB=BAdatetime=E4=B8=8Eint=E4=B9=8B=E9=97=B4=E7=9A=84=20>,>?= =?UTF-8?q?=3D,<,<=3D,=3D,<>=E7=9A=84=E7=AC=A6=E5=8F=B7=E8=A7=A3=E5=86=B3d?= 
=?UTF-8?q?atetime=E6=AF=94=E8=BE=83=E7=BB=93=E6=9E=9C=E4=B8=8Emysql?= =?UTF-8?q?=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E4=B9=8B=E5=89=8Ddatetime=E4=B8=8Eint=E6=AF=94=E8=BE=83?= =?UTF-8?q?=EF=BC=8C=E9=83=BD=E8=BD=AC=E6=88=90=E4=BA=86int=E5=8E=BB?= =?UTF-8?q?=E6=AF=94=E8=BE=83=EF=BC=8Cdatetime=E6=AF=94int=E5=A4=9A?= =?UTF-8?q?=E6=97=B6=E5=88=86=E7=A7=926=E4=B8=AA0=EF=BC=8C=E5=AF=BC?= =?UTF-8?q?=E8=87=B4=E6=AF=94=E8=BE=83=E7=BB=93=E6=9E=9C=E6=9C=89=E9=97=AE?= =?UTF-8?q?=E9=A2=98=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9?= =?UTF-8?q?=E6=A1=88=E3=80=91:=20datetime=E4=B8=8Eint=E6=AF=94=E8=BE=83?= =?UTF-8?q?=EF=BC=8C=E5=B0=86int=E8=BD=AC=E6=88=90datetime=E5=86=8D?= =?UTF-8?q?=E5=8E=BB=E6=AF=94=E8=BE=83=E3=80=82=20=E3=80=90=E5=85=B3?= =?UTF-8?q?=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e?= =?UTF-8?q?.gitee.com/opengaussorg/dashboard=3Fissue=3DI8LYIB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../time_operator_test.out | 1427 +++++++++++++++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 73 +- .../rollback_script/dolphin--3.0--2.0.sql | 82 +- .../time_operator_test.sql | 100 ++ .../upgrade_script/dolphin--2.0--3.0.sql | 151 ++ 5 files changed, 1827 insertions(+), 6 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out index d18e18e3a..111cfd12a 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/time_operator_test.out @@ -865,6 +865,1433 @@ select date '2022-02-01' / 2 AS result; 10110100.500000000000 (1 row) +-- test timestamp>,>=,<,<=,=,<> with int/uint +set 
dolphin.sql_mode='sql_mode_strict,sql_mode_full_group,pipes_as_concat,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +create table t1 (id int, dt datetime, dt1 timestamp); +insert into t1 values (1,"2001-08-14 00:00:00","2001-08-14 00:00:00"),(2,"2001-08-15 00:00:00","2001-08-15 00:00:00"),(3,"2001-08-16 00:00:00","2001-08-16 00:00:00"),(4,"2003-09-15 01:20:30","2003-09-15 01:20:30"); +select * from t1 where dt > 20021020 and dt1 > 20021020 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20 and dt1 > 20 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20000000000000 and dt1 > 20000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function 
"datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 200000000000000 and dt1 > 200000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt >= 20021020 and dt1 >= 20021020 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt >= 20 and dt1 >= 20 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range 
+CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20000000000000 and dt1 >= 20000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 200000000000000 and dt1 >= 200000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range 
+CONTEXT: SQL function "datetime_double_ge" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 20030915 and dt1 < 20030915 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt < 2003 and dt < 2003 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 200000000000000 and dt < 200000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <= 20030915 and dt <= 
20030915 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt <= 2003 and dt <= 2003 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 200000000000000 and dt <= 200000000000000 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <> 20010814 and dt <> 20010814 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 
2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(3 rows) + +select * from t1 where dt = 20010814; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 +(1 row) + +select * from t1 where dt > 20021020::uint4 and dt1 > 20021020::uint4 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20::uint4 and dt1 > 20::uint4 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20000000000000::uint4 and dt1 > 20000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 
+WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 200000000000000::uint4 and dt1 > 200000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20021020::uint4 and dt1 >= 20021020::uint4 order by dt; + id | dt | dt1 
+----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt >= 20::uint4 and dt1 >= 20::uint4 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20000000000000::uint4 and dt1 >= 20000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of 
range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 200000000000000::uint4 and dt1 >= 200000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt < 20030915::uint4 and dt1 < 20030915::uint4 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt < 2003::uint4 and dt1 < 2003::uint4 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 
+WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 200000000000000::uint4 and dt1 < 200000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 20030915::uint4 and dt1 <= 20030915::uint4 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt <= 2003::uint4 and dt1 <= 2003::uint4 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 200000000000000::uint4 and dt1 <= 200000000000000::uint4 order by dt; +WARNING: int unsigned out of range +WARNING: int unsigned out of range +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function 
"datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <> 20010814::uint4 and dt1 <> 20010814::uint4 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(3 rows) + + +select * from t1 where dt > 20021020::int8 and dt1 > 20021020::int8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20::int8 and dt1 > 20::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20000000000000::int8 and dt1 > 20000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL 
function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 200000000000000::int8 and dt1 > 200000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt >= 20021020::int8 and dt1 >= 20021020::int8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt >= 20::int8 and dt1 >= 20::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" 
statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20000000000000::int8 and dt1 >= 20000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 200000000000000::int8 and dt1 >= 200000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function 
"datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 20030915::int8 and dt1 < 20030915::int8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt < 2003::int8 and dt1 < 2003::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 200000000000000::int8 and dt1 < 200000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_lt" statement 1 + id | dt | dt1 
+----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <= 20030915::int8 and dt1 <= 20030915::int8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt <= 2003::int8 and dt1 <= 2003::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 200000000000000::int8 and dt1 <= 200000000000000::int8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_le" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 
2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <> 20010814::int8 and dt1 <> 20010814::int8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(3 rows) + + +select * from t1 where dt > 20021020::uint8 and dt1 > 20021020::uint8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20::uint8 and dt1 > 20::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20000000000000::uint8 and dt1 > 20000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL 
function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 200000000000000::uint8 and dt1 > 200000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt >= 20021020::uint8 and dt1 >= 20021020::uint8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt >= 20::uint8 and dt1 >= 20::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 
+WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20000000000000::uint8 and dt1 >= 20000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 200000000000000::uint8 and dt1 >= 200000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" 
statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 20030915::uint8 and dt1 < 20030915::uint8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt < 2003::uint8 and dt1 < 2003::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt < 200000000000000::uint8 and dt1 < 200000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_lt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 
00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <= 20030915::uint8 and dt1 <= 20030915::uint8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 +(3 rows) + +select * from t1 where dt <= 2003::uint8 and dt1 <= 2003::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 200000000000000::uint8 and dt1 <= 200000000000000::uint8 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_le" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 
01:20:30-07 +(4 rows) + +select * from t1 where dt <> 20010814::uint8 and dt1 <> 20010814::uint8 order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(3 rows) + + +select * from t1 where dt > 20::int1 and dt1 > 20::int1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20::int1 and dt1 >= 20::int1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function 
"timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt < 20::int1 and dt1 < 20::int1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 20::int1 and dt1 <= 20::int1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <> 20::int1 and dt1 <> 20::int1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range 
+CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 2000::int2 and dt1 > 2000::int2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 2000::int2 and dt1 >= 2000::int2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function 
"timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt < 2000::int2 and dt1 < 2000::int2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 2000::int2 and dt1 <= 2000::int2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <> 2000::int2 and dt1 <> 2000::int2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of 
range +CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_double_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_double_ne" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20::uint1 and dt1 > 20::uint1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 20::uint1 and dt1 >= 20::uint1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function 
"timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt < 20::uint1 and dt1 < 20::uint1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 20::uint1 and dt1 <= 20::uint1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <> 20::uint1 and dt1 <> 20::uint1 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL 
function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 2000::uint2 and dt1 > 2000::uint2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_gt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_gt" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt >= 2000::uint2 and dt1 >= 2000::uint2 order 
by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ge" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ge" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt < 2000::uint2 and dt1 < 2000::uint2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_lt" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <= 2000::uint2 and dt1 <= 2000::uint2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_le" statement 1 + id | dt | dt1 +----+----+----- +(0 rows) + +select * from t1 where dt <> 2000::uint2 and dt1 
<> 2000::uint2 order by dt; +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "datetime_uint8_ne" statement 1 +WARNING: timestamp out of range +CONTEXT: SQL function "timestamp_uint8_ne" statement 1 + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt > 20021020::float and dt1 > 20021020::float order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20021020::double and dt1 > 20021020::double order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt > 20021020::numeric and dt1 > 20021020::numeric order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(1 row) + +select * from t1 where dt <> 20021020::float and dt1 <> 20021020::float order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 
01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <> 20021020::double and dt1 <> 20021020::double order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +select * from t1 where dt <> 20021020::numeric and dt1 <> 20021020::numeric order by dt; + id | dt | dt1 +----+---------------------+------------------------ + 1 | 2001-08-14 00:00:00 | 2001-08-14 00:00:00-07 + 2 | 2001-08-15 00:00:00 | 2001-08-15 00:00:00-07 + 3 | 2001-08-16 00:00:00 | 2001-08-16 00:00:00-07 + 4 | 2003-09-15 01:20:30 | 2003-09-15 01:20:30-07 +(4 rows) + +drop table t1; ---------- tail ---------- drop schema time_operator_test_schema cascade; NOTICE: drop cascades to 2 other objects diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index d2f696e14..97e3218c5 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -332,14 +332,26 @@ PG_FUNCTION_INFO_V1_PUBLIC(timestamp_agg_finalfn); extern "C" DLL_PUBLIC Datum timestamp_agg_finalfn(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(timestamp_cast); extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(convert_datetime_double); +extern "C" DLL_PUBLIC Datum convert_datetime_double(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(convert_timestamptz_double); +extern "C" DLL_PUBLIC Datum convert_timestamptz_double(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(convert_datetime_uint64); +extern "C" DLL_PUBLIC Datum convert_datetime_uint64(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(convert_timestamptz_uint64); +extern "C" DLL_PUBLIC Datum convert_timestamptz_uint64(PG_FUNCTION_ARGS); #endif /* b format datetime and timestamp type */ 
#ifdef DOLPHIN static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, - bool can_ignore, TimeErrorType* time_error_type); + bool can_ignore, TimeErrorType* time_error_type, int* ts_cnt = NULL); static int64 integer_b_format_timestamp(bool hasTz, int64 ts, - bool can_ignore, TimeErrorType* time_error_type); + bool can_ignore, TimeErrorType* time_error_type, int* ts_cnt = NULL); #else static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore); static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore); @@ -882,7 +894,7 @@ int NumberTimestamp(char *str, pg_tm *tm, fsec_t *fsec) #ifdef DOLPHIN static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore, - TimeErrorType* time_error_type) + TimeErrorType* time_error_type, int* ts_cnt) #else static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t fsec, bool can_ignore) #endif @@ -908,9 +920,19 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t ++cnt; tmp /= 10; } + +#ifdef DOLPHIN + if (ts_cnt != NULL) { + *ts_cnt = cnt; + } +#endif + if (cnt > TIMESTAMP_YYYYMMDDhhmmss_LEN) { ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); +#ifdef DOLPHIN + *time_error_type = TIME_INCORRECT; +#endif return TIMESTAMP_ZERO; } /* has time field : YYYYMMDDhhmmss or YYMMDDhhmmss */ @@ -952,10 +974,11 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t } #ifdef DOLPHIN -static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore, TimeErrorType* time_error_type) +static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore, TimeErrorType* time_error_type, + int* ts_cnt) { TimestampTz result; - result = int64_b_format_timestamp_internal(hasTz, ts, 0, can_ignore, time_error_type); + result = int64_b_format_timestamp_internal(hasTz, ts, 0, 
can_ignore, time_error_type, ts_cnt); PG_RETURN_TIMESTAMP(result); } #else @@ -978,6 +1001,17 @@ Datum timestamp_to_datum(PG_FUNCTION_ARGS, bool hasTz, int64 ts) PG_RETURN_TIMESTAMP(result); } +Datum timestamp_to_datum_with_null_result(PG_FUNCTION_ARGS, bool hasTz, int64 ts) +{ + TimeErrorType time_error_type = TIME_CORRECT; + int ts_cnt = 0; + int64 result = integer_b_format_timestamp(hasTz, ts, fcinfo->can_ignore, &time_error_type, &ts_cnt); + if (ts_cnt > TIMESTAMP_YYYYMMDDhhmmss_LEN) { + PG_RETURN_TIMESTAMP(B_FORMAT_TIMESTAMP_MAX_VALUE); + } + PG_RETURN_TIMESTAMP(result); +} + Datum int8_b_format_datetime(PG_FUNCTION_ARGS) { int64 ts = PG_GETARG_INT64(0); @@ -1026,6 +1060,30 @@ Datum int64_b_format_timestamp(PG_FUNCTION_ARGS) return timestamp_to_datum(fcinfo, true, ts); } +Datum convert_datetime_double(PG_FUNCTION_ARGS) +{ + int64 ts = (int64)PG_GETARG_FLOAT8(0); + return timestamp_to_datum_with_null_result(fcinfo, false, ts); +} + +Datum convert_timestamptz_double(PG_FUNCTION_ARGS) +{ + int64 ts = (int64)PG_GETARG_FLOAT8(0); + return timestamp_to_datum_with_null_result(fcinfo, true, ts); +} + +Datum convert_datetime_uint64(PG_FUNCTION_ARGS) +{ + int64 ts = (uint64)PG_GETARG_UINT64(0); + return timestamp_to_datum_with_null_result(fcinfo, false, ts); +} + +Datum convert_timestamptz_uint64(PG_FUNCTION_ARGS) +{ + int64 ts = (uint64)PG_GETARG_UINT64(0); + return timestamp_to_datum_with_null_result(fcinfo, true, ts); +} + #else Datum int32_b_format_datetime(PG_FUNCTION_ARGS) { @@ -2576,6 +2634,11 @@ int timestamp2tm(Timestamp dt, int* tzp, struct pg_tm* tm, fsec_t* fsec, const c Timestamp time; pg_time_t utime; + + error_t rc = EOK; + rc = memset_s(tm, sizeof(pg_tm), 0, sizeof(pg_tm)); + securec_check(rc, "\0", "\0"); + /* * If u_sess->time_cxt.HasCTZSet is true then we have a brute force time zone specified. 
Go * ahead and rotate to the local time zone since we will later bypass any diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 480904c0f..afeeb1452 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -174,4 +174,84 @@ DROP FUNCTION IF EXISTS pg_catalog.second(timetz); DROP FUNCTION IF EXISTS pg_catalog.hour(timestamptz); DROP FUNCTION IF EXISTS pg_catalog.microsecond(timestamptz); DROP FUNCTION IF EXISTS pg_catalog.minute(timestamptz); -DROP FUNCTION IF EXISTS pg_catalog.second(timestamptz); \ No newline at end of file +DROP FUNCTION IF EXISTS pg_catalog.second(timestamptz); + + +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_gt(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_ge(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_lt(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_le(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_eq(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp without time zone, double precision); +drop function if exists pg_catalog.datetime_double_ne(timestamp without time zone, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.>(timestamptz, double precision); +drop function if exists 
pg_catalog.timestamp_double_gt(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp, double); +drop function if exists pg_catalog.timestamp_double_ge(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp, double); +drop function if exists pg_catalog.timestamp_double_lt(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp, double); +drop function if exists pg_catalog.timestamp_double_le(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp, double); +drop function if exists pg_catalog.timestamp_double_eq(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp, double); +drop function if exists pg_catalog.timestamp_double_ne(timestamptz, double precision); + +DROP OPERATOR IF EXISTS pg_catalog.>(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_gt(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.>=(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_ge(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_lt(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_le(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.=(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_eq(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamp without time zone, uint8); +drop function if exists pg_catalog.datetime_uint8_ne(timestamp without time zone, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.>(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_gt(timestamptz, uint8); + +DROP OPERATOR IF EXISTS 
pg_catalog.>=(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_ge(timestamptz, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_lt(timestamptz, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<=(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_le(timestamptz, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.=(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_eq(timestamptz, uint8); + +DROP OPERATOR IF EXISTS pg_catalog.<>(timestamptz, uint8); +drop function if exists pg_catalog.timestamp_uint8_ne(timestamptz, uint8); + +drop function if exists pg_catalog.convert_datetime_double(double precision); +drop function if exists pg_catalog.convert_timestamptz_double(double precision); +drop function if exists pg_catalog.convert_datetime_uint8(uint8); +drop function if exists pg_catalog.convert_timestamptz_uint8(uint8); + + diff --git a/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql b/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql index bad4e97ae..cd4f5203f 100644 --- a/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql +++ b/contrib/dolphin/sql/operator_compatibility_test/time_operator_test.sql @@ -552,6 +552,106 @@ select date '2022-02-01' - 2 AS result; select date '2022-02-01' * 2 AS result; select date '2022-02-01' / 2 AS result; +-- test timestamp>,>=,<,<=,=,<> with int/uint +set dolphin.sql_mode='sql_mode_strict,sql_mode_full_group,pipes_as_concat,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +create table t1 (id int, dt datetime, dt1 timestamp); +insert into t1 values (1,"2001-08-14 00:00:00","2001-08-14 00:00:00"),(2,"2001-08-15 00:00:00","2001-08-15 00:00:00"),(3,"2001-08-16 00:00:00","2001-08-16 00:00:00"),(4,"2003-09-15 01:20:30","2003-09-15 01:20:30"); +select * from t1 where dt > 20021020 and dt1 > 20021020 order by dt; +select 
* from t1 where dt > 20 and dt1 > 20 order by dt; +select * from t1 where dt > 20000000000000 and dt1 > 20000000000000 order by dt; +select * from t1 where dt > 200000000000000 and dt1 > 200000000000000 order by dt; +select * from t1 where dt >= 20021020 and dt1 >= 20021020 order by dt; +select * from t1 where dt >= 20 and dt1 >= 20 order by dt; +select * from t1 where dt >= 20000000000000 and dt1 >= 20000000000000 order by dt; +select * from t1 where dt >= 200000000000000 and dt1 >= 200000000000000 order by dt; +select * from t1 where dt < 20030915 and dt1 < 20030915 order by dt; +select * from t1 where dt < 2003 and dt < 2003 order by dt; +select * from t1 where dt < 200000000000000 and dt < 200000000000000 order by dt; +select * from t1 where dt <= 20030915 and dt <= 20030915 order by dt; +select * from t1 where dt <= 2003 and dt <= 2003 order by dt; +select * from t1 where dt <= 200000000000000 and dt <= 200000000000000 order by dt; +select * from t1 where dt <> 20010814 and dt <> 20010814 order by dt; +select * from t1 where dt = 20010814; + +select * from t1 where dt > 20021020::uint4 and dt1 > 20021020::uint4 order by dt; +select * from t1 where dt > 20::uint4 and dt1 > 20::uint4 order by dt; +select * from t1 where dt > 20000000000000::uint4 and dt1 > 20000000000000::uint4 order by dt; +select * from t1 where dt > 200000000000000::uint4 and dt1 > 200000000000000::uint4 order by dt; +select * from t1 where dt >= 20021020::uint4 and dt1 >= 20021020::uint4 order by dt; +select * from t1 where dt >= 20::uint4 and dt1 >= 20::uint4 order by dt; +select * from t1 where dt >= 20000000000000::uint4 and dt1 >= 20000000000000::uint4 order by dt; +select * from t1 where dt >= 200000000000000::uint4 and dt1 >= 200000000000000::uint4 order by dt; +select * from t1 where dt < 20030915::uint4 and dt1 < 20030915::uint4 order by dt; +select * from t1 where dt < 2003::uint4 and dt1 < 2003::uint4 order by dt; +select * from t1 where dt < 200000000000000::uint4 and dt1 < 
200000000000000::uint4 order by dt; +select * from t1 where dt <= 20030915::uint4 and dt1 <= 20030915::uint4 order by dt; +select * from t1 where dt <= 2003::uint4 and dt1 <= 2003::uint4 order by dt; +select * from t1 where dt <= 200000000000000::uint4 and dt1 <= 200000000000000::uint4 order by dt; +select * from t1 where dt <> 20010814::uint4 and dt1 <> 20010814::uint4 order by dt; + +select * from t1 where dt > 20021020::int8 and dt1 > 20021020::int8 order by dt; +select * from t1 where dt > 20::int8 and dt1 > 20::int8 order by dt; +select * from t1 where dt > 20000000000000::int8 and dt1 > 20000000000000::int8 order by dt; +select * from t1 where dt > 200000000000000::int8 and dt1 > 200000000000000::int8 order by dt; +select * from t1 where dt >= 20021020::int8 and dt1 >= 20021020::int8 order by dt; +select * from t1 where dt >= 20::int8 and dt1 >= 20::int8 order by dt; +select * from t1 where dt >= 20000000000000::int8 and dt1 >= 20000000000000::int8 order by dt; +select * from t1 where dt >= 200000000000000::int8 and dt1 >= 200000000000000::int8 order by dt; +select * from t1 where dt < 20030915::int8 and dt1 < 20030915::int8 order by dt; +select * from t1 where dt < 2003::int8 and dt1 < 2003::int8 order by dt; +select * from t1 where dt < 200000000000000::int8 and dt1 < 200000000000000::int8 order by dt; +select * from t1 where dt <= 20030915::int8 and dt1 <= 20030915::int8 order by dt; +select * from t1 where dt <= 2003::int8 and dt1 <= 2003::int8 order by dt; +select * from t1 where dt <= 200000000000000::int8 and dt1 <= 200000000000000::int8 order by dt; +select * from t1 where dt <> 20010814::int8 and dt1 <> 20010814::int8 order by dt; + +select * from t1 where dt > 20021020::uint8 and dt1 > 20021020::uint8 order by dt; +select * from t1 where dt > 20::uint8 and dt1 > 20::uint8 order by dt; +select * from t1 where dt > 20000000000000::uint8 and dt1 > 20000000000000::uint8 order by dt; +select * from t1 where dt > 200000000000000::uint8 and dt1 > 
200000000000000::uint8 order by dt; +select * from t1 where dt >= 20021020::uint8 and dt1 >= 20021020::uint8 order by dt; +select * from t1 where dt >= 20::uint8 and dt1 >= 20::uint8 order by dt; +select * from t1 where dt >= 20000000000000::uint8 and dt1 >= 20000000000000::uint8 order by dt; +select * from t1 where dt >= 200000000000000::uint8 and dt1 >= 200000000000000::uint8 order by dt; +select * from t1 where dt < 20030915::uint8 and dt1 < 20030915::uint8 order by dt; +select * from t1 where dt < 2003::uint8 and dt1 < 2003::uint8 order by dt; +select * from t1 where dt < 200000000000000::uint8 and dt1 < 200000000000000::uint8 order by dt; +select * from t1 where dt <= 20030915::uint8 and dt1 <= 20030915::uint8 order by dt; +select * from t1 where dt <= 2003::uint8 and dt1 <= 2003::uint8 order by dt; +select * from t1 where dt <= 200000000000000::uint8 and dt1 <= 200000000000000::uint8 order by dt; +select * from t1 where dt <> 20010814::uint8 and dt1 <> 20010814::uint8 order by dt; + +select * from t1 where dt > 20::int1 and dt1 > 20::int1 order by dt; +select * from t1 where dt >= 20::int1 and dt1 >= 20::int1 order by dt; +select * from t1 where dt < 20::int1 and dt1 < 20::int1 order by dt; +select * from t1 where dt <= 20::int1 and dt1 <= 20::int1 order by dt; +select * from t1 where dt <> 20::int1 and dt1 <> 20::int1 order by dt; +select * from t1 where dt > 2000::int2 and dt1 > 2000::int2 order by dt; +select * from t1 where dt >= 2000::int2 and dt1 >= 2000::int2 order by dt; +select * from t1 where dt < 2000::int2 and dt1 < 2000::int2 order by dt; +select * from t1 where dt <= 2000::int2 and dt1 <= 2000::int2 order by dt; +select * from t1 where dt <> 2000::int2 and dt1 <> 2000::int2 order by dt; +select * from t1 where dt > 20::uint1 and dt1 > 20::uint1 order by dt; +select * from t1 where dt >= 20::uint1 and dt1 >= 20::uint1 order by dt; +select * from t1 where dt < 20::uint1 and dt1 < 20::uint1 order by dt; +select * from t1 where dt <= 20::uint1 and 
dt1 <= 20::uint1 order by dt; +select * from t1 where dt <> 20::uint1 and dt1 <> 20::uint1 order by dt; +select * from t1 where dt > 2000::uint2 and dt1 > 2000::uint2 order by dt; +select * from t1 where dt >= 2000::uint2 and dt1 >= 2000::uint2 order by dt; +select * from t1 where dt < 2000::uint2 and dt1 < 2000::uint2 order by dt; +select * from t1 where dt <= 2000::uint2 and dt1 <= 2000::uint2 order by dt; +select * from t1 where dt <> 2000::uint2 and dt1 <> 2000::uint2 order by dt; + + +select * from t1 where dt > 20021020::float and dt1 > 20021020::float order by dt; +select * from t1 where dt > 20021020::double and dt1 > 20021020::double order by dt; +select * from t1 where dt > 20021020::numeric and dt1 > 20021020::numeric order by dt; +select * from t1 where dt <> 20021020::float and dt1 <> 20021020::float order by dt; +select * from t1 where dt <> 20021020::double and dt1 <> 20021020::double order by dt; +select * from t1 where dt <> 20021020::numeric and dt1 <> 20021020::numeric order by dt; + +drop table t1; + ---------- tail ---------- drop schema time_operator_test_schema cascade; reset current_schema; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 57f86ad2e..ac1c2ab84 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -280,3 +280,154 @@ CREATE OR REPLACE FUNCTION pg_catalog.hour (timestamptz) RETURNS float8 LANGUAGE CREATE OR REPLACE FUNCTION pg_catalog.microsecond (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMicrosecondFromTimestampTz'; CREATE OR REPLACE FUNCTION pg_catalog.minute (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetMinuteFromTimestampTz'; CREATE OR REPLACE FUNCTION pg_catalog.second (timestamptz) RETURNS float8 LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'GetSecondFromTimestampTz'; + +CREATE OR REPLACE FUNCTION 
pg_catalog.convert_datetime_double(double precision) RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'convert_datetime_double'; +CREATE OR REPLACE FUNCTION pg_catalog.convert_timestamptz_double(double precision) RETURNS timestamp LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'convert_timestamptz_double'; +CREATE OR REPLACE FUNCTION pg_catalog.convert_datetime_uint8(uint8) RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'convert_datetime_uint64'; +CREATE OR REPLACE FUNCTION pg_catalog.convert_timestamptz_uint8(uint8) RETURNS timestamp LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'convert_timestamptz_uint64'; + +create or replace function pg_catalog.datetime_double_gt( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 > convert_datetime_double($2)) $$; +create operator pg_catalog.>(leftarg = timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_gt); + +create or replace function pg_catalog.datetime_double_ge( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 >= convert_datetime_double($2)) $$; +create operator pg_catalog.>=(leftarg = timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_ge); + +create or replace function pg_catalog.datetime_double_lt( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 < convert_datetime_double($2)) $$; +create operator pg_catalog.<(leftarg = timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_lt); + +create or replace function pg_catalog.datetime_double_le( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <= convert_datetime_double($2)) $$; +create operator pg_catalog.<=(leftarg = 
timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_le); + +create or replace function pg_catalog.datetime_double_eq( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 = convert_datetime_double($2)) $$; +create operator pg_catalog.=(leftarg = timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_eq); + +create or replace function pg_catalog.datetime_double_ne( + timestamp without time zone, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <> convert_datetime_double($2)) $$; +create operator pg_catalog.<>(leftarg = timestamp without time zone, rightarg = double precision, procedure = pg_catalog.datetime_double_ne); + + +create or replace function pg_catalog.timestamp_double_gt( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 > convert_timestamptz_double($2)) $$; +create operator pg_catalog.>(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_gt); + +create or replace function pg_catalog.timestamp_double_ge( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 >= convert_timestamptz_double($2)) $$; +create operator pg_catalog.>=(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_ge); + +create or replace function pg_catalog.timestamp_double_lt( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 < convert_timestamptz_double($2)) $$; +create operator pg_catalog.<(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_lt); + +create or replace function pg_catalog.timestamp_double_le( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <= convert_timestamptz_double($2)) $$; +create operator 
pg_catalog.<=(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_le); + +create or replace function pg_catalog.timestamp_double_eq( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 = convert_timestamptz_double($2)) $$; +create operator pg_catalog.=(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_eq); + +create or replace function pg_catalog.timestamp_double_ne( + timestamptz, + double precision +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <> convert_timestamptz_double($2)) $$; +create operator pg_catalog.<>(leftarg = timestamptz, rightarg = double precision, procedure = pg_catalog.timestamp_double_ne); + +create or replace function pg_catalog.datetime_uint8_gt( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 > convert_datetime_uint8($2)) $$; +create operator pg_catalog.>(leftarg = timestamp without time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_gt); + +create or replace function pg_catalog.datetime_uint8_ge( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 >= convert_datetime_uint8($2)) $$; +create operator pg_catalog.>=(leftarg = timestamp without time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_ge); + +create or replace function pg_catalog.datetime_uint8_lt( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 < convert_datetime_uint8($2)) $$; +create operator pg_catalog.<(leftarg = timestamp without time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_lt); + +create or replace function pg_catalog.datetime_uint8_le( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <= convert_datetime_uint8($2)) $$; +create operator pg_catalog.<=(leftarg = timestamp without 
time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_le); + +create or replace function pg_catalog.datetime_uint8_eq( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 = convert_datetime_uint8($2)) $$; +create operator pg_catalog.=(leftarg = timestamp without time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_eq); + +create or replace function pg_catalog.datetime_uint8_ne( + timestamp without time zone, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <> convert_datetime_uint8($2)) $$; +create operator pg_catalog.<>(leftarg = timestamp without time zone, rightarg = uint8, procedure = pg_catalog.datetime_uint8_ne); + + +create or replace function pg_catalog.timestamp_uint8_gt( + timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 > convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.>(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_gt); + +create or replace function pg_catalog.timestamp_uint8_ge( + timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 >= convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.>=(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_ge); + +create or replace function pg_catalog.timestamp_uint8_lt( + timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 < convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.<(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_lt); + +create or replace function pg_catalog.timestamp_uint8_le( + timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <= convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.<=(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_le); + +create or replace function pg_catalog.timestamp_uint8_eq( + 
timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 = convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.=(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_eq); + +create or replace function pg_catalog.timestamp_uint8_ne( + timestamptz, + uint8 +) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <> convert_timestamptz_uint8($2)) $$; +create operator pg_catalog.<>(leftarg = timestamptz, rightarg = uint8, procedure = pg_catalog.timestamp_uint8_ne); -- Gitee From fde138c88921d728a212d4ec615f30cfc738e137 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Tue, 12 Dec 2023 17:11:00 +0800 Subject: [PATCH 117/434] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E8=B5=84=E6=96=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 208 ++++++++++++++-------------------- 1 file changed, 83 insertions(+), 125 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index b026041bd..df89cf53a 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -1,76 +1,81 @@ -|Linux/macOS|Windows|Coverity|Code Coverage| -|:---:|:---:|:---:|:---:| -|[![Build Status](https://travis-ci.org/timescale/timescaledb.svg?branch=master)](https://travis-ci.org/timescale/timescaledb/builds)|[![Windows build status](https://ci.appveyor.com/api/projects/status/15sqkl900t04hywu/branch/master?svg=true)](https://ci.appveyor.com/project/timescale/timescaledb/branch/master)|[![Coverity Scan Build Status](https://scan.coverity.com/projects/timescale-timescaledb/badge.svg)](https://scan.coverity.com/projects/timescale-timescaledb)|[![Code Coverage](https://codecov.io/gh/timescale/timescaledb/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/timescale/timescaledb) +目录 +[Toc] -## TimescaleDB +# **1.** 概述 -TimescaleDB is an open-source database designed to 
make SQL scalable for -time-series data. It is engineered up from PostgreSQL and packaged as a -PostgreSQL extension, providing automatic partitioning across time and space -(partitioning key), as well as full SQL support. +## **1.1.** 目的 -[Timescale Cloud](https://tsdb.co/GitHubTimescaleCloud) is our fully managed, -hosted version of TimescaleDB, available in the cloud of your choice -(pay-as-you-go, with free trial credits to start). To determine which option -is best for you, see [Timescale -Products](https://tsdb.co/GitHubTimescaleProducts) for more information about -our Apache-2 version, TimescaleDB Community (self-hosted) and Timescale Cloud -(hosted), including: feature comparisons, FAQ, documentation, and support. +本文旨在指导如何安装ã€ä½¿ç”¨TimescaleDB。 -Below is an introduction to TimescaleDB. For more information, please check out -these other resources: -- [Developer Documentation](https://tsdb.co/GitHubTimescaleDocs) -- [Slack Channel](https://slack-login.timescale.com) -- [Timescale Release Notes & Future Plans](https://tsdb.co/GitHubTimescaleReleaseNotes) +## **1.2.** TimescaleDBä»‹ç» -For reference and clarity, all code files in this repository reference -licensing in their header (either Apache License, Version 2.0 or [Timescale -License -(TSL)](https://github.com/timescale/timescaledb/blob/master/tsl/LICENSE-TIMESCALE)). Apache-2 -licensed binaries can be built by passing `-DAPACHE_ONLY=1` to `bootstrap`. +TimescaleDB是一个开æºçš„æ—¶é—´åºåˆ—æ•°æ®åº“ï¼Œä¸“é—¨ç”¨äºŽé«˜æ€§èƒ½å’Œå¯æ‰©å±•的时间åºåˆ—æ•°æ®å­˜å‚¨å’Œåˆ†æžã€‚它结åˆäº†å…³ç³»åž‹æ•°æ®åº“çš„åŠŸèƒ½å’Œä¼˜åŠ¿ï¼Œä»¥åŠæ—¶é—´åºåˆ—æ•°æ®åº“的特性,æä¾›äº†ä¸€å¥—强大的功能æ¥å¤„ç†å¤§è§„模时间åºåˆ—æ•°æ®ã€‚基于以上æè¿°ï¼ŒTimescaleDB 在以下场景中éžå¸¸é€‚用: -[Contributors welcome.](https://github.com/timescale/timescaledb/blob/master/CONTRIBUTING.md) +1. 物è”网(IoT)应用:物è”网应用通常会产生大é‡çš„æ—¶é—´åºåˆ—æ•°æ®ï¼Œä¾‹å¦‚传感器数æ®ã€è®¾å¤‡ç›‘控数æ®ç­‰ã€‚TimescaleDB 的高性能和数æ®åˆ†åŒºåŠŸèƒ½å¯ä»¥æœ‰æ•ˆåœ°å¤„ç†è¿™äº›æ•°æ®ï¼Œå¹¶æ”¯æŒå¿«é€Ÿçš„实时查询和分æžã€‚ +2. 
金èžå’Œäº¤æ˜“æ•°æ®ï¼šé‡‘èžè¡Œä¸šéœ€è¦å¯¹äº¤æ˜“æ•°æ®è¿›è¡Œé«˜æ•ˆçš„存储和分æžã€‚TimescaleDB 的连续èšåˆå’Œæ•°æ®ä¿ç•™ç­–略功能å¯ä»¥æ–¹ä¾¿åœ°è®¡ç®—和维护èšåˆæ•°æ®ï¼ŒåŒæ—¶è‡ªåŠ¨åˆ é™¤è¿‡æœŸçš„æ•°æ®ã€‚ +3. 日志和监控数æ®ï¼šåœ¨æ—¥å¿—和监控系统中,需è¦å¯¹å¤§é‡çš„事件和指标数æ®è¿›è¡Œå­˜å‚¨å’Œåˆ†æžã€‚TimescaleDB 的数æ®è¿žç»­æ€§å’Œæ•°æ®åŽ‹ç¼©åŠŸèƒ½å¯ä»¥æ»¡è¶³é«˜å¹¶å‘的写入需求,并å‡å°‘存储空间的使用。 +4. æ—¶é—´åºåˆ—分æžï¼šå¯¹äºŽéœ€è¦è¿›è¡Œæ—¶é—´åºåˆ—分æžçš„场景,TimescaleDB æä¾›äº† SQL 接å£å’Œä¸°å¯Œçš„æ—¶é—´åºåˆ—函数,å¯ä»¥æ–¹ä¾¿åœ°è¿›è¡Œå¤æ‚çš„æŸ¥è¯¢å’Œåˆ†æžæ“作。 -(To build TimescaleDB from source, see instructions in [_Building from source_](https://github.com/timescale/timescaledb/blob/master/docs/BuildSource.md).) +TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€openGaussçš„å‘å±•ï¼Œå¯¹æ—¶åºæ•°æ®çš„处ç†èƒ½åŠ›ä¹Ÿæˆä¸ºäº†å¼€å‘组é‡ç‚¹è€ƒè™‘的功能,而且openGauss基于PostgreSQL 9.2.4版本优化,所以从PostgreSQLæ•°æ®åº“å°†TimescaleDB扩展è¿ç§»è¿‡æ¥æ˜¯ä¸€é¡¹æ»¡è¶³ç»æµŽæ€§å’Œç§‘学性的决定。 -### Using TimescaleDB +## **1.3.** 注æ„事项 -TimescaleDB scales PostgreSQL for time-series data via automatic -partitioning across time and space (partitioning key), yet retains -the standard PostgreSQL interface. +### 1.3.1. 一般性é™åˆ¶ -In other words, TimescaleDB exposes what look like regular tables, but -are actually only an -abstraction (or a virtual view) of many individual tables comprising the -actual data. This single-table view, which we call a -[hypertable](https://tsdb.co/GitHubTimescaleHypertable), -is comprised of many chunks, which are created by partitioning -the hypertable's data in either one or two dimensions: by a time -interval, and by an (optional) "partition key" such as -device id, location, user id, etc. 
([Architecture discussion](https://tsdb.co/GitHubTimescaleArchitecture)) +- 䏿”¯æŒéžç¼–译安装版本; +- ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› +- TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› +- TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; +- 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› -Virtually all user interactions with TimescaleDB are with -hypertables. Creating tables and indexes, altering tables, inserting -data, selecting data, etc., can (and should) all be executed on the -hypertable. +# **2.** TimescaleDB安装方法 -From the perspective of both use and management, TimescaleDB just -looks and feels like PostgreSQL, and can be managed and queried as -such. -#### Before you start +## **2.1.** æºç å®‰è£… -PostgreSQL's out-of-the-box settings are typically too conservative for modern -servers and TimescaleDB. You should make sure your `postgresql.conf` -settings are tuned, either by using [timescaledb-tune](https://github.com/timescale/timescaledb-tune) -or doing it manually. 
-#### Creating a hypertable +从Plugin仓下载好TimescaleDBæºç ï¼Œè§£åŽ‹å®ŒæˆåŽï¼Œæ”¾å…¥openGauss-server/contrib目录下,在脚本所在目录执行离线安装脚本 ./run_to_build.sh + +``` +cd contrib/timescaledb +sudo ./run_to_build.sh +``` + +进入`./build`文件夹中,执行`make && make install` + +在执行make install之åŽï¼Œéœ€è¦ä¸»æ–‡ä»¶å¤¹ä¸‹çš„`og-timescaledb1.7.4.sql`的内容替æ¢åˆ°openGauss-server安装路径下的`share/postgresql/extension/timescaledb--1.7.4.sql`文件中。 + +在对应数æ®åº“é…置文件(比如data/postgresql.conf)中的最åŽä¸€è¡Œå†™å…¥`shared_preload_libraries = '$libdir/timescaledb'` + +å¯åŠ¨æ•°æ®åº“,进入到sql命令行界é¢ï¼Œæ‰§è¡Œ`create extension timescaledb;`,若出现以下结果,则说明安装æˆåŠŸ + +```sql +openguass=# create extension timescaledb; +WELCOME TO + _____ _ _ ____________ +|_ _(_) | | | _ \ ___ \ + | | _ _ __ ___ ___ ___ __ _| | ___| | | | |_/ / + | | | | _ ` _ \ / _ \/ __|/ __/ _` | |/ _ \ | | | ___ \ + | | | | | | | | | __/\__ \ (_| (_| | | __/ |/ /| |_/ / + |_| |_|_| |_| |_|\___||___/\___\__,_|_|\___|___/ \____/ + Running version 1.7.4 +For more information on TimescaleDB, please visit the following links: + + 1. Getting started: https://docs.timescale.com/getting-started + 2. API reference documentation: https://docs.timescale.com/api + 3. How TimescaleDB is designed: https://docs.timescale.com/introduction/architecture + +Note: TimescaleDB collects anonymous reports to better understand and assist our users. +For more information and how to disable, please see our docs https://docs.timescaledb.com/using-timescaledb/telemetry. 
+CREATE EXTENSION +``` + +## **2.2.** 创建超表 ```sql -- Do not forget to create timescaledb extension -CREATE EXTENSION timescaledb; +-- CREATE EXTENSION timescaledb; -- We start by creating a regular SQL table CREATE TABLE conditions ( @@ -84,10 +89,8 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); ``` -- [Quick start: Creating hypertables](https://tsdb.co/GitHubTimescaleCreateHypertables) -- [Reference examples](https://tsdb.co/GitHubTimescaleHypertableReference) -#### Inserting and querying data +## **2.3.** æ’å…¥å’ŒæŸ¥è¯¢æ•°æ® Inserting data into the hypertable is done via normal SQL commands: @@ -107,72 +110,27 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, ORDER BY fifteen_min DESC, max_temp DESC; ``` -In addition, TimescaleDB includes additional functions for time-series -analysis that are not present in vanilla PostgreSQL. (For example, the `time_bucket` function above.) - -- [Quick start: Basic operations](https://tsdb.co/GitHubTimescaleBasicOperations) -- [Reference examples](https://tsdb.co/GitHubTimescaleWriteData) -- [TimescaleDB API](https://tsdb.co/GitHubTimescaleAPI) - -### Installation - -TimescaleDB is available pre-packaged for several platforms: - -- Linux: - - [RedHat / CentOS](https://tsdb.co/GitHubTimescaleRedHatCentOS) - - [Ubuntu](https://tsdb.co/GitHubTimescaleUbuntu) - - [Debian](https://tsdb.co/GitHubTimescaleDebian) -- [Docker](https://tsdb.co/GitHubTimescaleDocker) -- [MacOS (Homebrew)](https://tsdb.co/GitHubTimescaleMacOS) -- [Windows](https://tsdb.co/GitHubTimescaleWindows) - -[Timescale Cloud](https://tsdb.co/GitHubTimescaleInstallCloud) -(database-as-a-service) is available via free trial. You create database -instances in the cloud of your choice and use TimescaleDB to power your -queries, automating common operational tasks and reducing management overhead. - -We recommend following our detailed [installation instructions](https://tsdb.co/GitHubTimescaleInstall). 
- -To build from source, see instructions -[here](https://github.com/timescale/timescaledb/blob/master/docs/BuildSource.md). - - -## Resources - -### Useful tools - -- [timescaledb-tune](https://github.com/timescale/timescaledb-tune): Helps -set your PostgreSQL configuration settings based on your system's resources. -- [timescaledb-parallel-copy](https://github.com/timescale/timescaledb-parallel-copy): -Parallelize your initial bulk loading by using PostgreSQL's `COPY` across -multiple workers. - -### Additional documentation - -- [Why use TimescaleDB?](https://tsdb.co/GitHubTimescaleIntro) -- [Migrating from PostgreSQL](https://tsdb.co/GitHubTimescalePostgresMigrate) -- [Writing data](https://tsdb.co/GitHubTimescaleWriteData) -- [Querying and data analytics](https://tsdb.co/GitHubTimescaleReadData) -- [Tutorials and sample data](https://tsdb.co/GitHubTimescaleTutorials) - -### Community & help - -- [Slack Channel](https://slack.timescale.com) -- [Github Issues](https://github.com/timescale/timescaledb/issues) -- [Timescale Support](https://tsdb.co/GitHubTimescaleSupport): see support options (community & subscription) - -### Releases & updates - - - [Timescale Release Notes & Future - Plans](https://tsdb.co/GitHubTimescaleReleaseNotes): see planned and - in-progress updates and detailed information about current and past - releases. - - [Subscribe to Timescale Release - Notes](https://tsdb.co/GitHubTimescaleGetReleaseNotes) to get notified about - new releases, fixes, and early access/beta programs. 
- -### Contributing - -- [Contributor instructions](https://github.com/timescale/timescaledb/blob/master/CONTRIBUTING.md) -- [Code style guide](https://github.com/timescale/timescaledb/blob/master/docs/StyleGuide.md) +# **3.** TimescaleDBå¯ç”¨æŽ¥å£ +| åºå· | 接å£åç§° | 说明 | +| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| 1 | chunk_relation_size | 获å–超表å—çš„å…³ç³»å¤§å° | +| 2 | chunk_relation_size_pretty | 获å–超表å—çš„å…³ç³»å¤§å° | +| 3 | drop_chunks | 删除时间范围完全在指定时间之å‰ï¼ˆæˆ–之åŽï¼‰çš„æ•°æ®åŒºå—,跨所有超级表或针对特定超级表è¿è¡Œã€‚ | +| 4 | hypertable_relation_size | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | +| 5 | hypertable_relation_size_pretty | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | +| 6 | indexes_relation_size | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | +| 7 | indexes_relation_size_pretty | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | +| 8 | set_number_partitions | 设置超表上空间维度的分区(片)数 | +| 9 | show_chunks | 获å–与超表关è”的区å—列表 | +| 10 | add_dimension()空间分区 | å‘超表添加é¢å¤–的分区维度。选择作为维度的列å¯ä»¥ä½¿ç”¨é—´éš”分区或哈希分区。 | +| 11 | attach_tablespace()将表空间附加到超表 | 将表空间附加到超表并使用它æ¥å­˜å‚¨å— | +| 12 | create_hypertable()创建超表 | 创建超表 | +| 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | +| 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | +| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš” | +| 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | +| 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | +| 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | +| 19 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | +| 20 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | \ No newline at end of file -- Gitee From 0f04c6e0e5e17fc4a2ccb0bb709d1e5153722dd5 Mon Sep 17 00:00:00 2001 From: yuchao Date: Tue, 12 Dec 2023 17:19:11 +0800 Subject: 
[PATCH 118/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5server=E6=B5=8B?= =?UTF-8?q?=E8=AF=95=E7=94=A8=E4=BE=8B=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/output/view_definer_test.source | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/dolphin/output/view_definer_test.source b/contrib/dolphin/output/view_definer_test.source index 62f04dd9c..61a1bfe62 100644 --- a/contrib/dolphin/output/view_definer_test.source +++ b/contrib/dolphin/output/view_definer_test.source @@ -96,7 +96,7 @@ CREATE TABLE tab_1107262 ( ) WITH (orientation=row, compression=no); ALTER TABLE public.tab_1107262 OWNER TO "Root_Test"; -COPY tab_1107262 (id, c1) FROM stdin; +COPY public.tab_1107262 (id, c1) FROM stdin; \. ; \! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp -- Gitee From f3d6d0e5e40dc88d889af3debaeead18a84299ba Mon Sep 17 00:00:00 2001 From: lukeman Date: Fri, 8 Dec 2023 11:41:47 +0800 Subject: [PATCH 119/434] =?UTF-8?q?=E5=A4=84=E7=90=86=E7=BC=BA=E9=99=B7?= =?UTF-8?q?=EF=BC=9Aand/or=20=E8=BF=90=E7=AE=97=E7=AC=A6=E5=AF=B9=E4=BA=8E?= =?UTF-8?q?=E2=80=98true=E2=80=99=E7=9A=84=E5=A4=84=E7=90=86=E7=BB=93?= =?UTF-8?q?=E6=9E=9C=E4=B8=8EMysql=E4=B8=8D=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_operator_test.out | 24 ++ contrib/dolphin/expected/test_mysql_char.out | 266 ++++++++++++++++++ .../dolphin/expected/test_mysql_operator.out | 9 + contrib/dolphin/plugin_parser/parse_expr.cpp | 23 ++ contrib/dolphin/plugin_utils/adt/varchar.cpp | 31 +- contrib/dolphin/sql/test_mysql_char.sql | 11 + 6 files changed, 359 insertions(+), 5 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 956fda7f2..9dc12481f 100644 --- 
a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -3317,7 +3317,11 @@ CONTEXT: referenced column: char&json WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' CONTEXT: referenced column: char&json UPDATE test_json_type, test_json_table SET test_json_type.`char||json` = test_json_table.`char` || test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: char||json UPDATE test_json_type, test_json_table SET test_json_type.`char&&json` = test_json_table.`char` && test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: char&&json UPDATE test_json_type, test_json_table SET test_json_type.`char_div_json` = test_json_table.`char` div test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: PL/pgSQL function "div"(anyelement,json) line 3 at RETURN @@ -3342,7 +3346,11 @@ CONTEXT: SQL statement "SELECT (select a ^ b)" PL/pgSQL function "xor"(anyelement,json) line 3 at RETURN referenced column: char_xor_json UPDATE test_json_type, test_json_table SET test_json_type.`char_and_json` = test_json_table.`char` and test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: char_and_json UPDATE test_json_type, test_json_table SET test_json_type.`char_or_json` = test_json_table.`char` or test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: char_or_json UPDATE test_json_type, test_json_table SET test_json_type.`varchar+json` = test_json_table.`varchar` + test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: varchar+json @@ -3392,7 +3400,11 @@ CONTEXT: referenced column: varchar&json WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' CONTEXT: referenced column: 
varchar&json UPDATE test_json_type, test_json_table SET test_json_type.`varchar||json` = test_json_table.`varchar` || test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: varchar||json UPDATE test_json_type, test_json_table SET test_json_type.`varchar&&json` = test_json_table.`varchar` && test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: varchar&&json UPDATE test_json_type, test_json_table SET test_json_type.`varchar_div_json` = test_json_table.`varchar` div test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: PL/pgSQL function "div"(anyelement,json) line 3 at RETURN @@ -3417,7 +3429,11 @@ CONTEXT: SQL statement "SELECT (select a ^ b)" PL/pgSQL function "xor"(anyelement,json) line 3 at RETURN referenced column: varchar_xor_json UPDATE test_json_type, test_json_table SET test_json_type.`varchar_and_json` = test_json_table.`varchar` and test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: varchar_and_json UPDATE test_json_type, test_json_table SET test_json_type.`varchar_or_json` = test_json_table.`varchar` or test_json_table.`json`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: varchar_or_json UPDATE test_json_type, test_json_table SET test_json_type.`binary+json` = test_json_table.`binary` + test_json_table.`json`; WARNING: invalid input syntax for type double precision: "1.23a" CONTEXT: referenced column: binary+json @@ -5232,6 +5248,8 @@ CONTEXT: referenced column: json&char WARNING: invalid input syntax for type bigint: "1.23a" CONTEXT: referenced column: json&char UPDATE test_json_type, test_json_table SET test_json_type.`json||char` = test_json_table.`json` || test_json_table.`char`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: json||char UPDATE test_json_type, test_json_table SET 
test_json_type.`json&&char` = test_json_table.`json` && test_json_table.`char`; UPDATE test_json_type, test_json_table SET test_json_type.`json_div_char` = test_json_table.`json` div test_json_table.`char`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" @@ -5258,6 +5276,8 @@ PL/pgSQL function "xor"(json,anyelement) line 3 at RETURN referenced column: json_xor_char UPDATE test_json_type, test_json_table SET test_json_type.`json_and_char` = test_json_table.`json` and test_json_table.`char`; UPDATE test_json_type, test_json_table SET test_json_type.`json_or_char` = test_json_table.`json` or test_json_table.`char`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: json_or_char UPDATE test_json_type, test_json_table SET test_json_type.`json+varchar` = test_json_table.`json` + test_json_table.`varchar`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json+varchar @@ -5307,6 +5327,8 @@ CONTEXT: referenced column: json&varchar WARNING: invalid input syntax for type bigint: "1.23a" CONTEXT: referenced column: json&varchar UPDATE test_json_type, test_json_table SET test_json_type.`json||varchar` = test_json_table.`json` || test_json_table.`varchar`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: json||varchar UPDATE test_json_type, test_json_table SET test_json_type.`json&&varchar` = test_json_table.`json` && test_json_table.`varchar`; UPDATE test_json_type, test_json_table SET test_json_type.`json_div_varchar` = test_json_table.`json` div test_json_table.`varchar`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" @@ -5333,6 +5355,8 @@ PL/pgSQL function "xor"(json,anyelement) line 3 at RETURN referenced column: json_xor_varchar UPDATE test_json_type, test_json_table SET test_json_type.`json_and_varchar` = test_json_table.`json` and test_json_table.`varchar`; UPDATE test_json_type, test_json_table SET 
test_json_type.`json_or_varchar` = test_json_table.`json` or test_json_table.`varchar`; +WARNING: Truncated incorrect DOUBLE value: 1.23a +CONTEXT: referenced column: json_or_varchar UPDATE test_json_type, test_json_table SET test_json_type.`json+binary` = test_json_table.`json` + test_json_table.`binary`; WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" CONTEXT: referenced column: json+binary diff --git a/contrib/dolphin/expected/test_mysql_char.out b/contrib/dolphin/expected/test_mysql_char.out index 178e6f2ae..5892020c0 100644 --- a/contrib/dolphin/expected/test_mysql_char.out +++ b/contrib/dolphin/expected/test_mysql_char.out @@ -154,6 +154,24 @@ select a || b from testfortext; (18 rows) select a and b from testforchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -177,6 +195,24 @@ select a and b from testforchar; (18 rows) select a && b from testforchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -200,6 +236,24 @@ select a && b from testforchar; (18 rows) select a or b from testforchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -223,6 +277,24 @@ select a or b from testforchar; (18 rows) select a || b from testforchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -246,6 +318,24 @@ select a || b from testforchar; (18 rows) select a and b from testforvarchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -269,6 +359,24 @@ select a and b from testforvarchar; (18 rows) select a && b from testforvarchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -292,6 +400,24 @@ select a && b from testforvarchar; (18 rows) select a or b from testforvarchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? 
---------- t @@ -315,6 +441,24 @@ select a or b from testforvarchar; (18 rows) select a || b from testforvarchar; +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: 00100abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: -1abc +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: 00200 +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: abc1 +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- t @@ -342,220 +486,342 @@ drop table testfortext; drop table testforchar; drop table testforvarchar; select '00100abc' && 1; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- t (1 row) select '00100abc' && 0; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- f (1 row) select '00100abc' && null; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- (1 row) select '-1abc' && 1; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- t (1 row) select '-1abc' && 0; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- f (1 row) select '-1abc' && null; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- (1 row) select '00200' && 1; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? ---------- t (1 row) select '00200' && 0; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? 
---------- f (1 row) select '00200' && null; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? ---------- (1 row) select 'abc' && 1; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- f (1 row) select 'abc' && 0; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- f (1 row) select 'abc' && null; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- f (1 row) select 'abc1' && 1; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- f (1 row) select 'abc1' && 0; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- f (1 row) select 'abc1' && null; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- f (1 row) select '0.0100abc' && 1; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- t (1 row) select '0.0100abc' && 0; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- f (1 row) select '0.0100abc' && null; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- (1 row) select '00100abc' || 1; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- t (1 row) select '00100abc' || 0; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- t (1 row) select '00100abc' || null; +WARNING: Truncated incorrect DOUBLE value: 00100abc ?column? ---------- t (1 row) select '-1abc' || 1; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- t (1 row) select '-1abc' || 0; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- t (1 row) select '-1abc' || null; +WARNING: Truncated incorrect DOUBLE value: -1abc ?column? ---------- t (1 row) select '00200' || 1; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? ---------- t (1 row) select '00200' || 0; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? ---------- t (1 row) select '00200' || null; +WARNING: Truncated incorrect DOUBLE value: 00200 ?column? 
---------- t (1 row) select 'abc' || 1; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- t (1 row) select 'abc' || 0; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- f (1 row) select 'abc' || null; +WARNING: Truncated incorrect DOUBLE value: abc ?column? ---------- (1 row) select 'abc1' || 1; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- t (1 row) select 'abc1' || 0; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- f (1 row) select 'abc1' || null; +WARNING: Truncated incorrect DOUBLE value: abc1 ?column? ---------- (1 row) select '0.0100abc' || 1; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- t (1 row) select '0.0100abc' || 0; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- t (1 row) select '0.0100abc' || null; +WARNING: Truncated incorrect DOUBLE value: 0.0100abc ?column? ---------- t (1 row) +select true and 'true' AS false; +WARNING: Truncated incorrect DOUBLE value: true +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select true and 'false' AS false; +WARNING: Truncated incorrect DOUBLE value: false +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select true and '1.0 false' AS true; +WARNING: Truncated incorrect DOUBLE value: 1.0 false +CONTEXT: referenced column: true + true +------ + t +(1 row) + +select true and '0.0 true' AS false; +WARNING: Truncated incorrect DOUBLE value: 0.0 true +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select false or 'true' AS false; +WARNING: Truncated incorrect DOUBLE value: true +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select false or 'false' AS false; +WARNING: Truncated incorrect DOUBLE value: false +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select false or '1.0 false' AS true; +WARNING: Truncated incorrect DOUBLE value: 1.0 false +CONTEXT: referenced column: true + true +------ 
+ t +(1 row) + +select false or '0.0 true' AS false; +WARNING: Truncated incorrect DOUBLE value: 0.0 true +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select true and 'true'::text AS false; + false +------- + f +(1 row) + +select true and 'true'::char AS false; +WARNING: Truncated incorrect DOUBLE value: true +CONTEXT: referenced column: false + false +------- + f +(1 row) + +select true and 'true'::varchar AS false; +WARNING: Truncated incorrect DOUBLE value: true +CONTEXT: referenced column: false + false +------- + f +(1 row) + drop schema test_char cascade; reset current_schema; diff --git a/contrib/dolphin/expected/test_mysql_operator.out b/contrib/dolphin/expected/test_mysql_operator.out index 4465ab4ff..2f955d0ae 100644 --- a/contrib/dolphin/expected/test_mysql_operator.out +++ b/contrib/dolphin/expected/test_mysql_operator.out @@ -225,6 +225,12 @@ select a||b from testfortime; (10 rows) select a&&b from testforstring; +WARNING: Truncated incorrect DOUBLE value: 10 +WARNING: Truncated incorrect DOUBLE value: 10 +WARNING: Truncated incorrect DOUBLE value: -10 +WARNING: Truncated incorrect DOUBLE value: +01 +WARNING: Truncated incorrect DOUBLE value: 123a +WARNING: Truncated incorrect DOUBLE value: 123a ?column? ---------- t @@ -233,6 +239,9 @@ select a&&b from testforstring; (3 rows) select a||b from testforstring; +WARNING: Truncated incorrect DOUBLE value: 10 +WARNING: Truncated incorrect DOUBLE value: -10 +WARNING: Truncated incorrect DOUBLE value: 123a ?column? 
---------- t diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 4f40872d8..2fc341fa9 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -58,6 +58,9 @@ #include "utils/guc_tables.h" #include "utils/varbit.h" #include "plugin_parser/parse_utilcmd.h" +#ifdef DOLPHIN +#include "plugin_commands/mysqlmode.h" +#endif extern Node* build_column_default(Relation rel, int attrno, bool isInsertCmd = false, bool needOnUpdate = false); extern Node* makeAConst(Value* v, int location); @@ -1459,6 +1462,22 @@ static Node* transformAExprOp(ParseState* pstate, A_Expr* a) return result; } +static void CheckUnknownConstNode(Node* node, bool can_ignore) +{ + if (!ENABLE_B_CMPT_MODE || node->type != T_Const || ((Const*)node)->constisnull) { + return; + } + Const* cons = (Const*)node; + double resval = 0.0; + char* newval = DatumGetCString(cons->constvalue); + char* stopstring = NULL; + resval = strtod(newval, &stopstring); + if (stopstring) { + ereport((can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR, + (errmsg("Truncated incorrect DOUBLE value: %s", newval))); + } +} + static Node* transformAExprAnd(ParseState* pstate, A_Expr* a) { a->rexpr = (Node *)copyObject(a->rexpr); @@ -1466,6 +1485,7 @@ static Node* transformAExprAnd(ParseState* pstate, A_Expr* a) Node* rexpr = transformExprRecurse(pstate, a->rexpr); #ifdef DOLPHIN if (exprType(lexpr) == UNKNOWNOID) { + CheckUnknownConstNode(lexpr, pstate->p_has_ignore); lexpr = coerce_to_target_type( pstate, lexpr, UNKNOWNOID, TEXTOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); lexpr = coerce_to_boolean(pstate, lexpr, "AND"); @@ -1475,6 +1495,7 @@ static Node* transformAExprAnd(ParseState* pstate, A_Expr* a) lexpr = coerce_to_boolean(pstate, lexpr, "AND"); #ifdef DOLPHIN if (exprType(rexpr) == UNKNOWNOID) { + CheckUnknownConstNode(rexpr, pstate->p_has_ignore); rexpr = coerce_to_target_type( pstate, rexpr, UNKNOWNOID, TEXTOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); rexpr = coerce_to_boolean(pstate, rexpr, "AND"); @@ -1492,6 +1513,7 @@ static Node* transformAExprOr(ParseState* pstate, A_Expr* a) Node* rexpr = transformExprRecurse(pstate, a->rexpr); #ifdef DOLPHIN if (exprType(lexpr) == UNKNOWNOID) { + CheckUnknownConstNode(lexpr, pstate->p_has_ignore); lexpr = coerce_to_target_type( pstate, lexpr, UNKNOWNOID, TEXTOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); lexpr = coerce_to_boolean(pstate, lexpr, "OR"); @@ -1501,6 +1523,7 @@ static Node* transformAExprOr(ParseState* pstate, A_Expr* a) lexpr = coerce_to_boolean(pstate, lexpr, "OR"); #ifdef DOLPHIN if (exprType(rexpr) == UNKNOWNOID) { + CheckUnknownConstNode(rexpr, pstate->p_has_ignore); rexpr = coerce_to_target_type( pstate, rexpr, UNKNOWNOID, TEXTOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); rexpr = coerce_to_boolean(pstate, rexpr, "OR"); diff --git a/contrib/dolphin/plugin_utils/adt/varchar.cpp b/contrib/dolphin/plugin_utils/adt/varchar.cpp index cd050741e..f5d68f07a 100644 --- 
a/contrib/dolphin/plugin_utils/adt/varchar.cpp +++ b/contrib/dolphin/plugin_utils/adt/varchar.cpp @@ -2157,6 +2157,25 @@ ScalarVector* vbpcharlen(PG_FUNCTION_ARGS) #ifdef DOLPHIN +static double ProcessStrval(char* str, int len, PG_FUNCTION_ARGS) +{ + if (!ENABLE_B_CMPT_MODE) { + return atof(str); + } + double resval = 0.0; + char* newstr = (char*)palloc0(len + 1); + errno_t rc = memcpy_s(newstr, len + 1, str, len); + securec_check(rc, "\0", "\0"); + newstr[len] = '\0'; + char* stopstring = NULL; + resval = strtod(newstr, &stopstring); + if (stopstring) { + ereport((fcinfo->can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR, + (errmsg("Truncated incorrect DOUBLE value: %s", newstr))); + } + pfree_ext(newstr); + return resval; +} PG_FUNCTION_INFO_V1_PUBLIC(text_bool); extern "C" DLL_PUBLIC Datum text_bool(PG_FUNCTION_ARGS); @@ -2169,9 +2188,11 @@ Datum text_bool(PG_FUNCTION_ARGS) a1p = VARDATA_ANY(input); tmp = atof(a1p); - bool result = false; - if (parse_bool_with_len(a1p, len, &result)) { - PG_RETURN_BOOL((tmp ? true : false) || result); + if (!ENABLE_B_CMPT_MODE) { + bool result = false; + if (parse_bool_with_len(a1p, len, &result)) { + PG_RETURN_BOOL((tmp ? true : false) || result); + } } PG_RETURN_BOOL(tmp ? true : false); } @@ -2186,7 +2207,7 @@ Datum varchar_bool(PG_FUNCTION_ARGS) double tmp; a1p = VARDATA_ANY(input); - tmp = atof(a1p); + tmp = ProcessStrval(a1p, VARSIZE_ANY_EXHDR(input), fcinfo); PG_RETURN_BOOL(tmp ? true : false); } @@ -2201,7 +2222,7 @@ Datum char_bool(PG_FUNCTION_ARGS) double tmp; a1p = VARDATA_ANY(input); - tmp = atof(a1p); + tmp = ProcessStrval(a1p, VARSIZE_ANY_EXHDR(input), fcinfo); PG_RETURN_BOOL(tmp ? 
true : false); } diff --git a/contrib/dolphin/sql/test_mysql_char.sql b/contrib/dolphin/sql/test_mysql_char.sql index 60abf7313..894d08659 100644 --- a/contrib/dolphin/sql/test_mysql_char.sql +++ b/contrib/dolphin/sql/test_mysql_char.sql @@ -124,6 +124,17 @@ select 'abc1' || null; select '0.0100abc' || 1; select '0.0100abc' || 0; select '0.0100abc' || null; +select true and 'true' AS false; +select true and 'false' AS false; +select true and '1.0 false' AS true; +select true and '0.0 true' AS false; +select false or 'true' AS false; +select false or 'false' AS false; +select false or '1.0 false' AS true; +select false or '0.0 true' AS false; +select true and 'true'::text AS false; +select true and 'true'::char AS false; +select true and 'true'::varchar AS false; drop schema test_char cascade; reset current_schema; -- Gitee From 951b62a7b1025afa10b0c5d25b758892469a4d00 Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 7 Dec 2023 16:27:00 +0800 Subject: [PATCH 120/434] Add deparse_query hook. --- contrib/dolphin/expected/ansi_quotes_test.out | 39 +++++++++++++++++++ contrib/dolphin/plugin_postgres.cpp | 4 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 7 ++++ contrib/dolphin/sql/ansi_quotes_test.sql | 16 ++++++++ 4 files changed, 65 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/ansi_quotes_test.out b/contrib/dolphin/expected/ansi_quotes_test.out index d8796049d..45943b57e 100644 --- a/contrib/dolphin/expected/ansi_quotes_test.out +++ b/contrib/dolphin/expected/ansi_quotes_test.out @@ -143,6 +143,45 @@ select * from test_quotes where a = "test1"; test1 (1 row) +-- test ctas +set dolphin.sql_mode TO 'treat_bxconst_as_binary'; +create table ctas_test1 select convert(0x21, unsigned); +WARNING: invalid input syntax for type double precision: "!" 
+CONTEXT: referenced column: uint8 +\d ctas_test1 +Table "test_ansi_quotes.ctas_test1" + Column | Type | Modifiers +--------+-------+----------- + uint8 | uint8 | + +create table ctas_test3 select convert(0x21, binary); +\d ctas_test3 +Table "test_ansi_quotes.ctas_test3" + Column | Type | Modifiers +--------+----------+----------- + binary | `binary` | + +set dolphin.sql_mode TO 'treat_bxconst_as_binary,ansi_quotes'; +create table ctas_test2 select convert(0x21, unsigned); +WARNING: invalid input syntax for type double precision: "!" +CONTEXT: referenced column: uint8 +\d ctas_test2 +Table "test_ansi_quotes.ctas_test2" + Column | Type | Modifiers +--------+-------+----------- + uint8 | uint8 | + +create table ctas_test4 select convert(0x21, binary); +\d ctas_test4 +Table "test_ansi_quotes.ctas_test4" + Column | Type | Modifiers +--------+----------+----------- + binary | "binary" | + +drop table ctas_test1; +drop table ctas_test2; +drop table ctas_test3; +drop table ctas_test4; drop schema test_ansi_quotes cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table test_quotes diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index 4c9365beb..7283ee7f7 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -142,7 +142,8 @@ extern struct HTAB* b_oidHash; extern RegExternFunc b_plpgsql_function_table[3]; extern int tmp_b_fmgr_nbuiltins; extern FmgrBuiltin tmp_b_fmgr_builtins[]; - +extern void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool finalise_aggs, bool sortgroup_colno, + void* parserArg, bool qrw_phase, bool is_fqs); extern bool isAllTempObjects(Node* parse_tree, const char* query_string, bool sent_to_remote); extern void ts_check_feature_disable(); extern void ExecAlterDatabaseSetStmt(Node* parse_tree, const char* query_string, bool sent_to_remote); @@ -293,6 +294,7 @@ void init_plugin_object() global_hook_inited = true; } + 
u_sess->hook_cxt.deparseQueryHook = (void*)deparse_query; u_sess->hook_cxt.transformStmtHook = (void*)transformStmt; u_sess->hook_cxt.execInitExprHook = (void*)ExecInitExpr; u_sess->hook_cxt.computeHashHook = (void*)compute_hash_default; diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index 4cc282c71..f437ed341 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -5923,6 +5923,13 @@ static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, i void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool finalise_aggs, bool sortgroup_colno, void* parserArg, bool qrw_phase, bool is_fqs) { +#ifndef DOLPHIN + if (u_sess->hook_cxt.deparseQueryHook != NULL) { + ((deparse_query_func)(u_sess->hook_cxt.deparseQueryHook))(query, buf, parentnamespace, + finalise_aggs, sortgroup_colno, parserArg, qrw_phase, is_fqs); + return; + } +#endif OverrideSearchPath* tmp_search_path = NULL; List* schema_list = NIL; ListCell* schema = NULL; diff --git a/contrib/dolphin/sql/ansi_quotes_test.sql b/contrib/dolphin/sql/ansi_quotes_test.sql index fbd53ccf0..86ec1c090 100644 --- a/contrib/dolphin/sql/ansi_quotes_test.sql +++ b/contrib/dolphin/sql/ansi_quotes_test.sql @@ -38,5 +38,21 @@ select "test" != "test"; select * from test_quotes where a like "%test%"; select * from test_quotes where a = "test1"; +-- test ctas +set dolphin.sql_mode TO 'treat_bxconst_as_binary'; +create table ctas_test1 select convert(0x21, unsigned); +\d ctas_test1 +create table ctas_test3 select convert(0x21, binary); +\d ctas_test3 +set dolphin.sql_mode TO 'treat_bxconst_as_binary,ansi_quotes'; +create table ctas_test2 select convert(0x21, unsigned); +\d ctas_test2 +create table ctas_test4 select convert(0x21, binary); +\d ctas_test4 +drop table ctas_test1; +drop table ctas_test2; +drop table ctas_test3; +drop table ctas_test4; + drop schema test_ansi_quotes 
cascade; reset current_schema; \ No newline at end of file -- Gitee From 7ccf6425d9fb1f5e6638df688185df80a1d0b7bf Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 13 Dec 2023 10:19:40 +0800 Subject: [PATCH 121/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dhex=E6=97=A0=E6=B3=95?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2blob=E7=B1=BB=E5=9E=8B=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/builtin_funcs/db_b_hex.out | 16 +++++++++++++++- .../rollback_script/dolphin--3.0--2.0.sql | 5 ++++- contrib/dolphin/sql/builtin_funcs/db_b_hex.sql | 7 +++++++ .../dolphin/upgrade_script/dolphin--2.0--3.0.sql | 5 +++++ 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/db_b_hex.out b/contrib/dolphin/expected/builtin_funcs/db_b_hex.out index 85ad94507..064275441 100644 --- a/contrib/dolphin/expected/builtin_funcs/db_b_hex.out +++ b/contrib/dolphin/expected/builtin_funcs/db_b_hex.out @@ -232,6 +232,20 @@ select hex(c1) from bytea_to_hex_test; deadbeef (1 row) +-- test for blob with hex format +set dolphin.b_compatibility_mode to on; +create table t1 (c1 tinyblob, c2 blob, c3 mediumblob, c4 longblob); +insert into t1 values('aa', 'aa', 'aa', 'aa'); +insert into t1 values(12312, 12312, 12312, 12312); +select hex(c1) as "tinyblob_result", hex(c2) as "blob_result", hex(c3) as "mediumblob_result", hex(c4) as "longblob_result" from t1; + tinyblob_result | blob_result | mediumblob_result | longblob_result +-----------------+-------------+-------------------+----------------- + 6161 | 6161 | 6161 | 6161 + 3132333132 | 3132333132 | 3132333132 | 3132333132 +(2 rows) + drop schema db_b_hex cascade; -NOTICE: drop cascades to table bytea_to_hex_test +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table bytea_to_hex_test +drop cascades to table t1 reset current_schema; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql 
b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index afeeb1452..acc4b67ad 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -254,4 +254,7 @@ drop function if exists pg_catalog.convert_timestamptz_double(double precision); drop function if exists pg_catalog.convert_datetime_uint8(uint8); drop function if exists pg_catalog.convert_timestamptz_uint8(uint8); - +DROP FUNCTION IF EXISTS pg_catalog.hex(tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.hex(blob); +DROP FUNCTION IF EXISTS pg_catalog.hex(mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.hex(longblob); diff --git a/contrib/dolphin/sql/builtin_funcs/db_b_hex.sql b/contrib/dolphin/sql/builtin_funcs/db_b_hex.sql index 02e2c005d..f6a284aaa 100644 --- a/contrib/dolphin/sql/builtin_funcs/db_b_hex.sql +++ b/contrib/dolphin/sql/builtin_funcs/db_b_hex.sql @@ -57,5 +57,12 @@ create table bytea_to_hex_test(c1 bytea); insert into bytea_to_hex_test values (E'\\xDEADBEEF'); select hex(c1) from bytea_to_hex_test; +-- test for blob with hex format +set dolphin.b_compatibility_mode to on; +create table t1 (c1 tinyblob, c2 blob, c3 mediumblob, c4 longblob); +insert into t1 values('aa', 'aa', 'aa', 'aa'); +insert into t1 values(12312, 12312, 12312, 12312); +select hex(c1) as "tinyblob_result", hex(c2) as "blob_result", hex(c3) as "mediumblob_result", hex(c4) as "longblob_result" from t1; + drop schema db_b_hex cascade; reset current_schema; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index ac1c2ab84..116323c2e 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -431,3 +431,8 @@ create or replace function pg_catalog.timestamp_uint8_ne( uint8 ) RETURNS bool LANGUAGE SQL IMMUTABLE STRICT as $$ (SELECT $1 <> convert_timestamptz_uint8($2)) $$; create operator pg_catalog.<>(leftarg = timestamptz, 
rightarg = uint8, procedure = pg_catalog.timestamp_uint8_ne); + +CREATE OR REPLACE FUNCTION pg_catalog.hex(tinyblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; +CREATE OR REPLACE FUNCTION pg_catalog.hex(blob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; +CREATE OR REPLACE FUNCTION pg_catalog.hex(mediumblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; +CREATE OR REPLACE FUNCTION pg_catalog.hex(longblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; -- Gitee From 55faa59678b3852811af00167ced22f3b51913a6 Mon Sep 17 00:00:00 2001 From: yuchao Date: Thu, 14 Dec 2023 11:40:25 +0800 Subject: [PATCH 122/434] =?UTF-8?q?=E5=9B=9E=E5=90=88server=E4=BB=A3?= =?UTF-8?q?=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/plugin_parser/parse_utilcmd.cpp | 32 ++++++++++++++++--- contrib/dolphin/tablecmds.cpp | 2 +- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index 2995682a6..2c425a1c6 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -400,6 +400,26 @@ static void ConvertAnonymousEnum(TypeName* type) } #endif +static bool is_create_as_col_store(CreateStmt* stmt) +{ + ListCell *cell = NULL; + char* storeTypeStr = NULL; + foreach(cell, stmt->options) { + DefElem *def = (DefElem *)lfirst(cell); + if (pg_strcasecmp(def->defname, "orientation") == 0) { + if (nodeTag(def->arg) == T_String) { + storeTypeStr = strVal(def->arg); + } else if (nodeTag(def->arg) == T_TypeName) { + storeTypeStr = TypeNameToString((TypeName *)def->arg); + } else { + Assert(false); + } + } + } + return storeTypeStr && (pg_strcasecmp(storeTypeStr, ORIENTATION_COLUMN) == 0 || + pg_strcasecmp(storeTypeStr, ORIENTATION_ORC) == 0); +} + 
List* transformCreateStmt(CreateStmt* stmt, const char* queryString, const List* uuids, bool preCheck, Oid *namespaceid, bool isFirstNode) { @@ -919,9 +939,11 @@ Oid *namespaceid, bool isFirstNode) checkClusterConstraints(&cxt); /* - * Check reserve column + * Check reserve column if the table is column store */ - checkReserveColumn(&cxt); + if (is_create_as_col_store(stmt)) { + checkReserveColumn(&cxt); + } /* * Output results. @@ -5543,9 +5565,11 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query checkClusterConstraints(&cxt); /* - * Check reserve column + * Check reserve column if the table is column store */ - checkReserveColumn(&cxt); + if (RelationIsColStore(rel)) { + checkReserveColumn(&cxt); + } if ((stmt->relkind == OBJECT_FOREIGN_TABLE || stmt->relkind == OBJECT_STREAM) && cxt.alist != NIL) { Oid relationId; diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 7c20f1e4b..e699ee3ff 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -6035,7 +6035,7 @@ static AttrNumber renameatt_internal(Oid myrelid, const char* oldattname, const ); /* new name should not conflict with system columns */ - if (CHCHK_PSORT_RESERVE_COLUMN(newattname)) { + if (RelationIsColStore(targetrelation) && CHCHK_PSORT_RESERVE_COLUMN(newattname)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column name \"%s\" conflicts with a system column name", newattname))); -- Gitee From a94c4afa1ee8f34fbb3cb95584ef7449d17a0d59 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 13 Dec 2023 20:21:09 +0800 Subject: [PATCH 123/434] Sync server code. 
49824ec77fc6edf0dfd941f91958e863e4e6aa1f --- .../case_sensitive_test/alter_table.out | 12 +- .../case_sensitive_test/create_table_like.out | 14 +-- .../single_node_foreign_key.out | 14 +-- .../alter_table.out | 12 +- .../create_table_like.out | 14 +-- .../single_node_foreign_key.out | 14 +-- contrib/dolphin/expected/option.out | 2 +- contrib/dolphin/include/plugin_catalog/heap.h | 6 +- .../dolphin/include/plugin_nodes/parsenodes.h | 5 - .../dolphin/include/plugin_optimizer/prep.h | 4 +- .../dolphin/include/plugin_utils/plpgsql.h | 7 -- ...est_procedure_ddl_import_and_export.source | 4 +- .../test_shows_1.source} | 2 +- ...est_procedure_ddl_import_and_export.source | 4 +- .../test_shows_1.source} | 21 +--- contrib/dolphin/plugin_catalog/heap.cpp | 106 ++---------------- .../plugin_optimizer/commands/copy.cpp | 4 +- .../dolphin/plugin_optimizer/plan/planner.cpp | 4 +- .../plugin_optimizer/prep/prepunion.cpp | 61 ++++++++++ .../dolphin/plugin_optimizer/util/relnode.cpp | 47 +++++++- contrib/dolphin/plugin_parser/gram.y | 21 +++- .../dolphin/plugin_parser/parse_clause.cpp | 5 + .../dolphin/plugin_parser/parse_utilcmd.cpp | 12 +- contrib/dolphin/plugin_pl/plpgsql/src/gram.y | 1 + .../dolphin/plugin_pl/plpgsql/src/pl_comp.cpp | 1 + .../plugin_pl/plpgsql/src/pl_handler.cpp | 1 + .../plugin_pl/plpgsql/src/pl_scanner.cpp | 1 + .../dolphin/plugin_utils/adt/pg_locale.cpp | 7 ++ contrib/dolphin/tablecmds.cpp | 78 +++++++------ 29 files changed, 253 insertions(+), 231 deletions(-) rename contrib/dolphin/{sql/test_shows_1.sql => input/test_shows_1.source} (95%) rename contrib/dolphin/{expected/test_shows_1.out => output/test_shows_1.source} (98%) diff --git a/contrib/dolphin/expected/case_sensitive_test/alter_table.out b/contrib/dolphin/expected/case_sensitive_test/alter_table.out index b2630daed..e39db9cc9 100644 --- a/contrib/dolphin/expected/case_sensitive_test/alter_table.out +++ b/contrib/dolphin/expected/case_sensitive_test/alter_table.out @@ -192,7 +192,7 @@ Check 
constraints: "con1" CHECK (a > 0) CREATE TABLE Constraint_Rename_Test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) InheRITS (Constraint_Rename_Test); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d "Constraint_Rename_Test2" ALTER TABLE Constraint_Rename_Test2 RENAME CONSTRAINT con1 TO con1foo; -- fail ERROR: relation "Constraint_Rename_Test2" does not exist @@ -306,9 +306,9 @@ select * from Tmp3; (1 row) CREATE TABLE Tmp6 () INHERITS (Tmp3); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE Tmp7 () INHERITS (Tmp3); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update INSERT INTO Tmp6 VALUES (6, 30), (7, 16); ERROR: relation "Tmp6" does not exist on datanode1 LINE 1: INSERT INTO Tmp6 VALUES (6, 30), (7, 16); @@ -351,9 +351,9 @@ DROP TABLE Tmp2; set constraint_exclusion TO 'partition'; create table NV_Parent (d date); create table NV_Child_2010 () Inherits (NV_Parent); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table NV_Child_2011 () Inherits (NV_Parent); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update alter table NV_Child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; ERROR: relation "NV_Child_2010" does not exist alter table NV_Child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; @@ -366,7 +366,7 @@ explain (costs off) select * from NV_Parent where d between '2011-08-01' and '20 (2 rows) create table NV_Child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) Inherits (NV_Parent); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update explain (costs off) select * from NV_Parent where d between '2011-08-01'::date and '2011-08-31'::date; QUERY PLAN --------------------------------------------------------------------- diff --git a/contrib/dolphin/expected/case_sensitive_test/create_table_like.out b/contrib/dolphin/expected/case_sensitive_test/create_table_like.out index 15d27f519..2650236c9 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_table_like.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_table_like.out @@ -13,13 +13,13 @@ CREATE TABLE Inhx (xx text DEFAULT 'text'); */ CREATE TABLE Ctla (aa TEXT); CREATE TABLE Ctlb (bb TEXT) InheRITS (Ctla); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE Foo (LIKE nonexistent); ERROR: Table (null).nonexistent does not exist in current datanode. LINE 1: CREATE TABLE Foo (LIKE nonexistent); ^ CREATE TABLE Inhe (ee text, LIKE Inhx) Inherits (Ctlb); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update INSERT INTO Inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); ERROR: relation "Inhe" does not exist on datanode1 LINE 1: INSERT INTO Inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-... @@ -161,17 +161,17 @@ Has OIDs: no Options: orientation=row, compression=no CREATE TABLE Ctlt1_inh (LIKE Ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) InheRITS (Ctlt1); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "Ctlt1_inh" SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'Ctlt1_inh'::regclass; ERROR: relation "ctlt1_inh" does not exist LINE 1: ...nt'::regclass AND objoid = c.oid AND c.conrelid = 'Ctlt1_inh... ^ CREATE TABLE Ctlt13_inh () InheRITS (Ctlt1, Ctlt3); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "Ctlt13_inh" CREATE TABLE ctlt13_like (LIKE Ctlt3 INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) InheRITS (Ctlt1); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "ctlt13_like" SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass; ERROR: relation "ctlt13_like" does not exist @@ -199,9 +199,9 @@ ERROR: relation "ctlt_all" does not exist LINE 1: ...exrelid AND c.oid = i.indexrelid AND i.indrelid = 'Ctlt_all'... ^ CREATE TABLE Inh_error1 () InheRITS (Ctlt1, Ctlt4); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE Inh_error2 (LIKE Ctlt4 INCLUDING STORAGE) InheRITS (Ctlt1); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update DROP TABLE if exists Ctlt1, Ctlt2, Ctlt3, Ctlt4, Ctlt12_storage, Ctlt12_comments, Ctlt1_inh, Ctlt13_inh, ctlt13_like, Ctlt_all, Ctla, Ctlb CASCADE; NOTICE: table "Ctlt1_inh" does not exist, skipping NOTICE: table "Ctlt13_inh" does not exist, skipping diff --git a/contrib/dolphin/expected/case_sensitive_test/single_node_foreign_key.out b/contrib/dolphin/expected/case_sensitive_test/single_node_foreign_key.out index c2dbed97d..0eecf3055 100644 --- a/contrib/dolphin/expected/case_sensitive_test/single_node_foreign_key.out +++ b/contrib/dolphin/expected/case_sensitive_test/single_node_foreign_key.out @@ -867,7 +867,7 @@ DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and i -- Basic 2 table case: 1 column of matching types. create table pktable_base (base1 int not null); create table PKTABLE (ptest1 int, primary key(base1), unique(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table FKTABLE (ftest1 int references PKTABLE(base1)); ERROR: relation "PKTABLE" does not exist -- now some ins, upd, del @@ -978,7 +978,7 @@ drop table pktable_base; create table pktable_base(base1 int not null, base2 int); create table PKTABLE(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references PKTABLE(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update insert into PKTABLE (base1, ptest1, base2, ptest2) values (1, 1, 1, 1); ERROR: relation "PKTABLE" does not exist on datanode1 LINE 1: insert into PKTABLE (base1, ptest1, base2, ptest2) values (1... @@ -1025,7 +1025,7 @@ drop table pktable_base; -- 2 columns (2 tables), mismatched types create table pktable_base(base1 int not null); create table PKTABLE(ptest1 inet, primary key(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update -- just generally bad types (with and without column references on the referenced table) create table FKTABLE(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references PKTABLE); ERROR: relation "PKTABLE" does not exist @@ -1045,16 +1045,16 @@ drop table pktable_base; create table pktable_base(base1 int not null, base2 int); create table PKTABLE(ptest1 inet, ptest2 inet[], primary key(base1, ptest1), foreign key(base2, ptest2) references PKTABLE(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table PKTABLE(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references PKTABLE(ptest1, base1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table PKTABLE(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references PKTABLE(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table PKTABLE(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references PKTABLE(base1, ptest1)) Inherits (pktable_base); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update drop table PKTABLE; ERROR: table "PKTABLE" does not exist drop table pktable_base; diff --git a/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out b/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out index eb49faec3..6f2b6e404 100644 --- a/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out +++ b/contrib/dolphin/expected/case_sensitive_test_backquote/alter_table.out @@ -196,7 +196,7 @@ Check constraints: "con1" CHECK (a > 0) CREATE TABLE `Constraint_Rename_Test2` (`a` int CONSTRAINT `con1` CHECK (`a` > 0), `d` int) InheRITS (`Constraint_Rename_Test`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d "Constraint_Rename_Test2" ALTER TABLE `Constraint_Rename_Test2` RENAME CONSTRAINT `con1` TO `con1foo`; -- fail ERROR: relation "Constraint_Rename_Test2" does not exist @@ -310,9 +310,9 @@ select * from `Tmp3`; (1 row) CREATE TABLE `Tmp6` () INHERITS (`Tmp3`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE `Tmp7` () INHERITS (`Tmp3`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update INSERT INTO `Tmp6` VALUES (6, 30), (7, 16); ERROR: relation "Tmp6" does not exist on datanode1 LINE 1: INSERT INTO `Tmp6` VALUES (6, 30), (7, 16); @@ -355,9 +355,9 @@ DROP TABLE `Tmp2`; set constraint_exclusion TO 'partition'; create table `NV_Parent` (`d` date); create table `NV_Child_2010` () Inherits (`NV_Parent`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table `NV_Child_2011` () Inherits (`NV_Parent`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update alter table `NV_Child_2010` add check (`d` between '2010-01-01'::date and '2010-12-31'::date) not valid; ERROR: relation "NV_Child_2010" does not exist alter table `NV_Child_2011` add check (`d` between '2011-01-01'::date and '2011-12-31'::date) not valid; @@ -370,7 +370,7 @@ explain (costs off) select * from `NV_Parent` where `d` between '2011-08-01' and (2 rows) create table `NV_Child_2009` (check (`d` between '2009-01-01'::date and '2009-12-31'::date)) Inherits (`NV_Parent`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update explain (costs off) select * from `NV_Parent` where `d` between '2011-08-01'::date and '2011-08-31'::date; QUERY PLAN --------------------------------------------------------------------- diff --git a/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out b/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out index 756ff2fdd..4d50701f1 100644 --- a/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out +++ b/contrib/dolphin/expected/case_sensitive_test_backquote/create_table_like.out @@ -13,13 +13,13 @@ CREATE TABLE `Inhx` (`xx` text DEFAULT 'text'); */ CREATE TABLE `Ctla` (`aa` TEXT); CREATE TABLE `Ctlb` (`bb` TEXT) InheRITS (`Ctla`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE `Foo` (LIKE `nonexistent`); ERROR: Table (null).nonexistent does not exist in current datanode. LINE 1: CREATE TABLE `Foo` (LIKE `nonexistent`); ^ CREATE TABLE `Inhe` (`ee` text, LIKE `Inhx`) Inherits (`Ctlb`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update INSERT INTO `Inhe` VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); ERROR: relation "Inhe" does not exist on datanode1 LINE 1: INSERT INTO `Inhe` VALUES ('ee-col1', 'ee-col2', DEFAULT, 'e... @@ -164,17 +164,17 @@ Has OIDs: no Options: orientation=row, compression=no CREATE TABLE `Ctlt1_inh` (LIKE `Ctlt1` INCLUDING CONSTRAINTS INCLUDING COMMENTS) InheRITS (`Ctlt1`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "Ctlt1_inh" SELECT `description` FROM `pg_description`, `pg_constraint` c WHERE `classoid` = 'pg_constraint'::regclass AND `objoid` = `c`.`oid` AND `c`.`conrelid` = 'Ctlt1_inh'::regclass; ERROR: relation "ctlt1_inh" does not exist LINE 1: ...ass AND `objoid` = `c`.`oid` AND `c`.`conrelid` = 'Ctlt1_inh... ^ CREATE TABLE `Ctlt13_inh` () InheRITS (`Ctlt1`, `Ctlt3`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "Ctlt13_inh" CREATE TABLE `ctlt13_like` (LIKE `Ctlt3` INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) InheRITS (`Ctlt1`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update \d+ "ctlt13_like" SELECT `description` FROM `pg_description`, `pg_constraint` c WHERE `classoid` = 'pg_constraint'::regclass AND `objoid` = `c`.`oid` AND `c`.`conrelid` = 'ctlt13_like'::regclass; ERROR: relation "ctlt13_like" does not exist @@ -202,9 +202,9 @@ ERROR: relation "ctlt_all" does not exist LINE 1: ...`c`.`oid` = `i`.`indexrelid` AND `i`.`indrelid` = 'Ctlt_all'... ^ CREATE TABLE `Inh_error1` () InheRITS (`Ctlt1`, `Ctlt4`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update CREATE TABLE `Inh_error2` (LIKE `Ctlt4` INCLUDING STORAGE) InheRITS (`Ctlt1`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update DROP TABLE if exists `Ctlt1`, `Ctlt2`, `Ctlt3`, `Ctlt4`, `Ctlt12_storage`, `Ctlt12_comments`, `Ctlt1_inh`, `Ctlt13_inh`, `ctlt13_like`, `Ctlt_all`, `Ctla`, `Ctlb` CASCADE; NOTICE: table "Ctlt1_inh" does not exist, skipping NOTICE: table "Ctlt13_inh" does not exist, skipping diff --git a/contrib/dolphin/expected/case_sensitive_test_backquote/single_node_foreign_key.out b/contrib/dolphin/expected/case_sensitive_test_backquote/single_node_foreign_key.out index d5561f25c..5736cf519 100644 --- a/contrib/dolphin/expected/case_sensitive_test_backquote/single_node_foreign_key.out +++ b/contrib/dolphin/expected/case_sensitive_test_backquote/single_node_foreign_key.out @@ -867,7 +867,7 @@ DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and i -- Basic 2 table case: 1 column of matching types. create table `pktable_base` (`base1` int not null); create table `PKTABLE` (`ptest1` int, primary key(`base1`), unique(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table `FKTABLE` (`ftest1` int references `PKTABLE`(`base1`)); ERROR: relation "PKTABLE" does not exist -- now some ins, upd, del @@ -978,7 +978,7 @@ drop table `pktable_base`; create table `pktable_base`(`base1` int not null, `base2` int); create table `PKTABLE`(`ptest1` int, `ptest2` int, primary key(`base1`, `ptest1`), foreign key(`base2`, `ptest2`) references `PKTABLE`(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update insert into `PKTABLE` (`base1`, `ptest1`, `base2`, `ptest2`) values (1, 1, 1, 1); ERROR: relation "PKTABLE" does not exist on datanode1 LINE 1: insert into `PKTABLE` (`base1`, `ptest1`, `base2`, `ptest2`)... @@ -1025,7 +1025,7 @@ drop table `pktable_base`; -- 2 columns (2 tables), mismatched types create table `pktable_base`(`base1` int not null); create table `PKTABLE`(`ptest1` inet, primary key(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update -- just generally bad types (with and without column references on the referenced table) create table `FKTABLE`(`ftest1` cidr, `ftest2` int[], foreign key (`ftest1`, `ftest2`) references `PKTABLE`); ERROR: relation "PKTABLE" does not exist @@ -1045,16 +1045,16 @@ drop table `pktable_base`; create table `pktable_base`(`base1` int not null, `base2` int); create table `PKTABLE`(`ptest1` inet, `ptest2` inet[], primary key(`base1`, `ptest1`), foreign key(`base2`, `ptest2`) references `PKTABLE`(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table `PKTABLE`(`ptest1` inet, `ptest2` inet, primary key(`base1`, `ptest1`), foreign key(`base2`, `ptest2`) references `PKTABLE`(`ptest1`, `base1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table `PKTABLE`(`ptest1` inet, `ptest2` inet, primary key(`base1`, `ptest1`), foreign key(`ptest2`, `base2`) references `PKTABLE`(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. 
+ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table `PKTABLE`(`ptest1` inet, `ptest2` inet, primary key(`base1`, `ptest1`), foreign key(`ptest2`, `base2`) references `PKTABLE`(`base1`, `ptest1`)) Inherits (`pktable_base`); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update drop table `PKTABLE`; ERROR: table "PKTABLE" does not exist drop table `pktable_base`; diff --git a/contrib/dolphin/expected/option.out b/contrib/dolphin/expected/option.out index d55925c04..fb2db5c16 100644 --- a/contrib/dolphin/expected/option.out +++ b/contrib/dolphin/expected/option.out @@ -85,7 +85,7 @@ CREATE TABLE test10 ); create table test11 nocompress as select * from test4; create table test12() inherits (test11); -ERROR: CREATE TABLE ... INHERITS is not yet supported. +ERROR: inherits is not support in B-format database, it conflicts with multi-relation update create table if not exists test13(a int); create index idx1 on test1 using ubtree(a) with(fillfactor = 30) INCLUDE(b); ERROR: ubtree index is only supported for ustore diff --git a/contrib/dolphin/include/plugin_catalog/heap.h b/contrib/dolphin/include/plugin_catalog/heap.h index b78941970..30795c465 100644 --- a/contrib/dolphin/include/plugin_catalog/heap.h +++ b/contrib/dolphin/include/plugin_catalog/heap.h @@ -71,7 +71,7 @@ typedef struct SliceConstInfo { char* sliceName; int sliceNum; List* sliceBoundary; - Const* sliceBoundaryValue[RANGE_PARTKEYMAXNUM]; + Const* sliceBoundaryValue[MAX_RANGE_PARTKEY_NUMS]; } SliceConstInfo; extern Relation heap_create(const char *relname, @@ -179,9 +179,9 @@ extern void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc, extern void heap_truncate_one_part(Relation rel , Oid partOid); extern Oid getPartitionIdFromTuple(Relation rel, void *tuple, EState* estate, TupleTableSlot* slot, int *partitionno, bool isDDL = false, bool 
canIgnore = false); -extern Oid heapTupleGetPartitionId(Relation rel, void *tuple, int *partitionno, bool isDDL = false, +extern Oid heapTupleGetPartitionOid(Relation rel, void *tuple, int *partitionno, bool isDDL = false, bool canIgnore = false, bool partExprKeyIsNull = true); -extern Oid heapTupleGetSubPartitionId(Relation rel, void *tuple); +extern Oid heapTupleGetSubPartitionOid(Relation rel, void *tuple); extern void heap_truncate(List *relids); extern void heap_truncate_one_rel(Relation rel); extern void heap_truncate_check_FKs(List *relations, bool tempTables); diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 3bf2e80b0..9d7936c37 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -214,8 +214,6 @@ typedef enum RTEKind { * RTEs are added by the planner, they're not * present during parsing or rewriting */ #ifdef USE_SPQ - RTE_NAMEDTUPLESTORE, - RTE_TABLEFUNC, /* TableFunc(.., column list) */ RTE_VOID, /* CDB: deleted RTE */ RTE_TABLEFUNCTION /* CDB: Functions over multiset input */ #endif @@ -383,9 +381,6 @@ typedef struct RangeTblEntry { * Select * from table_name subpartition (subpartition_name); * or delete from table_name partition (partition_name, ...) 
*/ -#ifdef USE_SPQ - bool forceDistRandom; -#endif } RangeTblEntry; /* diff --git a/contrib/dolphin/include/plugin_optimizer/prep.h b/contrib/dolphin/include/plugin_optimizer/prep.h index e2b671e3e..f3588cc09 100644 --- a/contrib/dolphin/include/plugin_optimizer/prep.h +++ b/contrib/dolphin/include/plugin_optimizer/prep.h @@ -104,7 +104,9 @@ extern void make_inh_translation_list( extern Bitmapset* translate_col_privs(const Bitmapset* parent_privs, List* translated_vars); extern Node* adjust_appendrel_attrs(PlannerInfo* root, Node* node, AppendRelInfo* appinfo); - +extern Node* adjust_appendrel_attrs_multilevel( + PlannerInfo *root, Node *node, Relids child_relids, Relids top_parent_relids); +extern AppendRelInfo **find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos); extern void mark_parent_child_pushdown_flag(Query *parent, Query *child); extern bool check_base_rel_in_fromlist(Query *parse, Node *jtnode); extern UNIONALL_SHIPPING_TYPE precheck_shipping_union_all(Query *subquery, Node *setOp); diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index c791a4b48..583bc8090 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -33,13 +33,6 @@ * Definitions **********************************************************************/ -/* define our text domain for translations */ -#undef TEXTDOMAIN -#define TEXTDOMAIN PG_TEXTDOMAIN("plpgsql") - -#undef _ -#define _(x) dgettext(TEXTDOMAIN, x) - #define TABLEOFINDEXBUCKETNUM 128 /* diff --git a/contrib/dolphin/input/test_procedure_ddl_import_and_export.source b/contrib/dolphin/input/test_procedure_ddl_import_and_export.source index 2b5b4e803..d8861420d 100644 --- a/contrib/dolphin/input/test_procedure_ddl_import_and_export.source +++ b/contrib/dolphin/input/test_procedure_ddl_import_and_export.source @@ -1,8 +1,8 @@ --create procedure测试 drop database if exists dump_procedure_db; drop 
database if exists restore_procedure_db; -create database dump_procedure_db with dbcompatibility = 'B'; -create database restore_procedure_db with dbcompatibility = 'B'; +create database dump_procedure_db lc_collate='C' dbcompatibility = 'B'; +create database restore_procedure_db lc_collate='C' dbcompatibility = 'B'; \c dump_procedure_db create user test_procedure_definer password 'Test@123'; diff --git a/contrib/dolphin/sql/test_shows_1.sql b/contrib/dolphin/input/test_shows_1.source similarity index 95% rename from contrib/dolphin/sql/test_shows_1.sql rename to contrib/dolphin/input/test_shows_1.source index 114c6c2eb..82dd4ab46 100644 --- a/contrib/dolphin/sql/test_shows_1.sql +++ b/contrib/dolphin/input/test_shows_1.source @@ -36,7 +36,7 @@ id int ); -- show plugins -show plugins; +\! @abs_bindir@/gsql contrib_regression -p @portstring@ -c "show plugins" >/dev/null 2>&1; echo $? -- show tables test show tables; diff --git a/contrib/dolphin/output/test_procedure_ddl_import_and_export.source b/contrib/dolphin/output/test_procedure_ddl_import_and_export.source index 1176fefc5..859ce23fd 100644 --- a/contrib/dolphin/output/test_procedure_ddl_import_and_export.source +++ b/contrib/dolphin/output/test_procedure_ddl_import_and_export.source @@ -3,8 +3,8 @@ drop database if exists dump_procedure_db; NOTICE: database "dump_procedure_db" does not exist, skipping drop database if exists restore_procedure_db; NOTICE: database "restore_procedure_db" does not exist, skipping -create database dump_procedure_db with dbcompatibility = 'B'; -create database restore_procedure_db with dbcompatibility = 'B'; +create database dump_procedure_db lc_collate='C' dbcompatibility = 'B'; +create database restore_procedure_db lc_collate='C' dbcompatibility = 'B'; \c dump_procedure_db create user test_procedure_definer password 'Test@123'; NOTICE: The iteration value of password is not recommended.Setting the iteration value too small reduces the security of the password, and setting it too 
large results in performance degradation. diff --git a/contrib/dolphin/expected/test_shows_1.out b/contrib/dolphin/output/test_shows_1.source similarity index 98% rename from contrib/dolphin/expected/test_shows_1.out rename to contrib/dolphin/output/test_shows_1.source index 76e509eaa..8542de327 100644 --- a/contrib/dolphin/expected/test_shows_1.out +++ b/contrib/dolphin/output/test_shows_1.source @@ -75,25 +75,8 @@ CREATE TABLE tst_t3 id int ); -- show plugins -show plugins; ---? Name | Status | Type | Library | License | Comment ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* -(14 rows) - +\! @abs_bindir@/gsql contrib_regression -p @portstring@ -c "show plugins" >/dev/null 2>&1; echo $? +0 -- show tables test show tables; Tables_in_tst_schema2 diff --git a/contrib/dolphin/plugin_catalog/heap.cpp b/contrib/dolphin/plugin_catalog/heap.cpp index 1a4f214ce..415bfbc79 100644 --- a/contrib/dolphin/plugin_catalog/heap.cpp +++ b/contrib/dolphin/plugin_catalog/heap.cpp @@ -6209,10 +6209,10 @@ Oid heapAddRangePartition(Relation pgPartRel, Oid partTableOid, Oid partTablespa if (!PointerIsValid(newPartDef->boundary)) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("boundary not defined for new partition"))); } - if (newPartDef->boundary->length > PARTITION_PARTKEYMAXNUM) { + if (newPartDef->boundary->length > MAX_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("too many partition keys, allowed is %d", PARTITION_PARTKEYMAXNUM))); + errmsg("too many partition keys, allowed is %d", MAX_PARTKEY_NUMS))); } /*new partition name check*/ @@ -6666,7 +6666,7 @@ Datum Timestamp2Boundarys(Relation rel, Timestamp ts) } else { columnRaw = TimestampGetDatum(ts); } - int2vector* partKeyColumn = partMap->partitionKey; + int2vector* partKeyColumn = partMap->base.partitionKey; Assert(partKeyColumn->dim1 == 1); (void)transformDatum2Const(rel->rd_att, partKeyColumn->values[0], columnRaw, 
false, &consts); @@ -6679,9 +6679,9 @@ Datum Timestamp2Boundarys(Relation rel, Timestamp ts) Datum GetPartBoundaryByTuple(Relation rel, HeapTuple tuple) { RangePartitionMap* partMap = (RangePartitionMap*)rel->partMap; - int2vector* partKeyColumn = partMap->partitionKey; + int2vector* partKeyColumn = partMap->base.partitionKey; Assert(partKeyColumn->dim1 == 1); - Assert(partMap->type.type == PART_TYPE_INTERVAL); + Assert(partMap->base.type == PART_TYPE_INTERVAL); Assert(partMap->rangeElementsNum >= 1); Const* lastPartBoundary = partMap->rangeElements[partMap->rangeElementsNum - 1].boundary[0]; @@ -7048,10 +7048,10 @@ static void addNewPartitionTupleForTable(Relation pg_partition_rel, const char* RangePartitionDefState* lastPartition = NULL; lastPartition = (RangePartitionDefState*)lfirst(partTableState->partitionList->tail); - if (lastPartition->boundary->length > PARTITION_PARTKEYMAXNUM) { + if (lastPartition->boundary->length > MAX_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("number of partition key columns MUST less or equal than %d", PARTITION_PARTKEYMAXNUM))); + errmsg("number of partition key columns MUST less or equal than %d", MAX_PARTKEY_NUMS))); } } @@ -7594,99 +7594,15 @@ Oid getPartitionIdFromTuple(Relation rel, void *tuple, EState* estate, TupleTabl Oid targetOid = InvalidOid; bool partExprKeyIsNull = PartExprKeyIsNull(rel, &partExprKeyStr); if (partExprKeyIsNull) { - targetOid = heapTupleGetPartitionId(rel, tuple, partitionno, isDDL, canIgnore); + targetOid = heapTupleGetPartitionOid(rel, tuple, partitionno, isDDL, canIgnore); } else { Datum newval = ComputePartKeyExprTuple(rel, estate, slot, NULL, partExprKeyStr); - targetOid = heapTupleGetPartitionId(rel, (void*)newval, partitionno, isDDL, canIgnore, false); + targetOid = heapTupleGetPartitionOid(rel, (void*)newval, partitionno, isDDL, canIgnore, false); } pfree_ext(partExprKeyStr); return targetOid; } -/* - * @@GaussDB@@ - * Target : data partition - * 
Brief : get special table partition for a tuple - * : create a partition if necessary - * Description : - * Notes : - */ -Oid heapTupleGetPartitionId(Relation rel, void *tuple, int *partitionno, bool isDDL, bool canIgnore, bool partExprKeyIsNull) -{ - Oid partitionid = InvalidOid; - - /* get routing result */ - partitionRoutingForTuple(rel, tuple, u_sess->catalog_cxt.route, canIgnore, partExprKeyIsNull); - - /* if the partition exists, return partition's oid */ - if (u_sess->catalog_cxt.route->fileExist) { - Assert(OidIsValid(u_sess->catalog_cxt.route->partitionId)); - partitionid = u_sess->catalog_cxt.route->partitionId; - if (PointerIsValid(partitionno)) { - *partitionno = GetPartitionnoFromSequence(rel->partMap, u_sess->catalog_cxt.route->partSeq); - } - return partitionid; - } - - /* - * feedback for non-existing table partition. - * If the routing result indicates a range partition, give error report - */ - int level = canIgnore ? WARNING : ERROR; - switch (u_sess->catalog_cxt.route->partArea) { - /* - * If it is a range partition, give error report - */ - case PART_AREA_RANGE: { - ereport( - level, - (errcode(ERRCODE_NO_DATA_FOUND), errmsg("inserted partition key does not map to any table partition"))); - } break; - case PART_AREA_INTERVAL: { - return AddNewIntervalPartition(rel, tuple, partitionno, isDDL); - } break; - case PART_AREA_LIST: { - ereport( - level, - (errcode(ERRCODE_NO_DATA_FOUND), errmsg("inserted partition key does not map to any table partition"))); - } break; - case PART_AREA_HASH: { - ereport( - level, - (errcode(ERRCODE_NO_DATA_FOUND), errmsg("inserted partition key does not map to any table partition"))); - } break; - /* never happen; just to be self-contained */ - default: { - ereport( - level, - (errcode(ERRCODE_NO_DATA_FOUND), errmsg("Inserted partition key does not map to any table partition"), - errdetail("Unrecognized PartitionArea %d", u_sess->catalog_cxt.route->partArea))); - } break; - } - - return partitionid; -} - -Oid 
heapTupleGetSubPartitionId(Relation rel, void *tuple) -{ - Oid partitionId = InvalidOid; - Oid subPartitionId = InvalidOid; - int partitionno = INVALID_PARTITION_NO; - Partition part = NULL; - Relation partRel = NULL; - /* get partititon oid for the record */ - partitionId = heapTupleGetPartitionId(rel, tuple, &partitionno); - part = PartitionOpenWithPartitionno(rel, partitionId, partitionno, RowExclusiveLock); - partRel = partitionGetRelation(rel, part); - /* get subpartititon oid for the record */ - subPartitionId = heapTupleGetPartitionId(partRel, tuple, NULL); - - releaseDummyRelation(&partRel); - partitionClose(rel, part, RowExclusiveLock); - - return subPartitionId; -} - static bool binary_upgrade_is_next_part_pg_partition_oid_valid() { if (NULL == u_sess->upg_cxt.binary_upgrade_next_part_pg_partition_oid) { @@ -8090,7 +8006,7 @@ bool* CheckPartkeyHasTimestampwithzone(Relation partTableRel, bool isForSubParti n_key_column = ARR_DIMS(partkey_columns)[0]; /*CHECK: the ArrayType of partition key is valid*/ - if (ARR_NDIM(partkey_columns) != 1 || n_key_column < 0 || n_key_column > RANGE_PARTKEYMAXNUM || + if (ARR_NDIM(partkey_columns) != 1 || n_key_column < 0 || n_key_column > MAX_RANGE_PARTKEY_NUMS || ARR_HASNULL(partkey_columns) || ARR_ELEMTYPE(partkey_columns) != INT2OID) { relation_close(pgPartRel, AccessShareLock); ereport(ERROR, @@ -8099,7 +8015,7 @@ bool* CheckPartkeyHasTimestampwithzone(Relation partTableRel, bool isForSubParti "type.", RelationGetRelationName(partTableRel)))); } - Assert(n_key_column <= RANGE_PARTKEYMAXNUM); + Assert(n_key_column <= MAX_RANGE_PARTKEY_NUMS); /* Get int2 array of partition key column numbers*/ attnums = (int16*)ARR_DATA_PTR(partkey_columns); diff --git a/contrib/dolphin/plugin_optimizer/commands/copy.cpp b/contrib/dolphin/plugin_optimizer/commands/copy.cpp index b366555ab..85fd52c38 100644 --- a/contrib/dolphin/plugin_optimizer/commands/copy.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/copy.cpp @@ -4974,7 +4974,7 @@ 
uint64 CopyFrom(CopyState cstate) /* step 1: query and get the caching buffer */ if (isPartitionRel) { if (RelationIsSubPartitioned(resultRelationDesc)) { - targetOid = heapTupleGetSubPartitionId(resultRelationDesc, tuple); + targetOid = heapTupleGetSubPartitionOid(resultRelationDesc, tuple); } else { targetOid = getPartitionIdFromTuple(resultRelationDesc, tuple, estate, slot, NULL); } @@ -5027,7 +5027,7 @@ uint64 CopyFrom(CopyState cstate) if (isPartitionRel && needflush) { Oid targetPartOid = InvalidOid; if (RelationIsSubPartitioned(resultRelationDesc)) { - targetPartOid = heapTupleGetSubPartitionId(resultRelationDesc, tuple); + targetPartOid = heapTupleGetSubPartitionOid(resultRelationDesc, tuple); } else { targetPartOid = getPartitionIdFromTuple(resultRelationDesc, tuple, estate, slot, NULL); } diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index e25d0c7e5..7fc4d9568 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -1921,8 +1921,10 @@ Plan* subquery_planner(PlannerGlobal* glob, Query* parse, PlannerInfo* parent_ro */ if (parse->resultRelations && parse->commandType != CMD_INSERT && rt_fetch(linitial_int(parse->resultRelations), parse->rtable)->inh) + { plan = inheritance_planner(root); - else { + plan->isinherit = true; + } else { plan = grouping_planner(root, tuple_fraction); /* * Make sure the topmost plan node's targetlist exposes the original diff --git a/contrib/dolphin/plugin_optimizer/prep/prepunion.cpp b/contrib/dolphin/plugin_optimizer/prep/prepunion.cpp index 1a9068e61..45bf266eb 100644 --- a/contrib/dolphin/plugin_optimizer/prep/prepunion.cpp +++ b/contrib/dolphin/plugin_optimizer/prep/prepunion.cpp @@ -1804,6 +1804,67 @@ Bitmapset* translate_col_privs(const Bitmapset* parent_privs, List* translated_v return child_privs; } +/* + * find_appinfos_by_relids + * Find AppendRelInfo structures for all relations 
specified by relids. + * + * The AppendRelInfos are returned in an array, which can be pfree'd by the + * caller. *nappinfos is set to the number of entries in the array. + */ +AppendRelInfo **find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos) +{ + AppendRelInfo **appinfos; + int cnt = 0; + int i; + + *nappinfos = bms_num_members(relids); + appinfos = (AppendRelInfo **) palloc(sizeof(AppendRelInfo *) * *nappinfos); + + i = -1; + while ((i = bms_next_member(relids, i)) >= 0) { + AppendRelInfo *appinfo = root->append_rel_array[i]; + if (!appinfo) + elog(ERROR, "child rel %d not found in append_rel_array", i); + appinfos[cnt++] = appinfo; + } + return appinfos; +} + +/* + * adjust_appendrel_attrs_multilevel + * Apply Var translations from a toplevel appendrel parent down to a child. + * + * In some cases we need to translate expressions referencing a parent relation + * to reference an appendrel child that's multiple levels removed from it. + */ +Node *adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, Relids child_relids, Relids top_parent_relids) +{ + AppendRelInfo **appinfos; + AppendRelInfo *appinfo; + Bitmapset *parent_relids = NULL; + int nappinfos; + int cnt; + + Assert(bms_num_members(child_relids) == bms_num_members(top_parent_relids)); + appinfos = find_appinfos_by_relids(root, child_relids, &nappinfos); + + /* Construct relids set for the immediate parent of given child. */ + for (cnt = 0; cnt < nappinfos; cnt++) { + appinfo = appinfos[cnt]; + parent_relids = bms_add_member(parent_relids, appinfo->parent_relid); + } + + /* Recurse if immediate parent is not the top parent. 
*/ + if (!bms_equal(parent_relids, top_parent_relids)) + node = adjust_appendrel_attrs_multilevel(root, node, parent_relids, + top_parent_relids); + + /* Now translate for this child */ + node = adjust_appendrel_attrs(root, node, appinfo); + pfree(appinfos); + return node; +} + /* * adjust_appendrel_attrs * Copy the specified query or expression and translate Vars referring diff --git a/contrib/dolphin/plugin_optimizer/util/relnode.cpp b/contrib/dolphin/plugin_optimizer/util/relnode.cpp index bc9103c84..a5d6f7548 100644 --- a/contrib/dolphin/plugin_optimizer/util/relnode.cpp +++ b/contrib/dolphin/plugin_optimizer/util/relnode.cpp @@ -135,16 +135,41 @@ void setup_simple_rel_arrays(PlannerInfo* root) rti = 1; foreach (lc, root->parse->rtable) { RangeTblEntry* rte = (RangeTblEntry*)lfirst(lc); - root->simple_rte_array[rti++] = rte; } + + /* append_rel_array is not needed if there are no AppendRelInfos */ + if (root->append_rel_list == NIL) { + root->append_rel_array = NULL; + return; + } + + root->append_rel_array = (AppendRelInfo **) + palloc0(root->simple_rel_array_size * sizeof(AppendRelInfo *)); + + /* + * append_rel_array is filled with any already-existing AppendRelInfos, + * which currently could only come from UNION ALL flattening. We might + * add more later during inheritance expansion, but it's the + * responsibility of the expansion code to update the array properly. + */ + foreach(lc, root->append_rel_list) { + AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc); + int child_relid = appinfo->child_relid; + + /* Sanity check */ + Assert(child_relid < root->simple_rel_array_size); + if (root->append_rel_array[child_relid]) + elog(ERROR, "child relation already exists"); + root->append_rel_array[child_relid] = appinfo; + } } /* * build_simple_rel * Construct a new RelOptInfo for a base relation or 'other' relation. 
*/ -RelOptInfo* build_simple_rel(PlannerInfo* root, int relid, RelOptKind reloptkind) +RelOptInfo* build_simple_rel(PlannerInfo* root, int relid, RelOptKind reloptkind, Bitmapset *parent) { RelOptInfo* rel = NULL; RangeTblEntry* rte = NULL; @@ -279,6 +304,22 @@ RelOptInfo* build_simple_rel(PlannerInfo* root, int relid, RelOptKind reloptkind rel->locator_type = LOCATOR_TYPE_NONE; #endif + /* + * Pass assorted information down the inheritance hierarchy. + */ + + /* + * Each direct or indirect child wants to know the relids of its + * topmost parent. + */ + + if (parent) { + rel->top_parent_relids = bms_copy(parent); + } else { + rel->top_parent_relids = bms_copy(rel->relids); + parent = bms_copy(rel->top_parent_relids); + } + /* Check type of rtable entry */ switch (rte->rtekind) { case RTE_RELATION: @@ -350,7 +391,7 @@ RelOptInfo* build_simple_rel(PlannerInfo* root, int relid, RelOptKind reloptkind if (appinfo->parent_relid != (unsigned int)relid) continue; - (void)build_simple_rel(root, appinfo->child_relid, RELOPT_OTHER_MEMBER_REL); + (void)build_simple_rel(root, appinfo->child_relid, RELOPT_OTHER_MEMBER_REL, parent); } } diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 609b86100..5d6ce486f 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -11293,11 +11293,12 @@ key_action: OptInherit_without_empty: INHERITS '(' dolphin_qualified_name_list ')' { - const char* message = "CREATE TABLE ... INHERITS is not yet supported."; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); - ereport(errstate, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("CREATE TABLE ... 
INHERITS is not yet supported."))); + if (u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) { + const char* message = "inherits is not support in B-format database, it conflicts with multi-relation update"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("inherits is not support in B-format database, it conflicts with multi-relation update"))); + } $$ = $3; } ; @@ -36773,7 +36774,15 @@ dolphin_func_name_opt_arg: DOLPHINIDENT: IDENT { - $$ = $1; + if (u_sess->attr.attr_sql.enable_ignore_case_in_dquotes + && (pg_yyget_extra(yyscanner))->core_yy_extra.ident_quoted) + { + $$ = CreateDolphinIdent(pg_strtolower(pstrdup($1->str)), false); + } + else + { + $$ = $1; + } } | DB_B_JSON { diff --git a/contrib/dolphin/plugin_parser/parse_clause.cpp b/contrib/dolphin/plugin_parser/parse_clause.cpp index 32b309506..4c7347148 100644 --- a/contrib/dolphin/plugin_parser/parse_clause.cpp +++ b/contrib/dolphin/plugin_parser/parse_clause.cpp @@ -225,6 +225,11 @@ int setTargetTable(ParseState* pstate, RangeVar* relRv, bool inh, bool alsoSourc (errcode(ERRCODE_DUPLICATE_ALIAS), errmsg("table name \"%s\" specified more than once", relRv->alias->aliasname))); } else if (relRv->alias == NULL && strcmp(rte1->eref->aliasname, relRv->relname) == 0) { + if (rte1->rtekind != RTE_RELATION) { + ereport(ERROR, + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), + errmsg("The target table \"%s\" of the DELETE is not updatable", relRv->relname))); + } if (list_member_ptr(pstate->p_target_rangetblentry, rte1)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_ALIAS), errmsg("table name \"%s\" specified more than once", diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index 2995682a6..1be261bd4 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -2691,7 +2691,7 @@ static void 
transformTableLikePartitionKeys( RelationGetRelationName(relation)))); } - AssertEreport(n_key_column <= RANGE_PARTKEYMAXNUM, MOD_OPT, ""); + AssertEreport(n_key_column <= MAX_RANGE_PARTKEY_NUMS, MOD_OPT, ""); /* Get int2 array of partition key column numbers */ attnums = (int16*)ARR_DATA_PTR(partkey_columns); @@ -6201,11 +6201,11 @@ void checkPartitionSynax(CreateStmt* stmt) } /* check partition key number for none value-partition table */ - if (!value_partition && stmt->partTableState->partitionKey->length > PARTITION_PARTKEYMAXNUM) { + if (!value_partition && stmt->partTableState->partitionKey->length > MAX_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("too many partition keys for partitioned table"), - errhint("Partittion key columns can not be more than %d", PARTITION_PARTKEYMAXNUM))); + errhint("Partittion key columns can not be more than %d", MAX_PARTKEY_NUMS))); } /* check PARTITIONS clause */ @@ -7252,8 +7252,8 @@ static void get_rel_partition_info(Relation partTableRel, List** pos, Const** up return; /* nothing to do */ partMap = (RangePartitionMap*)partTableRel->partMap; - partitionKey = partMap->partitionKey; - partKeyNum = partMap->partitionKey->dim1; + partitionKey = partMap->base.partitionKey; + partKeyNum = partMap->base.partitionKey->dim1; /* get position of the partition key */ if (pos != NULL) { @@ -7352,7 +7352,7 @@ static Oid get_split_partition_oid(Relation partTableRel, SplitPartitionState* s } else { Assert(PointerIsValid(splitState->partition_for_values)); splitState->partition_for_values = transformConstIntoTargetType( - partTableRel->rd_att->attrs, partMap->partitionKey, splitState->partition_for_values); + partTableRel->rd_att->attrs, partMap->base.partitionKey, splitState->partition_for_values); srcPartOid = PartitionValuesGetPartitionOid( partTableRel, splitState->partition_for_values, AccessExclusiveLock, true, false, false); } diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y 
b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y index 3cd88f8c2..52d0c4a89 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y +++ b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y @@ -13,6 +13,7 @@ * ------------------------------------------------------------------------- */ +#include "utils/plpgsql_domain.h" #include "utils/plpgsql.h" #include "access/xact.h" diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp index b4c9b4108..958f55d81 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp @@ -14,6 +14,7 @@ */ #include "plugin_utils/plpgsql.h" +#include "utils/plpgsql_domain.h" #include "utils/pl_package.h" #include diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp index 14ad26601..bc680fbd5 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp @@ -15,6 +15,7 @@ */ #include "plugin_utils/plpgsql.h" +#include "utils/plpgsql_domain.h" #include "utils/fmgroids.h" #include "utils/pl_package.h" #include "auditfuncs.h" diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_scanner.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_scanner.cpp index 7641d65b5..df2d685d1 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_scanner.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_scanner.cpp @@ -13,6 +13,7 @@ * * ------------------------------------------------------------------------- */ +#include "utils/plpgsql_domain.h" #include "utils/builtins.h" #include "utils/plpgsql.h" #include "utils/pl_package.h" diff --git a/contrib/dolphin/plugin_utils/adt/pg_locale.cpp b/contrib/dolphin/plugin_utils/adt/pg_locale.cpp index 0b287b20a..e07aeda3e 100644 --- a/contrib/dolphin/plugin_utils/adt/pg_locale.cpp +++ b/contrib/dolphin/plugin_utils/adt/pg_locale.cpp @@ -157,6 +157,10 @@ char* pg_perm_setlocale(int 
category, const char* locale) envvar = "LC_TIME"; envbuf = t_thrd.lc_cxt.lc_time_envbuf; break; + case LC_MESSAGES: + envvar = "LC_MESSAGES"; + envbuf = t_thrd.lc_cxt.lc_messages_envbuf; + break; default: ereport(FATAL, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized LC category: %d", category))); envvar = NULL; /* keep compiler quiet */ @@ -313,6 +317,9 @@ void assign_locale_messages(const char* newval, void* extra) * LC_MESSAGES category does not exist everywhere, but accept it anyway. * We ignore failure, as per comment above. */ +#if defined(ENABLE_NLS) && defined(LC_MESSAGES) + (void) pg_perm_setlocale(LC_MESSAGES, newval); +#endif } /* diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 7c20f1e4b..530678526 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -1581,11 +1581,11 @@ static void validateDfsTableDef(CreateStmt* stmt, bool isDfsTbl) ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR), errmsg("Num of partition keys in value-partitioned table should not be zeror"))); - } else if (list_length(stmt->partTableState->partitionKey) > VALUE_PARTKEYMAXNUM) { + } else if (list_length(stmt->partTableState->partitionKey) > MAX_VALUE_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR), errmsg("Num of partition keys in value-partitioned table exceeds max allowed num:%d", - RANGE_PARTKEYMAXNUM))); + MAX_RANGE_PARTKEY_NUMS))); } /* Partition stragegy check */ @@ -7125,6 +7125,7 @@ ObjectAddress renamePartition(RenameStmt* stmt) ParseState* pstate = NULL; RangePartitionDefState* rangePartDef = NULL; Relation rel = NULL; + int2vector *partKeyArray = NULL; ObjectAddress address; /* shouldn't happen */ @@ -7207,9 +7208,9 @@ ObjectAddress renamePartition(RenameStmt* stmt) rangePartDef->boundary = stmt->object; transformPartitionValue(pstate, (Node*)rangePartDef, false); - + partKeyArray = PartitionMapGetPartKeyArray(rel->partMap); rangePartDef->boundary = transformConstIntoTargetType( - 
rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, rangePartDef->boundary); + rel->rd_att->attrs, partKeyArray, rangePartDef->boundary); partitionOid = PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, true, false); @@ -16791,7 +16792,7 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, * data type of a partitioned table's partition key can not be changed */ if (RELATION_IS_PARTITIONED(rel) && is_partition_column(rel, attnum)) { - int2vector* partKey = ((RangePartitionMap*)rel->partMap)->partitionKey; + int2vector* partKey = PartitionMapGetPartKeyArray(rel->partMap); int i = 0; for (; i < partKey->dim1; i++) { @@ -19169,7 +19170,7 @@ static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation re rangePartDef = (RangePartitionDefState*)partition; transformPartitionValue(make_parsestate(NULL), (Node*)rangePartDef, false); rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, + ((RangePartitionMap*)rel->partMap)->base.partitionKey, rangePartDef->boundary); partOid = PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, false, false); @@ -24187,14 +24188,14 @@ static Oid FindPartOidByListBoundary(Relation rel, ListPartitionMap *partMap, No Oid res; if (IsA(boundKey, RowExpr)) { /* Multi-keys partition boundary values */ partKeyValueList = transformConstIntoTargetType( - rel->rd_att->attrs, partMap->partitionKey, ((RowExpr*)boundKey)->args); + rel->rd_att->attrs, partMap->base.partitionKey, ((RowExpr*)boundKey)->args); res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); list_free_ext(partKeyValueList); return res; } Const* con = (Const*)boundKey; - FormData_pg_attribute attr = rel->rd_att->attrs[partMap->partitionKey->values[0] - 1]; + FormData_pg_attribute attr = 
rel->rd_att->attrs[partMap->base.partitionKey->values[0] - 1]; if (con->ismaxvalue) { /* @@ -24245,8 +24246,8 @@ static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partD errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", partDef->partitionInitName ? partDef->partitionInitName : partDef->partitionName))); } - partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, - partMap->partitionKey, partDef->boundary, partkeyIsFunc); + partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, partMap->base.partitionKey, + partDef->boundary, partkeyIsFunc); pfree_ext(curBound); existingPartOid = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); list_free_ext(partKeyValueList); @@ -25005,17 +25006,17 @@ static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *comma case PART_TYPE_RANGE: case PART_TYPE_INTERVAL: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; case PART_TYPE_LIST: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((ListPartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; case PART_TYPE_HASH: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((HashPartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; default: @@ -25060,13 +25061,13 @@ static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, int2vector *partitionKey = NULL; switch (rel->partMap->type) { case PART_TYPE_RANGE: - partitionKey = ((RangePartitionMap*)rel->partMap)->partitionKey; + partitionKey = rel->partMap->partitionKey; break; case PART_TYPE_LIST: - partitionKey = ((ListPartitionMap*)rel->partMap)->partitionKey; + partitionKey = 
rel->partMap->partitionKey; break; case PART_TYPE_HASH: - partitionKey = ((HashPartitionMap*)rel->partMap)->partitionKey; + partitionKey = rel->partMap->partitionKey; break; default: ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), @@ -25248,6 +25249,8 @@ static void ATExecUnusableIndexPartition(Relation rel, const char* partition_nam AccessExclusiveLock); // lock on heap partition // call the internal function ATExecSetIndexUsableState(PartitionRelationId, indexPartOid, false); + /* Invoke cache invalidation to refresh index relation data */ + CacheInvalidateRelcache(rel); } static void ATUnusableGlobalIndex(Relation rel) @@ -25360,6 +25363,8 @@ static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partitio // close index and it's partition partitionClose(parentIndex, indexPart, NoLock); index_close(parentIndex, NoLock); + /* Invoke cache invalidation to refresh index relation data */ + CacheInvalidateRelcacheByRelid(parentIndId); } freePartList(partIndexlist); @@ -25548,15 +25553,12 @@ List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) switch (nodeTag(PartDef)) { case T_RangePartitionDefState: boundary = ((RangePartitionDefState *)PartDef)->boundary; - partitionKey = ((RangePartitionMap *)partTableRel->partMap)->partitionKey; break; case T_ListPartitionDefState: boundary = ((ListPartitionDefState *)PartDef)->boundary; - partitionKey = ((ListPartitionMap *)partTableRel->partMap)->partitionKey; break; case T_HashPartitionDefState: boundary = ((HashPartitionDefState *)PartDef)->boundary; - partitionKey = ((HashPartitionMap *)partTableRel->partMap)->partitionKey; break; default: ereport(ERROR, @@ -25569,6 +25571,7 @@ List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) erraction("Check the table type."))); break; } + partitionKey = PartitionMapGetPartKeyArray(partTableRel->partMap); boundary = transformConstIntoTargetType(partTableRel->rd_att->attrs, partitionKey, boundary); return 
boundary; } @@ -28008,7 +28011,7 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT int2 bucketId = InvalidBktId; // get right partition oid for the tuple - targetPartOid = heapTupleGetPartitionId(partTableRel, (HeapTuple)tuple, NULL, true); + targetPartOid = heapTupleGetPartitionOid(partTableRel, (HeapTuple)tuple, NULL, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, INVALID_PARTITION_NO, partRel, part, RowExclusiveLock); @@ -28173,7 +28176,7 @@ template static void checkValidationForExchangeCStore(Relation partTableRel, Relation ordTableRel, int partSeq) { RangePartitionMap* partMap = (RangePartitionMap*)(partTableRel->partMap); - int2vector* partkeyColumns = partMap->partitionKey; + int2vector* partkeyColumns = partMap->base.partitionKey; int partkeyColumnNum = partkeyColumns->dim1; AttrNumber* scanAttrNumbers = NULL; @@ -28187,8 +28190,8 @@ static void checkValidationForExchangeCStore(Relation partTableRel, Relation ord bool* nulls = NULL; FormData_pg_attribute* attrs = ordTableRel->rd_att->attrs; - Const consts[RANGE_PARTKEYMAXNUM]; - Const* partKeyValues[RANGE_PARTKEYMAXNUM]; + Const consts[MAX_RANGE_PARTKEY_NUMS]; + Const* partKeyValues[MAX_RANGE_PARTKEY_NUMS]; bool isInPart = false; const int tididx = 1; // junk column for cstore delete @@ -28659,7 +28662,7 @@ static void ATExecReorganizePartition(Relation partTableRel, AlterTableCmd* cmd) destPartDefList = reorgPart->dest_partition_define_list; srcPartitionNameList = reorgPart->split_point; partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; + partKeyNum = partMap->base.partitionKey->dim1; partTableOid = RelationGetRelid(partTableRel); // check final partition num int targetPartNum = 0; @@ -28927,7 +28930,7 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) splitPart = (SplitPartitionState*)cmd->def; destPartDefList = 
splitPart->dest_partition_define_list; partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; + partKeyNum = partMap->base.partitionKey->dim1; partTableOid = RelationGetRelid(partTableRel); // get src partition oid @@ -28943,7 +28946,7 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) NoLock); } else { splitPart->partition_for_values = transformConstIntoTargetType( - partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->partition_for_values); + partTableRel->rd_att->attrs, partMap->base.partitionKey, splitPart->partition_for_values); srcPartOid = PartitionValuesGetPartitionOid( partTableRel, splitPart->partition_for_values, AccessExclusiveLock, true, true, false); } @@ -29606,14 +29609,14 @@ static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation par RangePartitionMap* partMap = NULL; ParseState* pstate = NULL; ListCell* cell = NULL; - Const* partKeyValueArr[RANGE_PARTKEYMAXNUM] = {NULL}; + Const* partKeyValueArr[MAX_RANGE_PARTKEY_NUMS] = {NULL}; int i = 0; int partKeyNum = 0; int compareSrcPart = 0; // get partition key number partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; + partKeyNum = partMap->base.partitionKey->dim1; // check split point length if (partKeyNum != list_length(splitPart->split_point)) { @@ -29635,7 +29638,7 @@ static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation par List *tmp = splitPart->split_point; splitPart->split_point = - transformConstIntoTargetType(partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->split_point); + transformConstIntoTargetType(partTableRel->rd_att->attrs, partMap->base.partitionKey, splitPart->split_point); list_free_ext(tmp); foreach (cell, splitPart->split_point) { @@ -29698,11 +29701,11 @@ static List* getDestPartBoundaryList(Relation partTableRel, List* destPartDefLis RangePartitionDefState* rangePartDef = 
(RangePartitionDefState*)lfirst(cell); List* partKeyValueList = NIL; ListCell* otherCell = NULL; - Const** partKeyValueArr = (Const**)palloc0(sizeof(Const*) * RANGE_PARTKEYMAXNUM); + Const** partKeyValueArr = (Const**)palloc0(sizeof(Const*) * MAX_RANGE_PARTKEY_NUMS); int i = 0; partKeyValueList = transformConstIntoTargetType(partTableRel->rd_att->attrs, - ((RangePartitionMap*)partTableRel->partMap)->partitionKey, + ((RangePartitionMap*)partTableRel->partMap)->base.partitionKey, rangePartDef->boundary, partkeyIsFunc); foreach (otherCell, partKeyValueList) { @@ -30457,11 +30460,11 @@ static void readTuplesAndInsertInternal(Relation tempTableRel, Relation partTabl #ifdef DOLPHIN if (RelationIsPartitioned(partTableRel)) { #endif - targetPartOid = heapTupleGetPartitionId(partTableRel, (void *)tuple, &partitionno, true); + targetPartOid = heapTupleGetPartitionOid(partTableRel, (void *)tuple, &partitionno, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partitionno, partRel, part, RowExclusiveLock); if (RelationIsSubPartitioned(partTableRel)) { - targetSubPartOid = heapTupleGetPartitionId(partRel, (void *)tuple, &subpartitionno, true); + targetSubPartOid = heapTupleGetPartitionOid(partRel, (void *)tuple, &subpartitionno, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partRel, targetSubPartOid, subpartitionno, subPartRel, subPart, RowExclusiveLock); partRel = subPartRel; @@ -30624,7 +30627,8 @@ void addToastTableForNewPartition(Relation relation, Oid newPartId, bool isForSu firstPartitionId = ((RangePartitionMap*)relation->partMap)->rangeElements[0].partitionOid; firstPartition = partitionOpen(relation, firstPartitionId, NoLock); firstPartitionToastId = firstPartition->pd_part->reltoastrelid; - + + if (OidIsValid(firstPartitionToastId)) { reltuple = SearchSysCache1(RELOID, ObjectIdGetDatum(firstPartitionToastId)); if (!PointerIsValid(reltuple)) { @@ -32174,7 +32178,7 @@ bool 
is_partition_column(Relation rel, AttrNumber att_no) } } } else if (RelationIsCommonPartitioned(rel)) { - int2vector* part_key = ((RangePartitionMap*)rel->partMap)->partitionKey; + int2vector* part_key = PartitionMapGetPartKeyArray(rel->partMap); for (int i = 0; i < part_key->dim1; i++) { if (att_no == part_key->values[i]) { is_part_col = true; @@ -32182,7 +32186,7 @@ bool is_partition_column(Relation rel, AttrNumber att_no) } } } else if (RelationIsSubPartitioned(rel)) { - int2vector *partKey = ((RangePartitionMap *)rel->partMap)->partitionKey; + int2vector* partKey = PartitionMapGetPartKeyArray(rel->partMap); for (int i = 0; i < partKey->dim1; i++) { if (att_no == partKey->values[i]) { return true; @@ -32192,7 +32196,7 @@ bool is_partition_column(Relation rel, AttrNumber att_no) Oid partOid = linitial_oid(partOidList); Partition part = partitionOpen(rel, partOid, NoLock); Relation partRel = partitionGetRelation(rel, part); - int2vector *subPartKey = ((RangePartitionMap *)partRel->partMap)->partitionKey; + int2vector* subPartKey = PartitionMapGetPartKeyArray(partRel->partMap); for (int i = 0; i < subPartKey->dim1; i++) { if (att_no == subPartKey->values[i]) { is_part_col = true; -- Gitee From e6b468fa5c07b484af17e53be599d2cde68f7368 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 14 Dec 2023 19:08:16 +0800 Subject: [PATCH 124/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8LWND=E6=89=80=E7=A4=BA=E7=9A=84int=E8=BD=AC?= =?UTF-8?q?time=E7=9A=84=E6=97=B6=E5=80=99=EF=BC=8Cinsert=E7=9A=84?= =?UTF-8?q?=E8=A1=A8=E7=8E=B0=E5=92=8Cmysql=E4=B8=8D=E4=B8=80=E8=87=B4?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8DI8LWND?= =?UTF-8?q?=E6=89=80=E7=A4=BA=E7=9A=84int=E8=BD=ACtime=E7=9A=84=E6=97=B6?= =?UTF-8?q?=E5=80=99=EF=BC=8Cinsert=E7=9A=84=E8=A1=A8=E7=8E=B0=E5=92=8Cmys?= =?UTF-8?q?ql=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20?= 
=?UTF-8?q?=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20?= =?UTF-8?q?=E4=B8=BB=E8=A6=81=E6=98=AF=E5=9C=A8int32=5Fb=5Fformat=5Ftime?= =?UTF-8?q?=E4=B8=AD=EF=BC=8C=E5=9C=A8=E9=9D=9E=E4=B8=A5=E6=A0=BC=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E5=86=99=E5=9C=BA=E6=99=AF=E4=B8=8B=EF=BC=8C=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E5=BC=82=E5=B8=B8=E7=9A=84=E6=97=B6=E5=80=99=EF=BC=8C?= =?UTF-8?q?=E6=B2=A1=E8=BF=94=E5=9B=9E0=E3=80=82=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20int32=5Fb=5Fformat=5Fti?= =?UTF-8?q?me=E5=9C=A8=E9=9D=9E=E4=B8=A5=E6=A0=BC=E6=A8=A1=E5=BC=8F?= =?UTF-8?q?=E5=86=99=E5=B9=B6=E4=B8=94=E5=85=A5=E5=8F=82=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E5=BC=82=E5=B8=B8=E7=9A=84=E6=97=B6=E5=80=99=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E8=BF=94=E5=9B=9E0=EF=BC=8C=E4=BD=BF=E5=BE=97=E5=92=8Cmysql?= =?UTF-8?q?=E4=BF=9D=E6=8C=81=E6=88=90=E4=B8=80=E8=87=B4=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue?= =?UTF-8?q?=E3=80=91:=20https://e.gitee.com/opengaussorg/dashboard=3Fissue?= =?UTF-8?q?=3DI8LWND?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 49 +++++++++++++++++++ .../dolphin/include/plugin_utils/datetime.h | 2 +- contrib/dolphin/include/plugin_utils/fmgr.h | 22 ++++++--- contrib/dolphin/plugin_utils/adt/date.cpp | 29 ++++++----- contrib/dolphin/plugin_utils/adt/datetime.cpp | 15 +++++- contrib/dolphin/plugin_utils/fmgr/fmgr.cpp | 27 +++++++--- .../b_compatibility_time_funcs3.sql | 13 +++++ 7 files changed, 128 insertions(+), 29 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index a2e66a8b7..142c7fb06 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -1565,6 +1565,55 @@ select to_char(t, 'hh24miss') from 
test_time; (3 rows) drop table test_time; +set dolphin.b_compatibility_mode = true; +set dolphin.sql_mode=''; +create table t1(c1 int1, c2 int2, c3 int4, c4 int8, c5 uint1, c6 uint2, c7 uint8); +create table t2(c1 time, c2 time, c3 time, c4 time, c5 time, c6 time, c7 time); +insert into t1 values('-69', '-32769', '-32769', '-32769', '69', '32769', '32769'); +WARNING: value "-32769" is out of range for type smallint +LINE 1: insert into t1 values('-69', '-32769', '-32769', '-32769', '... + ^ +CONTEXT: referenced column: c2 +insert into t2 select c1, c2, c3, c4, c5, c6, c7 from t1; +WARNING: time out of range +CONTEXT: referenced column: c1 +WARNING: time out of range +CONTEXT: referenced column: c2 +WARNING: time out of range +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type time: "-32769" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type time: "69" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type time: "32769" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type time: "32769" +CONTEXT: referenced column: c7 +reset dolphin.sql_mode; +insert ignore into t2 select c1, c2, c3, c4, c5, c6, c7 from t1; +WARNING: time out of range +CONTEXT: referenced column: c1 +WARNING: time out of range +CONTEXT: referenced column: c2 +WARNING: time out of range +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type time: "-32769" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type time: "69" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type time: "32769" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type time: "32769" +CONTEXT: referenced column: c7 +select * from t2; + c1 | c2 | c3 | c4 | c5 | c6 | c7 +----------+----------+----------+----------+----------+----------+---------- + 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 + 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 00:00:00 | 
00:00:00 +(2 rows) + +drop table if exists t1; +drop table if exists t2; drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/include/plugin_utils/datetime.h b/contrib/dolphin/include/plugin_utils/datetime.h index 3b1f06d3e..174e4836b 100644 --- a/contrib/dolphin/include/plugin_utils/datetime.h +++ b/contrib/dolphin/include/plugin_utils/datetime.h @@ -132,7 +132,7 @@ bool CheckDatetimeRange(const pg_tm *tm, const fsec_t fsec, const int tm_type); extern bool datetime_add_nanoseconds_with_round(pg_tm *tm, fsec_t &fsec, int nano); extern bool cstring_to_tm(const char *expr, pg_tm *tm, fsec_t &fsec, int* tzp = NULL, int* invalid_tz = NULL); -extern bool IsResetUnavailableDataTime(int dterr, bool is_support_reset_unavailable_datatime = false); +extern bool IsResetUnavailableDataTime(int dterr, pg_tm tm, bool is_support_reset_unavailable_datatime = false); #define tmfsec2float(tm, fsec) ((tm)->tm_hour * 10000 + (tm)->tm_min * 100 + (tm)->tm_sec + (fsec) / 1000000.0) diff --git a/contrib/dolphin/include/plugin_utils/fmgr.h b/contrib/dolphin/include/plugin_utils/fmgr.h index 578131cef..bc464b799 100644 --- a/contrib/dolphin/include/plugin_utils/fmgr.h +++ b/contrib/dolphin/include/plugin_utils/fmgr.h @@ -457,19 +457,25 @@ typedef const Pg_magic_struct* (*PGModuleMagicFunction)(void); * are allowed to be NULL. 
*/ extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1, bool can_ignore = false); -extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2); -extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3); -extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4); +extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, + bool can_ignore = false); +extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, + bool can_ignore = false); +extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, + bool can_ignore = false); extern Datum DirectFunctionCall5Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5); + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, + bool can_ignore = false); extern Datum DirectFunctionCall6Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6); + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, + bool can_ignore = false); extern Datum DirectFunctionCall7Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7); + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, + bool can_ignore = false); extern Datum DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, - Datum arg5, Datum arg6, Datum arg7, Datum arg8); + Datum arg5, Datum arg6, Datum arg7, Datum arg8, bool can_ignore = false); extern Datum DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, 
Datum arg2, Datum arg3, Datum arg4, - Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9); + Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9, bool can_ignore = false); /* These are for invocation of a previously-looked-up function with a * directly-computed parameter list. Note that neither arguments nor result diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index f3dbe2673..27b7cc404 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -1847,7 +1847,6 @@ Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorTy } int trunc_val = getStartingDigits(field_str); if (trunc_val < 0 || trunc_val >= 60) { - *time_error_type = TIME_INCORRECT; PG_RETURN_TIMEADT(0); } *time_error_type = TIME_INCORRECT; @@ -1855,9 +1854,7 @@ Datum time_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorTy } else if (SQL_MODE_NOT_STRICT_ON_INSERT()) { /* for case insert unavailable data, need to set the unavailable data to 0 to compatible with M */ DateTimeParseError(dterr, str, "time", true); - if (IsResetUnavailableDataTime(dterr, !CMD_TAG_IS_SELECT() && - time_cast_type != TIME_CAST_IMPLICIT)) { - *time_error_type = TIME_IGNORED_INCORRECT; + if (IsResetUnavailableDataTime(dterr, tt, time_cast_type != TIME_CAST_IMPLICIT)) { PG_RETURN_TIMEADT(0); } else { tm = &tt; // switch to M*'s parsing result @@ -2076,24 +2073,28 @@ Datum float4_cast_time(PG_FUNCTION_ARGS) Datum uint8_b_format_time(PG_FUNCTION_ARGS) { - return DirectFunctionCall1(uint64_b_format_time, UInt64GetDatum((uint64)PG_GETARG_UINT8(0))); + return DirectFunctionCall1Coll(uint64_b_format_time, InvalidOid, UInt64GetDatum((uint64)PG_GETARG_UINT8(0)), + fcinfo->can_ignore); } Datum uint16_b_format_time(PG_FUNCTION_ARGS) { - return DirectFunctionCall1(uint64_b_format_time, UInt64GetDatum((uint64)PG_GETARG_UINT16(0))); + return DirectFunctionCall1Coll(uint64_b_format_time, 
InvalidOid, UInt64GetDatum((uint64)PG_GETARG_UINT16(0)), + fcinfo->can_ignore); } Datum uint32_b_format_time(PG_FUNCTION_ARGS) { - return DirectFunctionCall1(uint64_b_format_time, UInt64GetDatum((uint64)PG_GETARG_UINT32(0))); + return DirectFunctionCall1Coll(uint64_b_format_time, InvalidOid, UInt64GetDatum((uint64)PG_GETARG_UINT32(0)), + fcinfo->can_ignore); } Datum uint64_b_format_time(PG_FUNCTION_ARGS) { uint64 number = PG_GETARG_UINT64(0); char *str = DatumGetCString(DirectFunctionCall1(uint8out, UInt64GetDatum(number))); - return DirectFunctionCall3(time_in, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + return DirectFunctionCall3Coll(time_in, InvalidOid, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1), fcinfo->can_ignore); } Datum uint8_cast_time(PG_FUNCTION_ARGS) @@ -2157,12 +2158,14 @@ Datum uint_cast_time_internal(PG_FUNCTION_ARGS, uint64 number, bool* isnull) Datum int8_b_format_time(PG_FUNCTION_ARGS) { - return DirectFunctionCall1(int32_b_format_time, Int32GetDatum((int32)PG_GETARG_INT8(0))); + return DirectFunctionCall1Coll(int32_b_format_time, InvalidOid, Int32GetDatum((int32)PG_GETARG_INT8(0)), + fcinfo->can_ignore); } Datum int16_b_format_time(PG_FUNCTION_ARGS) { - return DirectFunctionCall1(int32_b_format_time, Int32GetDatum((int32)PG_GETARG_INT16(0))); + return DirectFunctionCall1Coll(int32_b_format_time, InvalidOid, Int32GetDatum((int32)PG_GETARG_INT16(0)), + fcinfo->can_ignore); } /* int4(hhmmss) convert to b format time */ @@ -2180,6 +2183,9 @@ Datum int32_b_format_time(PG_FUNCTION_ARGS) if (dterr) { ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("time out of range"))); + if (fcinfo->can_ignore || (SQL_MODE_NOT_STRICT_ON_INSERT())) { + PG_RETURN_TIMEADT(0); + } } tm2time(tm, 0, &result); PG_RETURN_TIMEADT(result * sign); @@ -2194,7 +2200,8 @@ Datum int64_b_format_time(PG_FUNCTION_ARGS) return DirectFunctionCall1(timestamp_time, datetime); } char *str = 
DatumGetCString(DirectFunctionCall1(int8out, UInt64GetDatum(number))); - return DirectFunctionCall3(time_in, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + return DirectFunctionCall3Coll(time_in, InvalidOid, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1), fcinfo->can_ignore); } Datum int8_cast_time(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/plugin_utils/adt/datetime.cpp b/contrib/dolphin/plugin_utils/adt/datetime.cpp index b7f0498ed..f469123dc 100644 --- a/contrib/dolphin/plugin_utils/adt/datetime.cpp +++ b/contrib/dolphin/plugin_utils/adt/datetime.cpp @@ -3590,12 +3590,22 @@ void DateTimeParseErrorInternal(int dterr, const char* str, const char* datatype } } +/* + * check if the the time oveflow. + * + * return values: true: overflow, false : normal time + */ +bool IsTimeOverFlow(pg_tm tm) +{ + return tm.tm_min > TIME_MAX_MINUTE || tm.tm_sec > TIME_MAX_SECOND; +} + /* * ignore the error on unstrict mode. * * return values: need to reset the time value or not */ -bool IsResetUnavailableDataTime(int dterr, bool is_support_reset_unavailable_datatime) +bool IsResetUnavailableDataTime(int dterr, pg_tm tm, bool is_support_reset_unavailable_datatime) { switch (dterr) { case DTERR_FIELD_OVERFLOW: @@ -3609,6 +3619,9 @@ bool IsResetUnavailableDataTime(int dterr, bool is_support_reset_unavailable_dat case DTERR_ZERO_DATE: break; case DTERR_BAD_FORMAT: + /* some case overflow will be parserd as bad format error, + such as insert into xx as select 32769 */ + return is_support_reset_unavailable_datatime && IsTimeOverFlow(tm); default: break; } diff --git a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp index 1b9cf7511..434a55e44 100644 --- a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp +++ b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp @@ -1310,7 +1310,7 @@ Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1, bool c return result; } -Datum 
DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2) +Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1321,6 +1321,7 @@ Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.arg[1] = arg2; fcinfo.argnull[0] = false; fcinfo.argnull[1] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1333,7 +1334,7 @@ Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum return result; } -Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3) +Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1346,6 +1347,7 @@ Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.argnull[0] = false; fcinfo.argnull[1] = false; fcinfo.argnull[2] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1358,7 +1360,8 @@ Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum return result; } -Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4) +Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, + bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1373,6 +1376,7 @@ Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.argnull[1] = false; fcinfo.argnull[2] = false; fcinfo.argnull[3] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1386,7 +1390,7 @@ Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum } Datum DirectFunctionCall5Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5) + PGFunction func, Oid 
collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1403,6 +1407,7 @@ Datum DirectFunctionCall5Coll( fcinfo.argnull[2] = false; fcinfo.argnull[3] = false; fcinfo.argnull[4] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1416,7 +1421,8 @@ Datum DirectFunctionCall5Coll( } Datum DirectFunctionCall6Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6) + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, + bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1435,6 +1441,7 @@ Datum DirectFunctionCall6Coll( fcinfo.argnull[3] = false; fcinfo.argnull[4] = false; fcinfo.argnull[5] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1448,7 +1455,8 @@ Datum DirectFunctionCall6Coll( } Datum DirectFunctionCall7Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7) + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, Datum arg6, Datum arg7, + bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1469,6 +1477,7 @@ Datum DirectFunctionCall7Coll( fcinfo.argnull[4] = false; fcinfo.argnull[5] = false; fcinfo.argnull[6] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); @@ -1482,7 +1491,7 @@ Datum DirectFunctionCall7Coll( } Datum DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, - Datum arg5, Datum arg6, Datum arg7, Datum arg8) + Datum arg5, Datum arg6, Datum arg7, Datum arg8, bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1505,6 +1514,7 @@ Datum DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.argnull[5] = false; fcinfo.argnull[6] = false; fcinfo.argnull[7] = false; + fcinfo.can_ignore = 
can_ignore; result = (*func)(&fcinfo); @@ -1518,7 +1528,7 @@ Datum DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum } Datum DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, - Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9) + Datum arg5, Datum arg6, Datum arg7, Datum arg8, Datum arg9, bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; @@ -1543,6 +1553,7 @@ Datum DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.argnull[6] = false; fcinfo.argnull[7] = false; fcinfo.argnull[8] = false; + fcinfo.can_ignore = can_ignore; result = (*func)(&fcinfo); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index be6458f73..620477a9f 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -413,5 +413,18 @@ select date_format(t, '%H%i%s.%f') from test_time; select to_char(t, 'hh24miss') from test_time; drop table test_time; + +set dolphin.b_compatibility_mode = true; +set dolphin.sql_mode=''; +create table t1(c1 int1, c2 int2, c3 int4, c4 int8, c5 uint1, c6 uint2, c7 uint8); +create table t2(c1 time, c2 time, c3 time, c4 time, c5 time, c6 time, c7 time); +insert into t1 values('-69', '-32769', '-32769', '-32769', '69', '32769', '32769'); +insert into t2 select c1, c2, c3, c4, c5, c6, c7 from t1; +reset dolphin.sql_mode; +insert ignore into t2 select c1, c2, c3, c4, c5, c6, c7 from t1; +select * from t2; +drop table if exists t1; +drop table if exists t2; + drop schema b_time_funcs3 cascade; reset current_schema; -- Gitee From 3b4bdd918eead79c03df4c6c1811dd9b198a4613 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Thu, 14 Dec 2023 21:05:58 +0800 Subject: [PATCH 125/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= 
=?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E6=94=AF=E6=8C=81DML+Select?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/include/spq/spq_mutate.h | 2 +- .../translate/CTranslatorDXLToPlStmt.h | 5 + contrib/spq_plugin/src/guc_spq.cpp | 98 ++++++++++ contrib/spq_plugin/src/spq/spq_mutate.cpp | 122 +++++++++++- .../translate/CTranslatorDXLToPlStmt.cpp | 173 +++++++++++++----- .../translate/CTranslatorDXLToScalar.cpp | 5 +- .../translate/CTranslatorQueryToDXL.cpp | 1 + contrib/spq_plugin/src/spqplugin.cpp | 57 +++++- 8 files changed, 400 insertions(+), 63 deletions(-) diff --git a/contrib/spq_plugin/include/spq/spq_mutate.h b/contrib/spq_plugin/include/spq/spq_mutate.h index 782caa531..9b425161e 100644 --- a/contrib/spq_plugin/include/spq/spq_mutate.h +++ b/contrib/spq_plugin/include/spq/spq_mutate.h @@ -31,6 +31,6 @@ extern Plan *apply_shareinput_xslice(Plan *plan, PlannerInfo *root, PlanSlice *s extern void remove_subquery_in_RTEs(Node *node); extern bool is_plan_node(Node *node); extern void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal *glob); -extern Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *result, Plan *plan, bool &top); +extern Plan *replace_motion_stream_recurse(PlannerInfo* root, SpqSliceContext *result, Plan *plan, bool &top, bool fromdml = false); #endif /* SPQ_MUTATE_H */ diff --git a/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.h b/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.h index a4150e177..846722424 100644 --- a/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.h +++ b/contrib/spq_plugin/include/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.h @@ -378,6 +378,11 @@ private: ctxt_translation_prev_siblings // translation contexts of previous siblings ); + // add a target entry for a junk column with given colid to the target list + static 
void AddJunkTargetEntryForColId( + List **target_list, CDXLTranslateContext *dxl_translate_ctxt, + ULONG colid, const char *resname); + // translate a Split operator Plan *TranslateDXLSplit( const CDXLNode *split_dxlnode, CDXLTranslateContext *output_context, diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 529c7330c..1ec426c76 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1149,6 +1149,56 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_insert_select", + "Enable insert concurrently on spq.", + NULL, + &u_sess->attr.attr_spq.spq_enable_insert_select, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_insert_from_tableless", + "Enable insert from tableless on spq.", + NULL, + &u_sess->attr.attr_spq.spq_enable_insert_from_tableless, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_insert_order_sensitive", + "Enable parallel insert need ordered.", + NULL, + &u_sess->attr.attr_spq.spq_enable_insert_order_sensitive, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_delete", + "Enable delete concurrently on spq.", + NULL, + &u_sess->attr.attr_spq.spq_enable_delete, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_update", + "Enable update concurrently on spq.", + NULL, + &u_sess->attr.attr_spq.spq_enable_update, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesInt() @@ -1333,6 +1383,54 @@ static void InitSpqConfigureNamesInt() NULL, NULL, NULL); + DefineCustomIntVariable("spqplugin.spq_insert_dop_num", + "Sets spq the num of insert dop", + NULL, + &u_sess->attr.attr_spq.spq_insert_dop_num, + 1, + 1, + 1, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + 
DefineCustomIntVariable("spqplugin.spq_delete_dop_num", + "Sets spq the num of delete dop", + NULL, + &u_sess->attr.attr_spq.spq_delete_dop_num, + 1, + 1, + 1, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomIntVariable("spqplugin.spq_update_dop_num", + "Sets spq the num of update dop", + NULL, + &u_sess->attr.attr_spq.spq_update_dop_num, + 1, + 1, + 1, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomIntVariable("spqplugin.spq_wr_node_index", + "DML : Sets the write node' index according to cluster_map", + NULL, + &u_sess->attr.attr_spq.spq_wr_node_index, + 0, + 0, + 128, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesReal() diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index cfa3cebf2..ed72c1130 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -660,6 +660,7 @@ bool check_slice_dop(PlanSlice *slices, Plan *subplan, PlannedStmt *result) } return producerDop == subplan->dop; } + Plan* make_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result) { PlanSlice *slices = &(result->slices[motion->motionID]); @@ -729,6 +730,41 @@ Plan* make_sort(Motion *motion, Plan *subplan) node->nullsFirst = motion->nullsFirst; return (Plan*)node; } + +Plan* make_dml_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result) +{ + Stream* stream = makeNode(Stream); + Plan* plan = &stream->scan.plan; + Distribution* distribution = ng_get_dest_distribution(subplan); + stream->distribute_keys = make_distributed_key_by_groupingset(root, subplan, motion->hashExprs); + stream->is_sorted = false; + stream->sort = NULL; + copy_plan_costsize(plan, &motion->plan); + plan->distributed_keys = stream->distribute_keys; + plan->targetlist = list_copy(motion->plan.targetlist); + plan->lefttree = subplan; + plan->righttree = NULL; + plan->exec_nodes = ng_get_dest_execnodes(subplan); + 
plan->hasUniqueResults = subplan->hasUniqueResults; + plan->multiple = 1.0; + + // set by redistribute_keys? + stream->smpDesc.producerDop = subplan->dop; + stream->smpDesc.consumerDop = 1; + + plan->dop = stream->smpDesc.consumerDop; + + stream->smpDesc.distriType = REMOTE_DML_WRITE_NODE; + stream->type = STREAM_REDISTRIBUTE; + stream->consumer_nodes = ng_convert_to_exec_nodes(distribution, LOCATOR_TYPE_HASH, RELATION_ACCESS_READ); + + stream->streamID = motion->motionID; + if (motion->sendSorted) { + return make_sort(motion, (Plan*)stream); + } + return (Plan*)stream; +} + Plan* create_spq_local_gather(PlannerInfo* root, Plan* plan, Motion *motion) { if (IsA(plan, Stream)) { @@ -838,7 +874,29 @@ Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion, Plann } return (Plan*)stream_node; } -Plan* tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bool &top) +Plan *make_gather_remote_top(Plan *lefttree, PlannedStmt *result) +{ + PlanSlice *slices = &(result->slices[0]); + RemoteQuery* remote_query = makeNode(RemoteQuery); + remote_query->combine_type = COMBINE_TYPE_NONE; + remote_query->base_tlist = NIL; + remote_query->sql_statement = NULL; + remote_query->exec_nodes = lefttree->exec_nodes; + remote_query->read_only = true; + + remote_query->scan.plan.exec_nodes = remote_query->exec_nodes; + remote_query->scan.plan.lefttree = lefttree; + remote_query->scan.plan.exec_type = EXEC_ON_COORDS; + remote_query->is_simple = true; + remote_query->rq_need_proj = false; + remote_query->scan.plan.plan_width = lefttree->plan_width; + remote_query->sort = NULL; + remote_query->scan.plan.dop = 1; + remote_query->nodeCount = slices->numsegments > 1 ? 
t_thrd.spq_ctx.num_nodes : 1; + return (Plan*)remote_query; +} + +Plan *tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bool &top, bool fromdml) { bool backtop = top; top = false; @@ -860,6 +918,11 @@ Plan* tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, ereport(LOG,(errmsg("[SliceInfo] sliceIndex[%d] slicetype[%d] worker_idx[%d] parentIndex[%d]", slices->sliceIndex, slices->gangType, slices->worker_idx, slices->parentIndex))); } + if (fromdml && backtop) { + top = backtop; + return make_dml_stream(root, subplan, motion, cxt->result); + } + // no need check motion->motionID again in below func; if (motion->motionType == MOTIONTYPE_GATHER) { if (backtop) { @@ -874,7 +937,7 @@ Plan* tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, } } //TODO SPQ need fix: dops and multiple gather -Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Plan *plan, bool &top) +Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Plan *plan, bool &top, bool fromdml) { ListCell* lc = NULL; @@ -896,7 +959,7 @@ Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Pla if (IsA(node, SubPlan)) { subplan = (SubPlan*)lfirst(lc); initNode = (Plan*)list_nth(subplans, subplan->plan_id - 1); - lfirst(lc) = replace_motion_stream_recurse(root, cxt, initNode, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, initNode, top, fromdml); } } list_free_ext(subplan_list); @@ -906,14 +969,14 @@ Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Pla List* initplans = plan->initPlan; foreach (lc, initplans) { Plan* initplan = (Plan*)lfirst(lc); - lfirst(lc) = replace_motion_stream_recurse(root, cxt, initplan, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, initplan, top, fromdml); } if (IsA(plan, Append)) { Append* node = (Append*)plan; foreach(lc, node->appendplans) { Plan* initNode = (Plan*)lfirst(lc); - 
lfirst(lc) = replace_motion_stream_recurse(root, cxt, initNode, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, initNode, top, fromdml); } } @@ -921,22 +984,25 @@ Plan *replace_motion_stream_recurse(PlannerInfo *root, SpqSliceContext *cxt, Pla if (top == true) { ereport(ERROR, (errmsg("There's no gather on sequence curentIndex[%d]", cxt->curentIndex))); } + if (fromdml) { + ereport(ERROR, (errmsg("not support SPQ DML with ShareInputScan"))); + } Sequence* node = (Sequence*)plan; foreach(lc, node->subplans) { Plan* subplan = (Plan*)lfirst(lc); - lfirst(lc) = replace_motion_stream_recurse(root, cxt, subplan, top); + lfirst(lc) = replace_motion_stream_recurse(root, cxt, subplan, top, fromdml); } } if (IsA(plan, Motion)) { - return tran_motion_to_stream(root, cxt, plan, top); + return tran_motion_to_stream(root, cxt, plan, top, fromdml); } else { if (plan->lefttree) { - plan->lefttree = replace_motion_stream_recurse(root, cxt, plan->lefttree, top); + plan->lefttree = replace_motion_stream_recurse(root, cxt, plan->lefttree, top, fromdml); plan->dop = plan->lefttree->dop; } if (plan->righttree) { - plan->righttree = replace_motion_stream_recurse(root, cxt, plan->righttree, top); + plan->righttree = replace_motion_stream_recurse(root, cxt, plan->righttree, top, fromdml); } if (plan->lefttree == nullptr && plan->righttree == nullptr) { if (cxt->curentIndex >= cxt->result->numSlices) { @@ -964,6 +1030,36 @@ static void InitRemoteNodeDefinition(PlannedStmt* planstmt) planstmt->nodesDefinition = (NodeDefinition *)palloc0(nodes_size); memcpy_s(planstmt->nodesDefinition, nodes_size, t_thrd.spq_ctx.nodesDefinition, nodes_size); } +Plan *replace_motion_dml(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bool &top) +{ + if (!IsA(plan, ModifyTable)) { + ereport(ERROR, (errmsg("replace_motion_dml is %d", (int)nodeTag(plan)))); + return NULL; + } + PlannerGlobal *glob = root->glob; + int subplan_id = 0; + ListCell *lp; + foreach (lp, glob->subplans) { + if 
(cxt->result->subplan_sliceIds[subplan_id] == 0) { + ereport(ERROR, (errmsg("not support SPQ DML when DML_STREAM in subplan"))); + } + subplan_id++; + } + Plan *remote_query_plan = make_gather_remote_top(plan, cxt->result); + ModifyTable* node = (ModifyTable*)plan; + ListCell* l = NULL; + foreach (l, node->plans) { + Plan *subplan = (Plan*)lfirst(l); + if (subplan) { + subplan = replace_motion_stream_recurse(root, cxt, subplan, top, true); + lfirst(l) = subplan; + } + } + plan->dop = 1; + plan->parallel_enabled = (plan->dop > 1); + return remote_query_plan; +} + void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal *glob) { bool top = true; @@ -977,7 +1073,13 @@ void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal SpqSliceContext sliceCxt; sliceCxt.result = result; sliceCxt.curentIndex = 0; - result->planTree = replace_motion_stream_recurse(root, &sliceCxt, result->planTree, top); + /* whether select's part top stream has appeared, + * top stream is to send scaning data to qc to modify table*/ + if (root->parse->commandType == CMD_SELECT) { + result->planTree = replace_motion_stream_recurse(root, &sliceCxt, result->planTree, top); + } else { + result->planTree = replace_motion_dml(root, &sliceCxt, result->planTree, top); + } // should fix all? 
//result->planTree = set_plan_references(root, result->planTree); int parent_node_id = INITIAL_PARENT_NODE_ID; /* beginning with INITIAL_PARENT_NODE_ID */ diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index 83f171373..a43c1e985 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -4316,14 +4316,12 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( const CDXLNode *dml_dxlnode, CDXLTranslateContext *output_context, CDXLTranslationContextArray *ctxt_translation_prev_siblings) { - // TODO SPQ undef DML -#if 0 // translate table descriptor into a range table entry CDXLPhysicalDML *phy_dml_dxlop = CDXLPhysicalDML::Cast(dml_dxlnode->GetOperator()); - // create DML node - DML *dml = MakeNode(DML); + // create ModifyTable node + ModifyTable *dml = MakeNode(ModifyTable); Plan *plan = &(dml->plan); AclMode acl_mode = ACL_NO_RIGHTS; @@ -4333,12 +4331,14 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( { m_cmd_type = CMD_DELETE; acl_mode = ACL_DELETE; + acl_mode |= ACL_SELECT; break; } case spqdxl::Edxldmlupdate: { m_cmd_type = CMD_UPDATE; acl_mode = ACL_UPDATE; + acl_mode |= ACL_SELECT; break; } case spqdxl::Edxldmlinsert: @@ -4370,7 +4370,6 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( // add the new range table entry as the last element of the range table Index index = spqdb::ListLength(m_dxl_to_plstmt_context->GetRTableEntriesList()) + 1; - dml->scanrelid = index; m_result_rel_list = spqdb::LAppendInt(m_result_rel_list, index); @@ -4382,7 +4381,7 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( table_descr, index, &base_table_context); SPQOS_ASSERT(NULL != rte); rte->requiredPerms |= acl_mode; - m_dxl_to_plstmt_context->AddRTE(rte); + m_dxl_to_plstmt_context->AddRTE(rte, true); CDXLNode *project_list_dxlnode = (*dml_dxlnode)[0]; CDXLNode 
*child_dxlnode = (*dml_dxlnode)[1]; @@ -4400,55 +4399,96 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( // translate proj list List *dml_target_list = TranslateDXLProjList(project_list_dxlnode, - NULL, // translate context for the base table - child_contexts, output_context); - - if (md_rel->HasDroppedColumns()) - { - // pad DML target list with NULLs for dropped columns for all DML operator types - List *target_list_with_dropped_cols = - CreateTargetListWithNullsForDroppedCols(dml_target_list, md_rel); - spqdb::SPQDBFree(dml_target_list); - dml_target_list = target_list_with_dropped_cols; + NULL, // translate context for the base table + child_contexts, output_context); + + List *target_list_with_dropped_cols = + CreateTargetListWithNullsForDroppedCols(dml_target_list, md_rel); + spqdb::SPQDBFree(dml_target_list); + dml_target_list = target_list_with_dropped_cols; + + // Add junk columns to the target list for the 'action', 'ctid', + // , and tuple's 'oid'. The ModifyTable node will find + // these based on the resnames. ORCA also includes a similar column for + // partition Oid in the child's target list, but we don't use it for + // anything in GPDB. + if (m_cmd_type == CMD_UPDATE) + (void) AddJunkTargetEntryForColId(&dml_target_list, &child_context, + phy_dml_dxlop->ActionColId(), + "DMLAction"); + + if (m_cmd_type == CMD_UPDATE || m_cmd_type == CMD_DELETE) + { + AddJunkTargetEntryForColId(&dml_target_list, &child_context, + phy_dml_dxlop->GetCtIdColId(), "ctid"); } + if (m_cmd_type == CMD_UPDATE && phy_dml_dxlop->IsOidsPreserved()) + AddJunkTargetEntryForColId(&dml_target_list, &child_context, + phy_dml_dxlop->GetTupleOid(), "oid"); + + // Add a Result node on top of the child plan, to coerce the target + // list to match the exact physical layout of the target table, + // including dropped columns. Often, the Result node isn't really + // needed, as the child node could do the projection, but we don't have + // the information to determine that here. 
There's a step in the + // backend optimize_query() function to eliminate unnecessary Results + // throught the plan, hopefully this Result gets eliminated there. + Result *result = MakeNode(Result); + Plan *result_plan = &(result->plan); - // Extract column numbers of the action and ctid columns from the - // target list. - dml->actionColIdx = AddTargetEntryForColId(&dml_target_list, &child_context, - phy_dml_dxlop->ActionColId(), - true /*is_resjunk*/); - dml->ctidColIdx = AddTargetEntryForColId(&dml_target_list, &child_context, - phy_dml_dxlop->GetCtIdColId(), - true /*is_resjunk*/); - if (phy_dml_dxlop->IsOidsPreserved()) - { - dml->tupleoidColIdx = AddTargetEntryForColId( - &dml_target_list, &child_context, phy_dml_dxlop->GetTupleOid(), - true /*is_resjunk*/); - } - else - { - dml->tupleoidColIdx = 0; + result_plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); + result_plan->lefttree = child_plan; + if (result_plan->lefttree != NULL) { + inherit_plan_locator_info(result_plan, result_plan->lefttree); } - SPQOS_ASSERT(0 != dml->actionColIdx); + result_plan->targetlist = target_list_with_dropped_cols; + SetParamIds(result_plan); - plan->targetlist = dml_target_list; + child_plan = (Plan *) result; - plan->lefttree = child_plan; - //plan->nMotionNodes = child_plan->nMotionNodes; - plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); + dml->operation = m_cmd_type; + dml->canSetTag = true; // FIXME + dml->resultRelations = ListMake1Int(index); + dml->resultRelIndex = list_length(m_result_rel_list) - 1; + dml->plans = ListMake1(child_plan); - if (CMD_INSERT == m_cmd_type && 0 == //plan->nMotionNodes) - { - List *direct_dispatch_segids = TranslateDXLDirectDispatchInfo( - phy_dml_dxlop->GetDXLDirectDispatchInfo()); - plan->directDispatch.contentIds = direct_dispatch_segids; - plan->directDispatch.isDirectDispatch = (NIL != direct_dispatch_segids); + m_result_rel_list = lappend(NIL, m_result_rel_list); + dml->resultRelations = m_result_rel_list; + 
+ dml->fdwPrivLists = ListMake1(NIL); + if (m_cmd_type == CMD_UPDATE) + dml->isSplitUpdates = ListMake1Int((int) true); + + plan->targetlist = NIL; + plan->plan_node_id = m_dxl_to_plstmt_context->GetNextPlanId(); + /* SPQ: add exec_nodes for plan */ + if (child_plan != NULL) { + inherit_plan_locator_info(plan, child_plan); } SetParamIds(plan); + // Should be m_is_tgt_tbl_distributed + if (m_is_tgt_tbl_distributed) + { + PlanSlice *current_slice = m_dxl_to_plstmt_context->GetCurrentSlice(); + current_slice->gangType = GANGTYPE_PRIMARY_WRITER; + if (CMD_UPDATE == m_cmd_type) + { + current_slice->numsegments = u_sess->attr.attr_spq.spq_update_dop_num; + } + else if (CMD_INSERT == m_cmd_type) + { + current_slice->numsegments = u_sess->attr.attr_spq.spq_insert_dop_num; + } + else + { + /* Delete */ + current_slice->numsegments = u_sess->attr.attr_spq.spq_delete_dop_num; + } + } + // cleanup child_contexts->Release(); @@ -4461,8 +4501,44 @@ CTranslatorDXLToPlStmt::TranslateDXLDml( &(plan->plan_width)); return (Plan *) dml; -#endif - return nullptr; +} + +//--------------------------------------------------------------------------- +// @function: +// CTranslatorDXLToPlStmt::AddJunkTargetEntryForColId +// +// @doc: +// Add a new target entry for the given colid to the given target list +// +//--------------------------------------------------------------------------- +void +CTranslatorDXLToPlStmt::AddJunkTargetEntryForColId( + List **target_list, CDXLTranslateContext *dxl_translate_ctxt, ULONG colid, + const char *resname) +{ + SPQOS_ASSERT(nullptr != target_list); + + const TargetEntry *target_entry = dxl_translate_ctxt->GetTargetEntry(colid); + + if (nullptr == target_entry) + { + // colid not found in translate context + SPQOS_RAISE(spqdxl::ExmaDXL, spqdxl::ExmiDXL2PlStmtAttributeNotFound, colid); + } + + // TODO: Oct 29, 2012; see if entry already exists in the target list + + OID expr_oid = spqdb::ExprType((Node *) target_entry->expr); + INT type_modifier = 
spqdb::ExprTypeMod((Node *) target_entry->expr); + Var *var = + spqdb::MakeVar(OUTER_VAR, target_entry->resno, expr_oid, type_modifier, + 0 // varlevelsup + ); + ULONG resno = spqdb::ListLength(*target_list) + 1; + CHAR *resname_str = PStrDup(resname); + TargetEntry *te_new = spqdb::MakeTargetEntry( + (Expr *) var, resno, resname_str, true /* resjunk */); + *target_list = spqdb::LAppend(*target_list, te_new); } //--------------------------------------------------------------------------- @@ -4673,6 +4749,11 @@ CTranslatorDXLToPlStmt::TranslateDXLSplit( SetParamIds(plan); + if (NULL != plan->lefttree) + { + inherit_plan_locator_info(plan, plan->lefttree); + } + // cleanup child_contexts->Release(); diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp index 265e9f814..c14f3bf4b 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToScalar.cpp @@ -2278,10 +2278,9 @@ CTranslatorDXLToScalar::TranslateDXLScalarDMLActionToScalar( SPQOS_ASSERT(NULL != dml_action_node); SPQOS_ASSERT(EdxlopScalarDMLAction == dml_action_node->GetOperator()->GetDXLOperator()); - //DMLActionExpr *expr = MakeNode(DMLActionExpr); + DMLActionExpr *expr = MakeNode(DMLActionExpr); - //return (Expr *) expr; - return (Expr *)nullptr; + return (Expr *) expr; } diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp index 73b462e1f..a4698d15a 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorQueryToDXL.cpp @@ -1104,6 +1104,7 @@ CTranslatorQueryToDXL::GetCtidAndSegmentId(ULONG *ctid, ULONG *segment_id) *ctid = CTranslatorUtils::GetColId(m_query_level, 
m_query->resultRelation, SelfItemPointerAttributeNumber, mdid, m_var_to_colid_map); + *segment_id = *ctid; mdid->Release(); } diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index 8a022eb5e..df673974e 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -80,6 +80,23 @@ static bool check_disable_spq_planner_walker(Node *node, void *context) /* Query node */ query = (Query *)node; + /* openGauss spq: ordered sensitive */ + if ((query->sortClause || !query->rtable) && query->commandType == CMD_INSERT) { + if (u_sess->attr.attr_spq.spq_enable_insert_order_sensitive && 1 != u_sess->attr.attr_spq.spq_insert_dop_num) { + elog(DEBUG1,"spq multi insert Failed: spq_enable_insert_order_sensitive is on. One writer worker should be used"); + u_sess->attr.attr_spq.spq_insert_dop_num = 1; + } + } + /* openGauss end */ + + /* check insert select from tableless_scan */ + if (query->commandType == CMD_INSERT && !query->rtable) { + CHECK_RETURN_HELP_LOG(1 == u_sess->attr.attr_spq.spq_insert_dop_num, + "Insert worker is only one, select from tableless doesn't use spq"); + CHECK_RETURN_HELP_LOG(!u_sess->attr.attr_spq.spq_enable_insert_from_tableless, + "gauss_spq_enable_insert_from_tableless is off"); + } + if (u_sess->attr.attr_spq.spq_enable_pre_optimizer_check) { CHECK_RETURN_HELP_LOG(query->groupingSets, "sql with groupingsets was not allowed in spq"); } @@ -160,9 +177,43 @@ static bool should_spq_planner(Query *parse) elog(ERROR, "parse should not be null."); } - if (parse->commandType != CMD_SELECT) { - elog(DEBUG1, "spq can not support this commandType."); - return false; + if (CMD_INSERT == parse->commandType) { + /* If spq_enable_insert_select is off, no use SPQOPT */ + if (!u_sess->attr.attr_spq.spq_enable_insert_select) { + elog(DEBUG1, "spq Failed: param spq_enable_insert_select is off"); + return false; + } + + /* Insert into .. 
VALUES(only one node) Case */ + if (1 == parse->rtable->length) { + if (1 == u_sess->attr.attr_spq.spq_insert_dop_num) { + elog(DEBUG1, "spq Failed: Insert worker is only one, select from tableless doesn't use spq"); + return false; + } + if (!u_sess->attr.attr_spq.spq_enable_insert_from_tableless) { + elog(DEBUG1, "gauss_spq_enable_insert_from_tableless is off"); + return false; + } + } + } else if (CMD_UPDATE == parse->commandType) { + if (!u_sess->attr.attr_spq.spq_enable_update) { + elog(DEBUG1, "spq Failed: param spq_enable_update is off"); + return false; + } + } else if(CMD_DELETE == parse->commandType) { + if (!u_sess->attr.attr_spq.spq_enable_delete) { + elog(DEBUG1, "spq Failed: param spq_enable_delete is off"); + return false; + } + if (parse->hasModifyingCTE) { + elog(DEBUG1, "spq Failed: parallel delete can't support ModifyingCTE"); + return false; + } + } else { + if (parse->commandType != CMD_SELECT) { + elog(DEBUG1, "spq can not support this commandType."); + return false; + } } if (!u_sess->attr.attr_spq.spq_enable_transaction && IsTransactionBlock()) { -- Gitee From 01928cd13d6ed0c196d7802bc9e23830d05879a9 Mon Sep 17 00:00:00 2001 From: Julong-Li <584147810@qq.com> Date: Fri, 15 Dec 2023 11:49:40 +0800 Subject: [PATCH 126/434] =?UTF-8?q?issue=E4=BF=AE=E6=94=B9:=20enum?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E5=AF=B90=E7=9A=84=E5=A4=84=E7=90=86?= =?UTF-8?q?=E4=B8=8Emysql=E4=B8=8D=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/empty_enum_value.out | 80 +++++++++++++++++++ contrib/dolphin/include/plugin_utils/fmgr.h | 6 +- contrib/dolphin/parallel_schedule_dolphin | 2 +- .../dolphin/plugin_parser/parse_coerce.cpp | 10 ++- contrib/dolphin/plugin_utils/adt/enum.cpp | 37 ++++++++- contrib/dolphin/plugin_utils/fmgr/fmgr.cpp | 13 ++- contrib/dolphin/sql/empty_enum_value.sql | 31 +++++++ 7 files changed, 169 insertions(+), 10 deletions(-) create mode 100644 
contrib/dolphin/expected/empty_enum_value.out create mode 100644 contrib/dolphin/sql/empty_enum_value.sql diff --git a/contrib/dolphin/expected/empty_enum_value.out b/contrib/dolphin/expected/empty_enum_value.out new file mode 100644 index 000000000..c16816dc1 --- /dev/null +++ b/contrib/dolphin/expected/empty_enum_value.out @@ -0,0 +1,80 @@ +create table t_null(c1 enum('a','','b')); +create table t_notnull(c1 enum('a','b')); +--strict sql_mode +insert into t_null values(0); +ERROR: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: c1 +insert into t_notnull values(0); +ERROR: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: c1 +insert into t_null values(''); +insert into t_notnull values(''); +ERROR: invalid input value for enum t_notnull_c1_2200_anonymous_enum_1: "" +LINE 1: insert into t_notnull values(''); + ^ +CONTEXT: referenced column: c1 +insert ignore into t_null values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: c1 +insert ignore into t_notnull values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. 
+CONTEXT: referenced column: c1 +insert ignore into t_null values(''); +insert ignore into t_notnull values(''); +WARNING: invalid input value for enum t_notnull_c1_2200_anonymous_enum_1: "" +LINE 1: insert ignore into t_notnull values(''); + ^ +CONTEXT: referenced column: c1 +insert into t_null values('test'); +ERROR: invalid input value for enum t_null_c1_2200_anonymous_enum_1: "test" +LINE 1: insert into t_null values('test'); + ^ +CONTEXT: referenced column: c1 +insert ignore into t_null values('test'); +WARNING: invalid input value for enum t_null_c1_2200_anonymous_enum_1: "test" +LINE 1: insert ignore into t_null values('test'); + ^ +CONTEXT: referenced column: c1 +--non_strict sql_mode +set dolphin.sql_mode = ''; +insert into t_null values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: c1 +insert into t_notnull values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: c1 +insert into t_null values(''); +insert into t_notnull values(''); +WARNING: invalid input value for enum t_notnull_c1_2200_anonymous_enum_1: "" +LINE 1: insert into t_notnull values(''); + ^ +CONTEXT: referenced column: c1 +insert into t_null values('test'); +WARNING: invalid input value for enum t_null_c1_2200_anonymous_enum_1: "test" +LINE 1: insert into t_null values('test'); + ^ +CONTEXT: referenced column: c1 +select c1, c1+0 from t_null; + c1 | ?column? +----+---------- + | 2 + | 0 + | 2 + | 0 + | 0 + | 2 + | 0 +(7 rows) + +select c1, c1+0 from t_notnull; + c1 | ?column? 
+----+---------- + | 0 + | 0 + | 0 + | 0 +(4 rows) + +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero; +drop table t_null; +drop table t_notnull; diff --git a/contrib/dolphin/include/plugin_utils/fmgr.h b/contrib/dolphin/include/plugin_utils/fmgr.h index 578131cef..c62f1d587 100644 --- a/contrib/dolphin/include/plugin_utils/fmgr.h +++ b/contrib/dolphin/include/plugin_utils/fmgr.h @@ -578,7 +578,11 @@ extern Datum ReceiveFunctionCall(FmgrInfo* flinfo, fmStringInfo buf, Oid typiopa extern Datum OidReceiveFunctionCall(Oid functionId, fmStringInfo buf, Oid typioparam, int32 typmod); extern bytea* SendFunctionCall(FmgrInfo* flinfo, Datum val); extern bytea* OidSendFunctionCall(Oid functionId, Datum val); -extern Datum OidInputFunctionCallColl(Oid functionId, char* str, Oid typioparam, int32 typmod, Oid collation); +extern Datum OidInputFunctionCallColl(Oid functionId, char *str, Oid typioparam, int32 typmod, Oid collation +#ifdef DOLPHIN + , bool ignore +#endif +); /* * Routines in fmgr.c diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index e5f8cc622..5764d43f9 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -7,7 +7,7 @@ test: ast b_compatibility_time_type db_b_new_gram_test group_concat_test test_co test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond test_set_password_for_user test_timestamp_overflow -test: db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn kwlist +test: db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn kwlist empty_enum_value test: empty_value_list empty_value_lists empty_value_support_value create_index test_guc_select_and_set test_copy_year2 test_default diff --git 
a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index eb628c823..abfbd1afb 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -14,6 +14,9 @@ * ------------------------------------------------------------------------- */ #include "postgres.h" +#ifdef DOLPHIN +#include "plugin_utils/fmgr.h" +#endif #include "knl/knl_variable.h" #include "utils/fmgroids.h" #include "catalog/pg_cast.h" @@ -1663,7 +1666,12 @@ CoercionContext ccontext, CoercionForm cformat, int location, Oid collation) Form_pg_type typform = (Form_pg_type)GETSTRUCT(target); Oid typinput = typform->typinput; Oid typioparam = getTypeIOParam(target); - newcon->constvalue = OidInputFunctionCallColl(typinput, DatumGetCString(con->constvalue), typioparam, inputTypeMod, collation); + newcon->constvalue = + OidInputFunctionCallColl(typinput, DatumGetCString(con->constvalue), typioparam, inputTypeMod, collation +#ifdef DOLPHIN + , pstate != NULL && pstate->p_has_ignore +#endif + ); cancel_parser_errposition_callback(&pcbstate); result = (Node*)newcon; diff --git a/contrib/dolphin/plugin_utils/adt/enum.cpp b/contrib/dolphin/plugin_utils/adt/enum.cpp index 03eef8427..89dfca404 100644 --- a/contrib/dolphin/plugin_utils/adt/enum.cpp +++ b/contrib/dolphin/plugin_utils/adt/enum.cpp @@ -35,6 +35,7 @@ #include "plugin_utils/int8.h" #include "plugin_utils/varbit.h" #include "catalog/gs_collation.h" +#include "plugin_commands/mysqlmode.h" #endif static Oid enum_endpoint(Oid enumtypoid, ScanDirection direction); @@ -69,10 +70,18 @@ Datum enum_in(PG_FUNCTION_ARGS) #endif tup = SearchSysCache2(ENUMTYPOIDNAME, ObjectIdGetDatum(enumtypoid), CStringGetDatum(name)); - if (!HeapTupleIsValid(tup)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); + if (!HeapTupleIsValid(tup)) { + /* In non-strict mode, allow enum 
values to be empty strings */ +#ifdef DOLPHIN + int elevel = (fcinfo->can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR; + ereport(elevel, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); + return (Datum)0; +#else + ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); +#endif + } /* * This comes from pg_enum.oid and stores system oids in user tables. This @@ -249,6 +258,11 @@ Datum int4_enum(PG_FUNCTION_ARGS) char* tmp = ""; if (order != 0) { tmp = getEnumLableByOrder(PG_GETARG_OID(1), PG_GETARG_INT32(0)); + } else { + int elevel = ((SQL_MODE_STRICT()) && !fcinfo->can_ignore) ? ERROR : WARNING; + ereport(elevel, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("Invalid input value for enum. In strict sql_mode, do not allow the value 0."))); + return (Datum)0; } Datum result = DirectFunctionCall2(enum_in, CStringGetDatum(tmp), PG_GETARG_DATUM(1)); PG_RETURN_OID(result); @@ -383,6 +397,15 @@ Datum enum_out(PG_FUNCTION_ARGS) HeapTuple tup; Form_pg_enum en; + /* In non-strict mode, + * insertion of empty strings will be treated as a normal case and output accordingly. + */ + if (enumval == 0) { + /* this variable will be manually released in the subsequent process. */ + result = pstrdup(""); + PG_RETURN_CSTRING(result); + } + tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(enumval)); if (!HeapTupleIsValid(tup)) ereport(ERROR, @@ -883,6 +906,12 @@ Datum Enum2Float8(PG_FUNCTION_ARGS) HeapTuple tup; Form_pg_enum en; + /* In non-strict mode, + * insertion of empty strings will be treated as a normal case and output accordingly. 
+ */ + if (enumval == 0) { + PG_RETURN_FLOAT8(result); + } tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(enumval)); if (!HeapTupleIsValid(tup)) ereport(ERROR, diff --git a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp index d99f0150b..fe8e5135c 100644 --- a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp +++ b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp @@ -2451,12 +2451,19 @@ bytea* OidSendFunctionCall(Oid functionId, Datum val) return SendFunctionCall(&flinfo, val); } -Datum OidInputFunctionCallColl(Oid functionId, char* str, Oid typioparam, int32 typmod, Oid collation) +Datum OidInputFunctionCallColl(Oid functionId, char *str, Oid typioparam, int32 typmod, Oid collation +#ifdef DOLPHIN + , bool ignore +#endif +) { FmgrInfo flinfo; - + bool can_ignore = false; +#ifdef DOLPHIN + can_ignore = ignore; +#endif fmgr_info(functionId, &flinfo); - return InputFunctionCall(&flinfo, str, typioparam, typmod, false, collation); + return InputFunctionCall(&flinfo, str, typioparam, typmod, can_ignore, collation); } /* * !!! OLD INTERFACE !!! 
diff --git a/contrib/dolphin/sql/empty_enum_value.sql b/contrib/dolphin/sql/empty_enum_value.sql new file mode 100644 index 000000000..da9b8ad7c --- /dev/null +++ b/contrib/dolphin/sql/empty_enum_value.sql @@ -0,0 +1,31 @@ + +create table t_null(c1 enum('a','','b')); +create table t_notnull(c1 enum('a','b')); + +--strict sql_mode +insert into t_null values(0); +insert into t_notnull values(0); +insert into t_null values(''); +insert into t_notnull values(''); + +insert ignore into t_null values(0); +insert ignore into t_notnull values(0); +insert ignore into t_null values(''); +insert ignore into t_notnull values(''); + +insert into t_null values('test'); +insert ignore into t_null values('test'); + +--non_strict sql_mode +set dolphin.sql_mode = ''; +insert into t_null values(0); +insert into t_notnull values(0); +insert into t_null values(''); +insert into t_notnull values(''); +insert into t_null values('test'); + +select c1, c1+0 from t_null; +select c1, c1+0 from t_notnull; +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero; +drop table t_null; +drop table t_notnull; -- Gitee From e297c16e8a260e3a3d0a20c372063aadca544084 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Fri, 15 Dec 2023 14:09:43 +0800 Subject: [PATCH 127/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5=E5=86=85=E6=A0=B8?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E4=BD=BF=E7=BC=96=E8=AF=91=E9=80=9A=E8=BF=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/plugin_parser/parse_utilcmd.cpp | 94 ++++++++++-- contrib/whale/tablecmds.cpp | 145 ++++++++++++------ 2 files changed, 184 insertions(+), 55 deletions(-) diff --git a/contrib/whale/plugin_parser/parse_utilcmd.cpp b/contrib/whale/plugin_parser/parse_utilcmd.cpp index 8467ed5d0..ba0ba3500 100644 --- a/contrib/whale/plugin_parser/parse_utilcmd.cpp +++ 
b/contrib/whale/plugin_parser/parse_utilcmd.cpp @@ -334,6 +334,26 @@ Oid fill_relation_collation(const char* collate, int charset, List** options, Oi return coll_oid; } +static bool is_create_as_col_store(CreateStmt* stmt) +{ + ListCell *cell = NULL; + char* storeTypeStr = NULL; + foreach(cell, stmt->options) { + DefElem *def = (DefElem *)lfirst(cell); + if (pg_strcasecmp(def->defname, "orientation") == 0) { + if (nodeTag(def->arg) == T_String) { + storeTypeStr = strVal(def->arg); + } else if (nodeTag(def->arg) == T_TypeName) { + storeTypeStr = TypeNameToString((TypeName *)def->arg); + } else { + Assert(false); + } + } + } + return storeTypeStr && (pg_strcasecmp(storeTypeStr, ORIENTATION_COLUMN) == 0 || + pg_strcasecmp(storeTypeStr, ORIENTATION_ORC) == 0); +} + List* transformCreateStmt(CreateStmt* stmt, const char* queryString, const List* uuids, bool preCheck, Oid *namespaceid, bool isFirstNode) { @@ -834,9 +854,11 @@ Oid *namespaceid, bool isFirstNode) checkClusterConstraints(&cxt); /* - * Check reserve column + * Check reserve column if the table is column store */ - checkReserveColumn(&cxt); + if (is_create_as_col_store(stmt)) { + checkReserveColumn(&cxt); + } /* * Output results. 
@@ -2577,7 +2599,7 @@ static void transformTableLikePartitionKeys( RelationGetRelationName(relation)))); } - AssertEreport(n_key_column <= RANGE_PARTKEYMAXNUM, MOD_OPT, ""); + AssertEreport(n_key_column <= MAX_RANGE_PARTKEY_NUMS, MOD_OPT, ""); /* Get int2 array of partition key column numbers */ attnums = (int16*)ARR_DATA_PTR(partkey_columns); @@ -3706,7 +3728,7 @@ static IndexStmt* transformIndexConstraint(Constraint* constraint, CreateStmtCon } else if (attnum == 0) { // expresional index Node *indexkey = NULL; - attform = 0; + attform = NULL; if (indexpr_item == NULL) { ereport(ERROR, @@ -5100,9 +5122,11 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query checkClusterConstraints(&cxt); /* - * Check reserve column + * Check reserve column if the table is column store */ - checkReserveColumn(&cxt); + if (RelationIsColStore(rel)) { + checkReserveColumn(&cxt); + } if ((stmt->relkind == OBJECT_FOREIGN_TABLE || stmt->relkind == OBJECT_STREAM) && cxt.alist != NIL) { Oid relationId; @@ -5283,6 +5307,48 @@ static void transformConstraintAttrs(CreateStmtContext* cxt, List* constraintLis } } +/** + * tableof type is not supported be a column in a table + * @param ctype ctype + */ +static void CheckColumnTableOfType(Type ctype) +{ + Form_pg_type typTup = (Form_pg_type)GETSTRUCT(ctype); + if (typTup->typtype == TYPTYPE_TABLEOF) { + ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmodule(MOD_PLSQL), + errmsg("type \"%s\" is not supported as column type", NameStr(typTup->typname)), + errdetail("\"%s\" is a nest table type", NameStr(typTup->typname)), + errcause("feature not supported"), erraction("check type name"))); + } else if (typTup->typtype == TYPTYPE_COMPOSITE) { + TupleDesc tupleDesc = lookup_rowtype_tupdesc_noerror(HeapTupleGetOid(ctype), typTup->typtypmod, true); + if (tupleDesc == NULL) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("type %u cannot get tupledesc", HeapTupleGetOid(ctype)))); + } + for (int i = 
0; i < tupleDesc->natts; i++) { + if (tupleDesc->attrs[i].attisdropped || strcmp(NameStr(tupleDesc->attrs[i].attname), "pljson_list_data") == 0) { + continue; + } + HeapTuple typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(tupleDesc->attrs[i].atttypid)); + if (!HeapTupleIsValid(typeTuple)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for type %u", tupleDesc->attrs[i].atttypid))); + } + CheckColumnTableOfType(typeTuple); + ReleaseSysCache(typeTuple); + } + ReleaseTupleDesc(tupleDesc); + } else if (OidIsValid(typTup->typelem) && typTup->typtype == TYPTYPE_BASE) { + HeapTuple typTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typTup->typelem)); + if (!HeapTupleIsValid(typTuple)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for type %u", typTup->typelem))); + } + CheckColumnTableOfType(typTuple); + ReleaseSysCache(typTuple); + } +} + /* * Special handling of type definition for a column */ @@ -5324,6 +5390,7 @@ static void transformColumnType(CreateStmtContext* cxt, ColumnDef* column) erraction("check type name"))); } #endif + CheckColumnTableOfType(ctype); ReleaseSysCache(ctype); } @@ -5677,11 +5744,11 @@ void checkPartitionSynax(CreateStmt* stmt) } /* check partition key number for none value-partition table */ - if (!value_partition && stmt->partTableState->partitionKey->length > PARTITION_PARTKEYMAXNUM) { + if (!value_partition && stmt->partTableState->partitionKey->length > MAX_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("too many partition keys for partitioned table"), - errhint("Partittion key columns can not be more than %d", PARTITION_PARTKEYMAXNUM))); + errhint("Partittion key columns can not be more than %d", MAX_PARTKEY_NUMS))); } /* check PARTITIONS clause */ @@ -6056,6 +6123,11 @@ static char* CreatestmtGetOrientation(CreateStmt *stmt) foreach (lc, stmt->options) { DefElem* def = (DefElem*)lfirst(lc); if 
(pg_strcasecmp(def->defname, "orientation") == 0) { +#ifdef ENABLE_FINANCE_MODE + if (defGetString(def) == ORIENTATION_COLUMN) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ORIENTATION==COLUMN is not supported on finance mode"))); +#endif return defGetString(def); } } @@ -6723,8 +6795,8 @@ static void get_rel_partition_info(Relation partTableRel, List** pos, Const** up return; /* nothing to do */ partMap = (RangePartitionMap*)partTableRel->partMap; - partitionKey = partMap->partitionKey; - partKeyNum = partMap->partitionKey->dim1; + partitionKey = partMap->base.partitionKey; + partKeyNum = partMap->base.partitionKey->dim1; /* get position of the partition key */ if (pos != NULL) { @@ -6823,7 +6895,7 @@ static Oid get_split_partition_oid(Relation partTableRel, SplitPartitionState* s } else { Assert(PointerIsValid(splitState->partition_for_values)); splitState->partition_for_values = transformConstIntoTargetType( - partTableRel->rd_att->attrs, partMap->partitionKey, splitState->partition_for_values); + partTableRel->rd_att->attrs, partMap->base.partitionKey, splitState->partition_for_values); srcPartOid = PartitionValuesGetPartitionOid( partTableRel, splitState->partition_for_values, AccessExclusiveLock, true, false, false); } diff --git a/contrib/whale/tablecmds.cpp b/contrib/whale/tablecmds.cpp index 091582f32..6e67f727d 100644 --- a/contrib/whale/tablecmds.cpp +++ b/contrib/whale/tablecmds.cpp @@ -1115,6 +1115,14 @@ static bool isOrientationSet(List* options, bool* isCUFormat, bool isDfsTbl) errdetail("Valid string are \"column\", \"row\"."))); } #endif /* ENABLE_MULTIPLE_NODES */ +#ifdef ENABLE_FINANCE_MODE + if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPTION), + errmsg("Invalid string for \"ORIENTATION\" option"), + errdetail("ORIENTATION=COLUNMN is incorrect, not work on finance mode."))); + } +#endif } if (pg_strcasecmp(defGetString(def), ORIENTATION_COLUMN) == 0 && 
isCUFormat != NULL) { *isCUFormat = true; @@ -1519,11 +1527,11 @@ static void validateDfsTableDef(CreateStmt* stmt, bool isDfsTbl) ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR), errmsg("Num of partition keys in value-partitioned table should not be zeror"))); - } else if (list_length(stmt->partTableState->partitionKey) > VALUE_PARTKEYMAXNUM) { + } else if (list_length(stmt->partTableState->partitionKey) > MAX_VALUE_PARTKEY_NUMS) { ereport(ERROR, (errcode(ERRCODE_PARTITION_ERROR), errmsg("Num of partition keys in value-partitioned table exceeds max allowed num:%d", - RANGE_PARTKEYMAXNUM))); + MAX_RANGE_PARTKEY_NUMS))); } /* Partition stragegy check */ @@ -2857,8 +2865,12 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object if (!IsInitdb && (relkind == RELKIND_RELATION) && !IsSystemNamespace(namespaceId) && !IsCStoreNamespace(namespaceId) && (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) && (stmt->relation->relpersistence == RELPERSISTENCE_PERMANENT) && !u_sess->attr.attr_storage.enable_recyclebin) { - if (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL) { + bool isSegmentType = (storage_type == SEGMENT_PAGE); + if (!isSegmentType && (u_sess->attr.attr_storage.enable_segment || bucketinfo != NULL)) { storage_type = SEGMENT_PAGE; + DefElem *storage_def = makeDefElem("segment", (Node *)makeString("on")); + stmt->options = lappend(stmt->options, storage_def); + reloptions = transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false); } } else if (storage_type == SEGMENT_PAGE) { if (u_sess->attr.attr_storage.enable_recyclebin) { @@ -2901,6 +2913,12 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object } } + if (!IsInitdb && u_sess->attr.attr_storage.enable_segment && storage_type == SEGMENT_PAGE && + !CheckSegmentStorageOption(stmt->options)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Only support segment storage type while parameter enable_segment 
is ON."))); + } + /* * Create the relation. Inherited defaults and constraints are passed in * for immediate handling --- since they don't need parsing, they can be @@ -5879,7 +5897,7 @@ static AttrNumber renameatt_internal(Oid myrelid, const char* oldattname, const check_for_column_name_collision(targetrelation, newattname); /* new name should not conflict with system columns */ - if (CHCHK_PSORT_RESERVE_COLUMN(newattname)) { + if (RelationIsColStore(targetrelation) && CHCHK_PSORT_RESERVE_COLUMN(newattname)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column name \"%s\" conflicts with a system column name", newattname))); @@ -6969,6 +6987,7 @@ ObjectAddress renamePartition(RenameStmt* stmt) ParseState* pstate = NULL; RangePartitionDefState* rangePartDef = NULL; Relation rel = NULL; + int2vector *partKeyArray = NULL; ObjectAddress address; /* shouldn't happen */ @@ -7051,9 +7070,9 @@ ObjectAddress renamePartition(RenameStmt* stmt) rangePartDef->boundary = stmt->object; transformPartitionValue(pstate, (Node*)rangePartDef, false); - + partKeyArray = PartitionMapGetPartKeyArray(rel->partMap); rangePartDef->boundary = transformConstIntoTargetType( - rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, rangePartDef->boundary); + rel->rd_att->attrs, partKeyArray, rangePartDef->boundary); partitionOid = PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, true, false); @@ -8783,7 +8802,7 @@ static void sqlcmd_alter_prep_convert_charset(AlteredTableInfo* tab, Relation re Form_pg_attribute attTup = (Form_pg_attribute)GETSTRUCT(tuple); int attnum = attTup->attnum; if (attnum <= 0 || attTup->attisdropped || !type_is_collatable(attTup->atttypid) || - get_charset_by_collation(attTup->attcollation) == cc->charset) + attTup->attcollation == targetcollid) continue; transform = (Node*)makeVar(1, attnum, attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); @@ -8863,6 +8882,14 @@ static void 
sqlcmd_alter_exec_convert_charset(AlteredTableInfo* tab, Relation re heap_close(attrelation, RowExclusiveLock); } +static bool sqlcmd_partition_index_ddl_cmd(AlterTableType cmd) +{ + /* AT_UnusableAllIndexOnSubPartition is not supported */ + return ((cmd) == AT_UnusableIndexPartition || (cmd) == AT_UnusableAllIndexOnPartition || + (cmd) == AT_UnusableIndex || (cmd) == AT_AddIndex || (cmd) == AT_ReAddIndex || + (cmd) == AT_AddIndexConstraint); +} + static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef) { List *columnOptions = columnDef->columnOptions; @@ -8887,11 +8914,18 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype); if (PARTITION_DDL_CMD(cmd->subtype) && RELATION_IS_PARTITIONED(rel)) { + /* Register invalidation of the relation's relcache entry. */ + CacheInvalidateRelcache(rel); int partitionno = -GetCurrentPartitionNo(RelOidGetPartitionTupleid(rel->rd_id)); if (!PARTITIONNO_IS_VALID(partitionno)) { RelationResetPartitionno(rel->rd_id, ShareUpdateExclusiveLock); } } + + if (sqlcmd_partition_index_ddl_cmd(cmd->subtype) && RelationIsIndex(rel)) { + Oid rel_id = IndexGetRelation(rel->rd_id, false); + CacheInvalidateRelcacheByRelid(rel_id); + } switch (cmd->subtype) { case AT_AddColumn: /* ADD COLUMN */ @@ -9695,12 +9729,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat newslot = MakeSingleTupleTableSlot(newTupDesc, false, oldrel->rd_tam_ops); /* Preallocate values/isnull arrays */ - i = Max(newTupDesc->natts, oldTupDesc->natts); - values = (Datum*)palloc(i * sizeof(Datum)); - isnull = (bool*)palloc(i * sizeof(bool)); - rc = memset_s(values, i * sizeof(Datum), 0, i * sizeof(Datum)); + int n = Max(newTupDesc->natts, oldTupDesc->natts); + values = (Datum*)palloc(n * sizeof(Datum)); + isnull = (bool*)palloc(n * sizeof(bool)); + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); securec_check(rc, "\0", "\0"); 
- rc = memset_s(isnull, i * sizeof(bool), true, i * sizeof(bool)); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); securec_check(rc, "\0", "\0"); /* @@ -9914,6 +9948,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat } CHECK_FOR_INTERRUPTS(); + if (tab->is_first_after) { + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); + securec_check(rc, "\0", "\0"); + } } } else { ((HeapScanDesc) scan)->rs_tupdesc = oldTupDesc; @@ -10063,6 +10103,12 @@ static void ATRewriteTableInternal(AlteredTableInfo* tab, Relation oldrel, Relat ResetExprContext(econtext); CHECK_FOR_INTERRUPTS(); + if (tab->is_first_after) { + rc = memset_s(values, n * sizeof(Datum), 0, n * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + rc = memset_s(isnull, n * sizeof(bool), true, n * sizeof(bool)); + securec_check(rc, "\0", "\0"); + } } } @@ -16366,7 +16412,7 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, * data type of a partitioned table's partition key can not be changed */ if (RELATION_IS_PARTITIONED(rel) && is_partition_column(rel, attnum)) { - int2vector* partKey = ((RangePartitionMap*)rel->partMap)->partitionKey; + int2vector* partKey = PartitionMapGetPartKeyArray(rel->partMap); int i = 0; for (; i < partKey->dim1; i++) { @@ -18744,7 +18790,7 @@ static void ATExecSetTableSpaceForPartitionP2(AlteredTableInfo* tab, Relation re rangePartDef = (RangePartitionDefState*)partition; transformPartitionValue(make_parsestate(NULL), (Node*)rangePartDef, false); rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, + ((RangePartitionMap*)rel->partMap)->base.partitionKey, rangePartDef->boundary); partOid = PartitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessExclusiveLock, true, false, false); @@ -21631,6 +21677,14 @@ void 
AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, Object Assert(objsMoved != NULL); + if (enable_plpgsql_gsdependency_guc() && + gsplsql_is_object_depend(rel->rd_rel->reltype, GSDEPEND_OBJECT_TYPE_TYPE)) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + errmsg("The set schema operator of %s is not allowed, " + "because it is referenced by another object.", NameStr(rel->rd_rel->relname)))); + } + /* OK, modify the pg_class row and pg_depend entry */ classRel = heap_open(RelationRelationId, RowExclusiveLock); @@ -23684,14 +23738,14 @@ static Oid FindPartOidByListBoundary(Relation rel, ListPartitionMap *partMap, No Oid res; if (IsA(boundKey, RowExpr)) { /* Multi-keys partition boundary values */ partKeyValueList = transformConstIntoTargetType( - rel->rd_att->attrs, partMap->partitionKey, ((RowExpr*)boundKey)->args); + rel->rd_att->attrs, partMap->base.partitionKey, ((RowExpr*)boundKey)->args); res = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); list_free_ext(partKeyValueList); return res; } Const* con = (Const*)boundKey; - FormData_pg_attribute attr = rel->rd_att->attrs[partMap->partitionKey->values[0] - 1]; + FormData_pg_attribute attr = rel->rd_att->attrs[partMap->base.partitionKey->values[0] - 1]; if (con->ismaxvalue) { /* @@ -23742,8 +23796,8 @@ static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partD errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", partDef->partitionInitName ? 
partDef->partitionInitName : partDef->partitionName))); } - partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, - partMap->partitionKey, partDef->boundary, partkeyIsFunc); + partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, partMap->base.partitionKey, + partDef->boundary, partkeyIsFunc); pfree_ext(curBound); existingPartOid = PartitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); list_free_ext(partKeyValueList); @@ -24502,17 +24556,17 @@ static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *comma case PART_TYPE_RANGE: case PART_TYPE_INTERVAL: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; case PART_TYPE_LIST: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((ListPartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; case PART_TYPE_HASH: rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((HashPartitionMap*)rel->partMap)->partitionKey, + rel->partMap->partitionKey, rangePartDef->boundary); break; default: @@ -24557,13 +24611,13 @@ static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, int2vector *partitionKey = NULL; switch (rel->partMap->type) { case PART_TYPE_RANGE: - partitionKey = ((RangePartitionMap*)rel->partMap)->partitionKey; + partitionKey = rel->partMap->partitionKey; break; case PART_TYPE_LIST: - partitionKey = ((ListPartitionMap*)rel->partMap)->partitionKey; + partitionKey = rel->partMap->partitionKey; break; case PART_TYPE_HASH: - partitionKey = ((HashPartitionMap*)rel->partMap)->partitionKey; + partitionKey = rel->partMap->partitionKey; break; default: ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), @@ -24745,6 +24799,8 @@ static void 
ATExecUnusableIndexPartition(Relation rel, const char* partition_nam AccessExclusiveLock); // lock on heap partition // call the internal function ATExecSetIndexUsableState(PartitionRelationId, indexPartOid, false); + /* Invoke cache invalidation to refresh index relation data */ + CacheInvalidateRelcache(rel); } static void ATUnusableGlobalIndex(Relation rel) @@ -24857,6 +24913,8 @@ static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partitio // close index and it's partition partitionClose(parentIndex, indexPart, NoLock); index_close(parentIndex, NoLock); + /* Invoke cache invalidation to refresh index relation data */ + CacheInvalidateRelcacheByRelid(parentIndId); } freePartList(partIndexlist); @@ -25045,15 +25103,12 @@ List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) switch (nodeTag(PartDef)) { case T_RangePartitionDefState: boundary = ((RangePartitionDefState *)PartDef)->boundary; - partitionKey = ((RangePartitionMap *)partTableRel->partMap)->partitionKey; break; case T_ListPartitionDefState: boundary = ((ListPartitionDefState *)PartDef)->boundary; - partitionKey = ((ListPartitionMap *)partTableRel->partMap)->partitionKey; break; case T_HashPartitionDefState: boundary = ((HashPartitionDefState *)PartDef)->boundary; - partitionKey = ((HashPartitionMap *)partTableRel->partMap)->partitionKey; break; default: ereport(ERROR, @@ -25066,6 +25121,7 @@ List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) erraction("Check the table type."))); break; } + partitionKey = PartitionMapGetPartKeyArray(partTableRel->partMap); boundary = transformConstIntoTargetType(partTableRel->rd_att->attrs, partitionKey, boundary); return boundary; } @@ -27501,7 +27557,7 @@ static void checkValidationForExchangeTable(Relation partTableRel, Relation ordT int2 bucketId = InvalidBktId; // get right partition oid for the tuple - targetPartOid = heapTupleGetPartitionId(partTableRel, (HeapTuple)tuple, NULL, true); + targetPartOid = 
heapTupleGetPartitionOid(partTableRel, (HeapTuple)tuple, NULL, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, INVALID_PARTITION_NO, partRel, part, RowExclusiveLock); @@ -27666,7 +27722,7 @@ template static void checkValidationForExchangeCStore(Relation partTableRel, Relation ordTableRel, int partSeq) { RangePartitionMap* partMap = (RangePartitionMap*)(partTableRel->partMap); - int2vector* partkeyColumns = partMap->partitionKey; + int2vector* partkeyColumns = partMap->base.partitionKey; int partkeyColumnNum = partkeyColumns->dim1; AttrNumber* scanAttrNumbers = NULL; @@ -27680,8 +27736,8 @@ static void checkValidationForExchangeCStore(Relation partTableRel, Relation ord bool* nulls = NULL; FormData_pg_attribute* attrs = ordTableRel->rd_att->attrs; - Const consts[RANGE_PARTKEYMAXNUM]; - Const* partKeyValues[RANGE_PARTKEYMAXNUM]; + Const consts[MAX_RANGE_PARTKEY_NUMS]; + Const* partKeyValues[MAX_RANGE_PARTKEY_NUMS]; bool isInPart = false; const int tididx = 1; // junk column for cstore delete @@ -27994,7 +28050,7 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) splitPart = (SplitPartitionState*)cmd->def; destPartDefList = splitPart->dest_partition_define_list; partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; + partKeyNum = partMap->base.partitionKey->dim1; partTableOid = RelationGetRelid(partTableRel); // get src partition oid @@ -28010,7 +28066,7 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd) NoLock); } else { splitPart->partition_for_values = transformConstIntoTargetType( - partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->partition_for_values); + partTableRel->rd_att->attrs, partMap->base.partitionKey, splitPart->partition_for_values); srcPartOid = PartitionValuesGetPartitionOid( partTableRel, splitPart->partition_for_values, AccessExclusiveLock, true, true, false); } @@ -28673,14 
+28729,14 @@ static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation par RangePartitionMap* partMap = NULL; ParseState* pstate = NULL; ListCell* cell = NULL; - Const* partKeyValueArr[RANGE_PARTKEYMAXNUM] = {NULL}; + Const* partKeyValueArr[MAX_RANGE_PARTKEY_NUMS] = {NULL}; int i = 0; int partKeyNum = 0; int compareSrcPart = 0; // get partition key number partMap = (RangePartitionMap*)partTableRel->partMap; - partKeyNum = partMap->partitionKey->dim1; + partKeyNum = partMap->base.partitionKey->dim1; // check split point length if (partKeyNum != list_length(splitPart->split_point)) { @@ -28702,7 +28758,7 @@ static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation par List *tmp = splitPart->split_point; splitPart->split_point = - transformConstIntoTargetType(partTableRel->rd_att->attrs, partMap->partitionKey, splitPart->split_point); + transformConstIntoTargetType(partTableRel->rd_att->attrs, partMap->base.partitionKey, splitPart->split_point); list_free_ext(tmp); foreach (cell, splitPart->split_point) { @@ -28765,11 +28821,11 @@ static List* getDestPartBoundaryList(Relation partTableRel, List* destPartDefLis RangePartitionDefState* rangePartDef = (RangePartitionDefState*)lfirst(cell); List* partKeyValueList = NIL; ListCell* otherCell = NULL; - Const** partKeyValueArr = (Const**)palloc0(sizeof(Const*) * RANGE_PARTKEYMAXNUM); + Const** partKeyValueArr = (Const**)palloc0(sizeof(Const*) * MAX_RANGE_PARTKEY_NUMS); int i = 0; partKeyValueList = transformConstIntoTargetType(partTableRel->rd_att->attrs, - ((RangePartitionMap*)partTableRel->partMap)->partitionKey, + ((RangePartitionMap*)partTableRel->partMap)->base.partitionKey, rangePartDef->boundary, partkeyIsFunc); foreach (otherCell, partKeyValueList) { @@ -29521,11 +29577,11 @@ static void readTuplesAndInsertInternal(Relation tempTableRel, Relation partTabl /* tableam_tops_copy_tuple is not ready so we add UStore hack path */ copyTuple = tableam_tops_copy_tuple(tuple); - 
targetPartOid = heapTupleGetPartitionId(partTableRel, (void *)tuple, &partitionno, true); + targetPartOid = heapTupleGetPartitionOid(partTableRel, (void *)tuple, &partitionno, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partTableRel, targetPartOid, partitionno, partRel, part, RowExclusiveLock); if (RelationIsSubPartitioned(partTableRel)) { - targetSubPartOid = heapTupleGetPartitionId(partRel, (void *)tuple, &subpartitionno, true); + targetSubPartOid = heapTupleGetPartitionOid(partRel, (void *)tuple, &subpartitionno, true); searchFakeReationForPartitionOid(partRelHTAB, CurrentMemoryContext, partRel, targetSubPartOid, subpartitionno, subPartRel, subPart, RowExclusiveLock); partRel = subPartRel; @@ -29684,7 +29740,8 @@ void addToastTableForNewPartition(Relation relation, Oid newPartId, bool isForSu firstPartitionId = ((RangePartitionMap*)relation->partMap)->rangeElements[0].partitionOid; firstPartition = partitionOpen(relation, firstPartitionId, NoLock); firstPartitionToastId = firstPartition->pd_part->reltoastrelid; - + + if (OidIsValid(firstPartitionToastId)) { reltuple = SearchSysCache1(RELOID, ObjectIdGetDatum(firstPartitionToastId)); if (!PointerIsValid(reltuple)) { @@ -31234,7 +31291,7 @@ bool is_partition_column(Relation rel, AttrNumber att_no) } } } else if (RelationIsCommonPartitioned(rel)) { - int2vector* part_key = ((RangePartitionMap*)rel->partMap)->partitionKey; + int2vector* part_key = PartitionMapGetPartKeyArray(rel->partMap); for (int i = 0; i < part_key->dim1; i++) { if (att_no == part_key->values[i]) { is_part_col = true; @@ -31242,7 +31299,7 @@ bool is_partition_column(Relation rel, AttrNumber att_no) } } } else if (RelationIsSubPartitioned(rel)) { - int2vector *partKey = ((RangePartitionMap *)rel->partMap)->partitionKey; + int2vector* partKey = PartitionMapGetPartKeyArray(rel->partMap); for (int i = 0; i < partKey->dim1; i++) { if (att_no == partKey->values[i]) { return true; @@ -31252,7 +31309,7 @@ bool 
is_partition_column(Relation rel, AttrNumber att_no) Oid partOid = linitial_oid(partOidList); Partition part = partitionOpen(rel, partOid, NoLock); Relation partRel = partitionGetRelation(rel, part); - int2vector *subPartKey = ((RangePartitionMap *)partRel->partMap)->partitionKey; + int2vector* subPartKey = PartitionMapGetPartKeyArray(partRel->partMap); for (int i = 0; i < subPartKey->dim1; i++) { if (att_no == subPartKey->values[i]) { is_part_col = true; -- Gitee From f565fd6d6082a8724b07e835ad6cf448ba4047b4 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Thu, 14 Dec 2023 21:26:11 +0800 Subject: [PATCH 128/434] =?UTF-8?q?=E4=BC=98=E5=8C=96binary=E5=92=8Cvarbin?= =?UTF-8?q?ary=E7=B1=BB=E5=9E=8B=E7=9A=84=E8=BE=93=E5=87=BA=E6=A0=BC?= =?UTF-8?q?=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/charset_gbk_b_db.out | 412 ++++++------- .../dolphin/expected/charset_utf8mb4_b_db.out | 582 +++++++++--------- contrib/dolphin/expected/conv_cast_test.out | 4 +- contrib/dolphin/expected/db_b_parser2.out | 12 +- .../ignore_not_null_constraints.out | 14 +- contrib/dolphin/expected/kwlist.out | 6 +- .../json_cmp_operator_test.out | 24 +- .../string_func_test/db_b_compress_test.out | 6 +- contrib/dolphin/expected/test_binary.out | 186 +++--- contrib/dolphin/expected/test_blob.out | 12 +- contrib/dolphin/expected/test_condition.out | 96 +-- contrib/dolphin/include/plugin_postgres.h | 2 + .../dolphin/input/binary_export_test.source | 27 + .../dolphin/output/binary_export_test.source | 43 ++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_postgres.cpp | 11 + contrib/dolphin/plugin_utility.cpp | 16 + contrib/dolphin/plugin_utils/adt/varlena.cpp | 72 ++- .../rollback_script/dolphin--3.0--2.0.sql | 4 + .../upgrade_script/dolphin--2.0--3.0.sql | 3 + 20 files changed, 856 insertions(+), 678 deletions(-) create mode 100644 contrib/dolphin/input/binary_export_test.source create mode 
100644 contrib/dolphin/output/binary_export_test.source diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index de3a73464..d8bfe9ad0 100644 --- a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -110,9 +110,9 @@ LINE 1: SELECT CAST('高斯' AS binary) COLLATE "utf8mb4_unicode_ci"... ^ CONTEXT: referenced column: binary SELECT CAST('高斯' AS binary) COLLATE "binary"; - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST('E9AB98E696AF' AS blob) COLLATE "utf8mb4_unicode_ci"; -- ERROR @@ -146,33 +146,33 @@ LINE 1: SELECT '高斯' COLLATE "binary"; ^ -- 中文 const charset SELECT CAST('高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_binary'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + ¸ß˹ (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT _binary'高斯'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4'高斯'; @@ -663,9 +663,9 @@ LINE 1: ... 
^ -- -- -- diff charset & implicit collation SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -687,9 +687,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -701,21 +701,21 @@ ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chin LINE 1: ...30'高斯DB' , '高斯DB') result, pg_collation_for(result); ^ SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) -- -- -- explicit & 
implicit @@ -786,9 +786,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_unicode_ci + result | pg_collation_for +--------------+-------------------- + ¸ß˹DB¸ß˹DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -810,9 +810,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_bin + result | pg_collation_for +--------------+------------------ + ¸ß˹DB¸ß˹DB | utf8mb4_bin (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -864,28 +864,28 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co (1 row) SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442¸ß˹DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------+-------------------- + ¸ß˹DB¸ß˹DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------+------------------ + 高斯DB高斯DB | gbk_chinese_ci (1 row) -- -- -- concat 3 args SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for 
-------------------------------------+------------------ - \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +--------------------------+------------------ + 高斯DB高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +--------------------------+------------------ + 高斯DB高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -901,15 +901,15 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for +--------------------------+------------------ + 高斯DB高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------------+-------------------- - ¸ß˹DB高斯DB\xe9ab98e696af4442 | gb18030_chinese_ci + result | pg_collation_for +------------------------+-------------------- + ¸ß˹DB高斯DB高斯DB | gb18030_chinese_ci (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR @@ -927,9 +927,9 @@ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _ut (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for 
+--------------------------+------------------ + 高斯DB高斯DB高斯DB | gbk_chinese_ci (1 row) -- -- -- const compare CONCAT @@ -1316,9 +1316,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------------------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+---------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB (1 row) SELECT char_length(futf8_bin), @@ -1512,9 +1512,9 @@ LINE 1: SELECT fbin COLLATE gbk_chinese_ci FROM t_diff_charset_colum... ^ CONTEXT: referenced column: fbin SELECT fbin COLLATE 'binary' FROM t_diff_charset_columns; - fbin --------------------- - \xe9ab98e696af4442 + fbin +---------- + 高斯DB (1 row) SELECT fbin COLLATE "zh_CN.utf8" FROM t_diff_charset_columns; -- ERROR @@ -2812,39 +2812,39 @@ SELECT @id_res,@a_res; SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; -- ERROR ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) 
SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -2855,13 +2855,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (5 rows) DELETE FROM t_charset_utf8mb4 WHERE a='高斯'; @@ -2974,37 +2974,37 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b 
+----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; @@ -3022,13 +3022,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (5 rows) DELETE FROM t_charset_gbk WHERE a='高斯'; @@ -3069,33 +3069,33 @@ SHOW collation_connection; -- 中文 SELECT CAST('高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 楂樻柉 (1 row) SELECT CAST(_binary'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 楂樻柉 (1 row) SELECT _binary'高斯'; - ?column? ----------------- - \xe9ab98e696af + ?column? 
+---------- + 高斯 (1 row) SELECT _utf8mb4'高斯'; @@ -3111,9 +3111,9 @@ SELECT _gbk'高斯'; (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -3241,43 +3241,43 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+---------------------- - 2 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 2 | 楂樻柉 | 楂樻柉 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+------+------ + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 3 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 3 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 5 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 4 | 楂樻柉DB | 高斯DB + 5 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 5 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 4 | 楂樻柉DB | 高斯DB + 5 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -3287,14 +3287,14 @@ UPDATE t_charset_utf8mb4 SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_utf8mb4 SET 
a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 5 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 5 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (6 rows) DELETE FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯'; @@ -3408,51 +3408,51 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+------+------ + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 
楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------------- - 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 5 | 楂樻柉 | 楂樻柉 (1 row) SELECT * FROM t_charset_gbk WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+---------------------- - 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 5 | 楂樻柉 | 楂樻柉 (1 row) UPDATE t_charset_gbk SET a='高斯DB', b='高斯DB' @@ -3462,14 +3462,14 @@ UPDATE t_charset_gbk SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 5 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 5 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (6 rows) DELETE FROM t_charset_gbk WHERE a=_utf8mb4'高斯'; @@ -3504,9 +3504,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------------------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB (1 row) SELECT char_length(futf8_bin), diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index 47851e472..8ff2c2c3b 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ 
-62,13 +62,13 @@ SELECT _gbk'ABCD' = _GbK'ABCD'; SELECT _binary E'', _binary E'' IS NULL; ?column? | ?column? ----------+---------- - \x | f + | f (1 row) SELECT _binary E'\\xe9ab98e696af'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4 E'\\xe9ab98e696af'; @@ -90,13 +90,13 @@ LINE 1: SELECT _utf8mb4 X'\\xe9ab98e696af'; SELECT _binary X'', _binary X'' IS NULL; ?column? | ?column? ----------+---------- - \x | f + | f (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -114,19 +114,19 @@ SELECT _gbk X'E9AB98E696AF'; SELECT _binary B'', _binary B'' IS NULL; ?column? | ?column? ----------+---------- - \x | f + | f (1 row) SELECT _binary B'0'; ?column? ---------- - \x00 + \0 (1 row) SELECT _binary B'111010011010101110011000111001101001011010101111'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4 B'111010011010101110011000111001101001011010101111'; @@ -247,9 +247,9 @@ ERROR: COLLATION "gbk_chinese_ci" is not valid for binary type LINE 1: SELECT _binary'高斯' COLLATE gbk_chinese_ci; ^ SELECT _binary'高斯' COLLATE 'binary'; - ?column? ----------------- - \xe9ab98e696af + ?column? 
+---------- + 高斯 (1 row) SELECT _binary'高斯' COLLATE "zh_CN.utf8"; -- ERROR @@ -337,9 +337,9 @@ LINE 1: SELECT CAST('高斯' AS binary) COLLATE "utf8mb4_unicode_ci"; ^ CONTEXT: referenced column: binary SELECT CAST('高斯' AS binary) COLLATE "binary"; - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST('E9AB98E696AF' AS blob) COLLATE "utf8mb4_unicode_ci"; -- ERROR @@ -434,33 +434,33 @@ WARNING: invalid input syntax for type bigint: "b" DROP TABLE t_collate_expr; -- 中文 const charset SELECT CAST('高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_binary'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 楂樻柉 (1 row) SELECT _binary'高斯'; - ?column? ----------------- - \xe9ab98e696af + ?column? 
+---------- + 高斯 (1 row) SELECT _utf8mb4'高斯'; @@ -1051,9 +1051,9 @@ LINE 1: ...æ–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -- -- -- diff charset & implicit collation SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -1075,9 +1075,9 @@ SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -1091,21 +1091,21 @@ SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci + result | 
pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) -- -- -- explicit & implicit @@ -1176,9 +1176,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -1200,9 +1200,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | utf8mb4_bin + result | pg_collation_for +--------------+------------------ + 高斯DB高斯DB | utf8mb4_bin (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -1254,28 +1254,28 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co (1 row) SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) -- -- -- concat 3 args SELECT CONCAT(_binary'高斯DB', 
_gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------------+-------------------- - \xe9ab98e696af4442高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------+-------------------- + 高斯DB高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - \xe9ab98e696af4442楂樻柉DB楂樻柉DB | gbk_chinese_ci + result | pg_collation_for +--------------------------+------------------ + 楂樻柉DB楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -1291,15 +1291,15 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------------+-------------------- - 高斯DB高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +--------------------+-------------------- + 高斯DB高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------------+-------------------- - 高斯DB楂樻柉DB\xe9ab98e696af4442 | gb18030_chinese_ci + result | pg_collation_for +------------------------+-------------------- + 高斯DB楂樻柉DB楂樻柉DB | gb18030_chinese_ci (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR @@ -1317,9 +1317,9 @@ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _ut (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 
楂樻柉DB楂樻柉DB\xe9ab98e696af4442 | gbk_chinese_ci + result | pg_collation_for +--------------------------+------------------ + 楂樻柉DB楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) -- -- -- const compare CONCAT @@ -1706,9 +1706,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------------------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB (1 row) SELECT char_length(futf8_bin), @@ -1902,9 +1902,9 @@ LINE 1: SELECT fbin COLLATE gbk_chinese_ci FROM t_diff_charset_colum... ^ CONTEXT: referenced column: fbin SELECT fbin COLLATE 'binary' FROM t_diff_charset_columns; - fbin --------------------- - \xe9ab98e696af4442 + fbin +-------- + 高斯DB (1 row) SELECT fbin COLLATE "zh_CN.utf8" FROM t_diff_charset_columns; -- ERROR @@ -2194,9 +2194,9 @@ LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- with binary & implicit collation SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2206,9 +2206,9 @@ SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_cha (1 row) SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------+-------------------- - 高斯db\xe9ab98e696af4442 | 
utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯db高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2218,9 +2218,9 @@ SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_cha (1 row) SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------------------+-------------------- - ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +-----------+-------------------- + ˹DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2230,9 +2230,9 @@ SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_char (1 row) SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------------------+-------------------- - ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci + result | pg_collation_for +-----------+-------------------- + ˹DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2242,9 +2242,9 @@ SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_ (1 row) SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------------+-------------------- - \xe9ab98e696af4442E9AB98E696AF | utf8mb4_general_ci + result | pg_collation_for +--------------------+-------------------- + 高斯DBE9AB98E696AF | utf8mb4_general_ci (1 row) -- -- concat column and @uservar @@ -3161,9 +3161,9 @@ ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... 
^ SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯db | utf8mb4_general_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯db | utf8mb4_general_ci (1 row) -- -- test explicit collate on blob result @@ -3732,9 +3732,9 @@ ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "bi... ^ SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci + result | pg_collation_for +--------------+-------------------- + 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR @@ -3878,43 +3878,43 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+---------------------- - 2 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 2 | 楂樻柉 | 楂樻柉 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+------+------ + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 3 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 3 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE 
a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 5 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 4 | 楂樻柉DB | 高斯DB + 5 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 5 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 4 | 楂樻柉DB | 高斯DB + 5 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -3924,14 +3924,14 @@ UPDATE t_charset_utf8mb4 SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 5 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 5 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (6 rows) DELETE FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯'; @@ -3945,27 +3945,27 @@ SELECT * FROM t_charset_utf8mb4 ORDER BY id; INSERT INTO t_charset_utf8mb4(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_utf8mb4 SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯DB | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 7 | 高斯DB | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) INSERT INTO t_charset_utf8mb4(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE 
a=CONCAT('高斯','db'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 7 | 高斯db | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) UPDATE t_charset_utf8mb4 SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | DB高斯 | \x4442e9ab98e696af + id | a | b +----+--------+-------- + 7 | 高斯db | 高斯DB + 8 | DB高斯 | DB高斯 (2 rows) DROP TABLE IF EXISTS t_charset_utf8mb4; @@ -4072,51 +4072,51 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+------+------ + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | \xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 3 | 楂樻柉DB | \xe9ab98e696af4442 - 4 | 楂樻柉DB | \xe9ab98e696af4442 - 6 | 楂樻柉DB | 
\xe9ab98e696af4442 + id | a | b +----+----------+-------- + 3 | 楂樻柉DB | 高斯DB + 4 | 楂樻柉DB | 高斯DB + 6 | 楂樻柉DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------------- - 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 5 | 楂樻柉 | 楂樻柉 (1 row) SELECT * FROM t_charset_gbk WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+---------------------- - 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 + id | a | b +----+--------+-------- + 5 | 楂樻柉 | 楂樻柉 (1 row) UPDATE t_charset_gbk SET a='高斯DB', b='高斯DB' @@ -4126,14 +4126,14 @@ UPDATE t_charset_gbk SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 5 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 5 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (6 rows) DELETE FROM t_charset_gbk WHERE a=_utf8mb4'高斯'; @@ -4148,27 +4148,27 @@ SELECT * FROM t_charset_gbk ORDER BY id; INSERT INTO t_charset_gbk(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_gbk SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯DB | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+--------+-------- + 7 | 高斯DB | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) INSERT INTO t_charset_gbk(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | 高斯DB | 
\xe9ab98e696af4442 + id | a | b +----+--------+-------- + 7 | 高斯db | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) UPDATE t_charset_gbk SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | DB高斯 | \x4442e9ab98e696af + id | a | b +----+--------+-------- + 7 | 高斯db | 高斯DB + 8 | DB高斯 | DB高斯 (2 rows) DROP TABLE IF EXISTS t_charset_gbk; @@ -4630,33 +4630,33 @@ SHOW collation_connection; -- 中文 SELECT CAST('高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + ¸ß˹ (1 row) SELECT CAST(_binary'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + ¸ß˹ (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary ----------------- - \xe9ab98e696af + binary +-------- + 高斯 (1 row) SELECT _binary'高斯'; - ?column? ----------------- - \xe9ab98e696af + ?column? +---------- + 高斯 (1 row) SELECT _utf8mb4'高斯'; @@ -4672,9 +4672,9 @@ SELECT _gbk'高斯'; (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------------- - \xe9ab98e696af + ?column? 
+---------- + 高斯 (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -4802,39 +4802,39 @@ SELECT @id_res,@a_res; SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; -- ERROR ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -4845,13 +4845,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+-------------------- - 1 | 高斯 
| \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (5 rows) DELETE FROM t_charset_utf8mb4 WHERE a='高斯'; @@ -4865,27 +4865,27 @@ SELECT * FROM t_charset_utf8mb4 ORDER BY id; INSERT INTO t_charset_utf8mb4(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_utf8mb4 SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+-------------------- - 7 | 高斯DB | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 7 | 高斯DB | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) INSERT INTO t_charset_utf8mb4(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 7 | 高斯db | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) UPDATE t_charset_utf8mb4 SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | DB高斯 | \x4442e9ab98e696af + id | a | b +----+----------+---------- + 7 | 高斯db | 高斯DB + 8 | DB高斯 | DB高斯 (2 rows) DROP TABLE IF EXISTS t_charset_utf8mb4; @@ -4991,37 +4991,37 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+---------------- - 1 | 高斯 | \xe9ab98e696af + id | a | b +----+--------+-------- + 1 | 高斯 | 高斯 (1 row) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b 
-----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+----------+-------------------- - 2 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 2 | 高斯DB | 高斯DB (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------------------- - 3 | ¸ß˹DB | \xe9ab98e696af4442 - 4 | ¸ß˹DB | \xe9ab98e696af4442 - 6 | ¸ß˹DB | \xe9ab98e696af4442 + id | a | b +----+--------+---------- + 3 | ¸ß˹DB | 高斯DB + 4 | ¸ß˹DB | 高斯DB + 6 | ¸ß˹DB | 高斯DB (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; @@ -5039,13 +5039,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+-------------------- - 1 | 高斯 | \xe9ab98e696af - 2 | 高斯DB | \xe9ab98e696af4442 - 3 | 高斯DB | \xe9ab98e696af4442 - 4 | 高斯DB | \xe9ab98e696af4442 - 6 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 1 | 高斯 | 高斯 + 2 | 高斯DB | 高斯DB + 3 | 高斯DB | 高斯DB + 4 | 高斯DB | 高斯DB + 6 | 高斯DB | 高斯DB (5 rows) DELETE FROM t_charset_gbk WHERE a='高斯'; @@ -5059,27 +5059,27 @@ SELECT * FROM t_charset_gbk ORDER BY id; INSERT INTO t_charset_gbk(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_gbk SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b 
-----+----------+-------------------- - 7 | 高斯DB | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 7 | 高斯DB | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) INSERT INTO t_charset_gbk(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | 高斯DB | \xe9ab98e696af4442 + id | a | b +----+----------+---------- + 7 | 高斯db | 高斯DB + 8 | 高斯DB | 高斯DB (2 rows) UPDATE t_charset_gbk SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+-------------------- - 7 | 高斯db | \xe9ab98e696af4442 - 8 | DB高斯 | \x4442e9ab98e696af + id | a | b +----+----------+---------- + 7 | 高斯db | 高斯DB + 8 | DB高斯 | DB高斯 (2 rows) DROP TABLE IF EXISTS t_charset_gbk; @@ -5106,9 +5106,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------------------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+---------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB (1 row) SELECT char_length(futf8_bin), diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 90b62f7a6..c8e7c8754 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -929,7 +929,7 @@ set dolphin.sql_mode = treat_bxconst_as_binary; select b'11100000111000'; ?column? 
---------- - \x3838 + 88 (1 row) select conv(b'11100000111000', 10, 8); @@ -953,7 +953,7 @@ select conv(b'11100000111000'::int8, 20, 8); select x'4c'; ?column? ---------- - \x4c + L (1 row) select conv(x'4c', 10, 8); diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index de0c19c29..eacc2e9a3 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -746,12 +746,12 @@ insert into t_binary values(b'1',b'111',b'111'); insert into t_binary values(b'1',b'11111',b'111'); insert into t_binary values(b'1',b'111111111',b'111'); select *,length(a),length(b),length(c) from t_binary order by 1,2,3; - a | b | c | length | length | length -------+--------+------------------------+--------+--------+-------- - \x01 | \x01ff | \x07000000000000000000 | 1 | 2 | 10 - \x01 | \x07 | \x07000000000000000000 | 1 | 1 | 10 - \x01 | \x1f | \x07000000000000000000 | 1 | 1 | 10 - | | | | | + a | b | c | length | length | length +------+------+------------------------+--------+--------+-------- + \x01 | \x01 | \x07\0\0\0\0\0\0\0\0\0 | 1 | 2 | 10 + \x01 | \x07 | \x07\0\0\0\0\0\0\0\0\0 | 1 | 1 | 10 + \x01 | \x1F | \x07\0\0\0\0\0\0\0\0\0 | 1 | 1 | 10 + | | | | | (4 rows) drop table t_binary; diff --git a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out index 88656a7a8..d69423193 100644 --- a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out +++ b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out @@ -966,9 +966,9 @@ insert ignore into t_binaryn values (null); WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). 
select *,hex(c) from t_binaryn; - c | hex -----------------+-------------- - \x000000000000 | 000000000000 + c | hex +--------------+-------------- + \0\0\0\0\0\0 | 000000000000 (1 row) insert into t_binaryn values(b'01'); @@ -978,10 +978,10 @@ DETAIL: Failing row contains (null). WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). select *,hex(c) from t_binaryn; - c | hex -----------------+-------------- - \x000000000000 | 000000000000 - \x000000000000 | 000000000000 + c | hex +--------------+-------------- + \0\0\0\0\0\0 | 000000000000 + \0\0\0\0\0\0 | 000000000000 (2 rows) -- mixture diff --git a/contrib/dolphin/expected/kwlist.out b/contrib/dolphin/expected/kwlist.out index 7f9ec1402..f87b545a5 100644 --- a/contrib/dolphin/expected/kwlist.out +++ b/contrib/dolphin/expected/kwlist.out @@ -270,9 +270,9 @@ LINE 1: select case when end then binary else (binary) end from end; ^ CONTEXT: referenced column: binary select case when end then binary end else binary end end from end; - end ------- - \x31 + end +----- + 1 (1 row) /* only */ diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out index e3289233e..6e0827320 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out @@ -539,9 +539,9 @@ select `binary`, `json`, `binary` < `json` as `binary `json` as `binary<>json`, `binary` = `json` as `binary=json`, `binary` <=> `json` as `binary<=>json` from test_json_table; - binary | json | binary>json | binary>=json | binaryjson | binary=json | binary<=>json 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- - \x312e3233610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f + binary | json | binary>json | binary>=json | binaryjson | binary=json | binary<=>json +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- + 1.23a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f (1 row) select `binary`, `json`, @@ -549,9 +549,9 @@ select `binary`, `json`, `json` < `binary` as `json `binary` as `json<>binary`, `json` = `binary` as `json=binary`, `json` <=> `binary` as `json<=>binary` from test_json_table; - binary | json | json>binary | json>=binary | jsonbinary | json=binary | json<=>binary -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- - 
\x312e3233610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + binary | json | json>binary | json>=binary | jsonbinary | json=binary | json<=>binary +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- + 1.23a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `varbinary`, `json`, @@ -559,9 +559,9 @@ select `varbinary`, `json`, `varbinary` < `json` as `varbinary `json` as `varbinary<>json`, `varbinary` = `json` as `varbinary=json`, `varbinary` <=> `json` as `varbinary<=>json` from test_json_table; - varbinary | json | varbinary>json | varbinary>=json | varbinaryjson | varbinary=json | varbinary<=>json ---------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ - \x312e323361 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f + varbinary | json | varbinary>json | varbinary>=json | varbinaryjson | varbinary=json | varbinary<=>json +-----------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ + 1.23a | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f (1 row) select `varbinary`, `json`, @@ -569,9 +569,9 @@ select `varbinary`, `json`, `json` < `varbinary` as `json `varbinary` 
as `json<>varbinary`, `json` = `varbinary` as `json=varbinary`, `json` <=> `varbinary` as `json<=>varbinary` from test_json_table; - varbinary | json | json>varbinary | json>=varbinary | jsonvarbinary | json=varbinary | json<=>varbinary ---------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ - \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + varbinary | json | json>varbinary | json>=varbinary | jsonvarbinary | json=varbinary | json<=>varbinary +-----------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ + 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `tinyblob`, `json`, diff --git a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out index 4dbf3f114..94b906521 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out @@ -12,9 +12,9 @@ select length(compress('string for test compress function aaaaaaaaaaaaaaaaaaaaaa create table t1 (a binary(255), b text(255), c char(255)); insert into t1 (a,b,c) values (compress('string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb'),'string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb', 'string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb'); SELECT a FROM t1; - a 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - \x79000000789c2b2e29cacc4b5748cb2f5228492d2e5148cecf2d284a2d2e56482bcd4b2ec9cccf5348a43a504802020001942dcb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + a +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + y\0\0\0x+.)KWH/R(I-.QH-(J-.VH+K.SH:PH\x02\x02\0\x01-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 (1 row) SELECT HEX(COMPRESS(b)) FROM t1; 
diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 71b415f52..3a35acfcd 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -24,10 +24,10 @@ insert into binary_templates values ('aaaaaa', 'aaa', 'aaaaaa'); ERROR: The input length:6 exceeds the maximum length:5. CONTEXT: referenced column: c select * from binary_templates; - a | b | c -----------+--------------+-------------- - \x616161 | \x6161610000 | \x616161 - \x616161 | \x6161610000 | \x6161616161 + a | b | c +----------+---------+------- + \x616161 | aaa\0\0 | aaa + \x616161 | aaa\0\0 | aaaaa (2 rows) create table test_bytea (a bytea); @@ -110,36 +110,36 @@ EXECUTE insert_varbinary(1, 'aaaaaaaaaaa'::bytea); -- length 11 ERROR: The input length:11 exceeds the maximum length:10. CONTEXT: referenced column: field_name select * from t_binary_061; - id | field_name -----+------------------------ - 1 | \x61616100000000000000 + id | field_name +----+------------------- + 1 | aaa\0\0\0\0\0\0\0 (1 row) select * from t_varbinary_061; id | field_name ----+------------ - 1 | \x616161 + 1 | aaa (1 row) drop table if exists t_binary_061; drop table if exists t_varbinary_061; -- binary test enhance select binary '\t'; - binary --------- - \x09 + binary +---------- + (1 row) select binary '\\'; binary -------- - \x5c + \ (1 row) select binary '\a'; binary -------- - \x61 + a (1 row) select binary '\b'; @@ -151,85 +151,86 @@ select binary '\b'; select binary '\n'; binary -------- - \x0a + + + (1 row) select binary '\r'; binary -------- - \x0d + \r (1 row) select binary '\v'; binary -------- - \x76 + v (1 row) select binary '\f'; binary -------- - \x66 + f (1 row) select binary '\"'; binary -------- - \x22 + " (1 row) select binary '\%'; binary -------- - \x5c25 + \% (1 row) select binary '\_'; binary -------- - \x5c5f + \_ (1 row) select binary '\0'; binary -------- - \x00 + \0 (1 row) select binary '\z'; binary -------- - 
\x7a + z (1 row) select binary '\pqy'; - binary ----------- - \x707179 + binary +-------- + pqy (1 row) select binary 'æ•°æ®åº“'; - binary ----------------------- - \xe695b0e68daee5ba93 + binary +-------- + æ•°æ®åº“ (1 row) select binary E'\t'; - binary --------- - \x09 + binary +---------- + (1 row) select binary E'\\'; binary -------- - \x5c + \ (1 row) select binary E'\a'; binary -------- - \x61 + a (1 row) select binary E'\b'; @@ -241,43 +242,44 @@ select binary E'\b'; select binary E'\n'; binary -------- - \x0a + + + (1 row) select binary E'\r'; binary -------- - \x0d + \r (1 row) select binary E'\v'; binary -------- - \x76 + v (1 row) select binary E'\f'; binary -------- - \x0c + \x0C (1 row) select binary E'\"'; binary -------- - \x22 + " (1 row) select binary E'\%'; binary -------- - \x25 + % (1 row) select binary E'\_'; binary -------- - \x5f + _ (1 row) select binary E'\0'; @@ -285,69 +287,69 @@ ERROR: invalid byte sequence for encoding "UTF8": 0x00 select binary E'\z'; binary -------- - \x7a + z (1 row) select binary E'\pqy'; - binary ----------- - \x707179 + binary +-------- + pqy (1 row) select binary E'æ•°æ®åº“'; - binary ----------------------- - \xe695b0e68daee5ba93 + binary +-------- + æ•°æ®åº“ (1 row) -- binary type cast test select 'abc'::binary; - binary ----------- - \x616263 + binary +-------- + abc (1 row) select 'abcdefgh'::binary; - binary --------------------- - \x6162636465666768 + binary +---------- + abcdefgh (1 row) select 'abc'::binary(20); - binary --------------------------------------------- - \x6162630000000000000000000000000000000000 + binary +--------------------------------------- + abc\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 (1 row) select 'a啊填啊'::binary; - binary ------------------------- - \x61e5958ae5a1abe5958a + binary +--------- + a啊填啊 (1 row) -- other type select 123::binary; - binary ----------- - \x313233 + binary +-------- + 123 (1 row) select 123.456::binary; - binary ------------------- - \x3132332e343536 + binary 
+--------- + 123.456 (1 row) select '2020-01-01'::date::binary; - binary ------------------------- - \x323032302d30312d3031 + binary +------------ + 2020-01-01 (1 row) select '12:13:13'::time::binary; - binary --------------------- - \x31323a31333a3133 + binary +---------- + 12:13:13 (1 row) --errreport @@ -422,9 +424,9 @@ select 'abc%' like binary 'abc|_' escape '|'; -- test binary expr gram; select binary sin(1); - sin --------------------------------------- - \x302e383431343730393834383037383937 + sin +------------------- + 0.841470984807897 (1 row) drop table if exists t1; @@ -432,15 +434,15 @@ NOTICE: table "t1" does not exist, skipping create table t1(a int, b text); insert into t1 values(1,'test'); select binary a from t1; - a ------- - \x31 + a +--- + 1 (1 row) select binary b from t1; - b ------------- - \x74657374 + b +------ + test (1 row) select binary a = binary '3' from t1; @@ -457,22 +459,22 @@ insert into t1 values('ad','name2'); ERROR: The input length:2 exceeds the maximum length:1. 
CONTEXT: referenced column: a select * from t1; - a | b -------+------- - \x61 | name1 + a | b +---+------- + a | name1 (1 row) -- enhase origin request SELECT BINARY 'Geeksforgeeks'; - binary ------------------------------- - \x4765656b73666f726765656b73 + binary +--------------- + Geeksforgeeks (1 row) select binary repeat('a', 3); - repeat ----------- - \x616161 + repeat +-------- + aaa (1 row) create table test33 (c binary(3)); @@ -525,9 +527,9 @@ select cast('' as binary(0)); (1 row) select cast('ab' as binary(12)); - binary ----------------------------- - \x616200000000000000000000 + binary +------------------------ + ab\0\0\0\0\0\0\0\0\0\0 (1 row) create table test34 (a binary(0)); diff --git a/contrib/dolphin/expected/test_blob.out b/contrib/dolphin/expected/test_blob.out index 0344c0b04..a7067971b 100644 --- a/contrib/dolphin/expected/test_blob.out +++ b/contrib/dolphin/expected/test_blob.out @@ -1238,16 +1238,16 @@ c5 binary(5), c23 varbinary(50))default charset=utf8; insert t_dezebium_0007_02 values('1010101','1010101','1010101','1010101','ab','ab'); select * from t_dezebium_0007_02; - c1 | c2 | c3 | c4 | c5 | c23 ----------+---------+---------+---------+----------------+----- - 1010101 | 1010101 | 1010101 | 1010101 | ab\000\000\000 | ab + c1 | c2 | c3 | c4 | c5 | c23 +---------+---------+---------+---------+----------+----- + 1010101 | 1010101 | 1010101 | 1010101 | ab\0\0\0 | ab (1 row) set bytea_output=escape; select * from t_dezebium_0007_02; - c1 | c2 | c3 | c4 | c5 | c23 ----------+---------+---------+---------+----------------+----- - 1010101 | 1010101 | 1010101 | 1010101 | ab\000\000\000 | ab + c1 | c2 | c3 | c4 | c5 | c23 +---------+---------+---------+---------+----------+----- + 1010101 | 1010101 | 1010101 | 1010101 | ab\0\0\0 | ab (1 row) update t_dezebium_0007_02 set c2='101',c5='FG'; diff --git a/contrib/dolphin/expected/test_condition.out b/contrib/dolphin/expected/test_condition.out index 19c092272..784f2bcae 100644 --- 
a/contrib/dolphin/expected/test_condition.out +++ b/contrib/dolphin/expected/test_condition.out @@ -6313,73 +6313,73 @@ select ifnull(blb, txt) from typeset; select ifnull(bin, smint) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, anint) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, bgint) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, dcmal) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, nmric) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, flt) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, bt) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, dt) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, tmstp) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, tm) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, ch) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, vch) from typeset; ifnull -------- - \x31 + 1 (1 row) select ifnull(bin, blb) from typeset; @@ -6395,75 +6395,75 @@ select ifnull(bin, txt) from typeset; (1 row) select ifnull(vbin, smint) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, anint) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, bgint) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, dcmal) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, nmric) from typeset; - ifnull 
------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, flt) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, bt) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, dt) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, tmstp) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, tm) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, ch) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, vch) from typeset; - ifnull ------------------------------------------- - \x323030312d30342d31392032323a32333a3434 + ifnull +--------------------- + 2001-04-19 22:23:44 (1 row) select ifnull(vbin, blb) from typeset; diff --git a/contrib/dolphin/include/plugin_postgres.h b/contrib/dolphin/include/plugin_postgres.h index 786459258..528bddfef 100644 --- a/contrib/dolphin/include/plugin_postgres.h +++ b/contrib/dolphin/include/plugin_postgres.h @@ -167,6 +167,8 @@ typedef struct BSqlPluginContext { bool group_by_error; bool is_create_alter_stmt; Alias *upSertAliasName; + bool isDoCopy; + char* performance_schema; #endif } bSqlPluginContext; diff 
--git a/contrib/dolphin/input/binary_export_test.source b/contrib/dolphin/input/binary_export_test.source new file mode 100644 index 000000000..b23b13a51 --- /dev/null +++ b/contrib/dolphin/input/binary_export_test.source @@ -0,0 +1,27 @@ +CREATE DATABASE b_source dbcompatibility 'B'; +CREATE DATABASE b_target dbcompatibility 'B'; +\c b_target +\c b_source +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; +create table t1 (a binary(255), b varbinary(255)); +insert into t1 values +(0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE, +0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE); +select * from t1; + +-- test about copy +copy t1 to '@abs_builddir@/data/binary.sql'; +create table t2 (a binary(255), b varbinary(255)); +copy t2 from '@abs_builddir@/data/binary.sql'; +select * from t2; + +-- test about dump +\! 
@abs_bindir@/gs_dump b_source -p @portstring@ -f @abs_bindir@/binary_dump.tar -F t >/dev/null 2>&1; echo $? +\! @abs_bindir@/gs_restore -d b_target -p @portstring@ @abs_bindir@/binary_dump.tar >/dev/null 2>&1; echo $? + +\c b_target +select * from t1; + +\c contrib_regression +drop database b_source; +drop database b_target; \ No newline at end of file diff --git a/contrib/dolphin/output/binary_export_test.source b/contrib/dolphin/output/binary_export_test.source new file mode 100644 index 000000000..804d13a8d --- /dev/null +++ b/contrib/dolphin/output/binary_export_test.source @@ -0,0 +1,43 @@ +CREATE DATABASE b_source dbcompatibility 'B'; +CREATE DATABASE b_target dbcompatibility 'B'; +\c b_target +\c b_source +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; +create table t1 (a binary(255), b varbinary(255)); +insert into t1 values +(0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE, 
+0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE); +select * from t1; + a | b +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \0\x01\x02\x03\x04\x05\x06\x07\x08 +| \0\x01\x02\x03\x04\x05\x06\x07\x08 + + \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F +(1 row) + +-- test about copy +copy t1 to '@abs_builddir@/data/binary.sql'; +create table t2 (a binary(255), b varbinary(255)); +copy t2 from '@abs_builddir@/data/binary.sql'; +select * from t2; + a | b +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \0\x01\x02\x03\x04\x05\x06\x07\x08 +| 
\0\x01\x02\x03\x04\x05\x06\x07\x08 + + \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F +(1 row) + +-- test about dump +\! @abs_bindir@/gs_dump b_source -p @portstring@ -f @abs_bindir@/binary_dump.tar -F t >/dev/null 2>&1; echo $? +0 +\! @abs_bindir@/gs_restore -d b_target -p @portstring@ @abs_bindir@/binary_dump.tar >/dev/null 2>&1; echo $? +0 +\c b_target +select * from t1; + a | b +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \0\x01\x02\x03\x04\x05\x06\x07\x08 +| \0\x01\x02\x03\x04\x05\x06\x07\x08 + + \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F +(1 row) + +\c contrib_regression +drop database b_source; +drop database b_target; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index fd70da736..e99dc5e76 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -137,7 +137,7 @@ test: like_default_test conv_cast_test read_only_guc_test b_database_guc union_a test: string_func_test/db_b_to_base64_test 
string_func_test/db_b_unhex_test bit_count test_current_user -test: test_schema connection_id test_system_user test_bit_xor union +test: test_schema connection_id test_system_user test_bit_xor union binary_export_test test: builtin_funcs/cast any_value_test default_function get_b_database union2 diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index 88b71770e..c36275182 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -943,6 +943,7 @@ void init_session_vars(void) cxt->is_ast_stmt = false; cxt->group_by_error = false; cxt->is_create_alter_stmt = false; + cxt->isDoCopy = false; DefineCustomBoolVariable("dolphin.b_compatibility_mode", "Enable mysql behavior override opengauss's when collision happens.", @@ -1252,6 +1253,16 @@ void init_session_vars(void) NULL, NULL, NULL); + DefineCustomStringVariable("performance_schema", + gettext_noop("CUSTOM_OPTIONS"), + NULL, + &GetSessionContext()->performance_schema, + "", + PGC_USERSET, + GUC_LIST_INPUT | GUC_REPORT, + NULL, + NULL, + NULL); #endif } diff --git a/contrib/dolphin/plugin_utility.cpp b/contrib/dolphin/plugin_utility.cpp index f9b717e81..a43c184e6 100644 --- a/contrib/dolphin/plugin_utility.cpp +++ b/contrib/dolphin/plugin_utility.cpp @@ -18,6 +18,7 @@ #include "postgres.h" #include "knl/knl_variable.h" +#include "plugin_postgres.h" #include "plugin_nodes/parsenodes_common.h" #include "plugin_commands/defrem.h" #include "nodes/print.h" @@ -3245,7 +3246,22 @@ void standard_ProcessUtility(processutility_context* processutility_cxt, uint64 processed; uint64 histhash; bool has_histhash; +#ifdef DOLPHIN + GetSessionContext()->isDoCopy = true; + PG_TRY(); + { + DoCopy((CopyStmt*)parse_tree, query_string, &processed); + } + PG_CATCH(); + { + GetSessionContext()->isDoCopy = false; + PG_RE_THROW(); + } + PG_END_TRY(); + GetSessionContext()->isDoCopy = false; +#else DoCopy((CopyStmt*)parse_tree, query_string, &processed); +#endif has_histhash 
= ((CopyStmt*)parse_tree)->hashstate.has_histhash; histhash = ((CopyStmt*)parse_tree)->hashstate.histhash; if (completion_tag != NULL) { diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index fa128c9f8..c4c8b4a3e 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -90,6 +90,7 @@ #define MYSQL_SUPPORT_MINUS_MAX_LENGTH 65 #define MAX_UINT32_STR "0xffffffff" #define MAXBI64LEN 25 +#define BINARY_LEN(len) ((len - 2) / 2) static long convert_bit_to_int (PG_FUNCTION_ARGS, int idx); static TimestampTz temporal_to_timestamptz(Oid type, int index, PG_FUNCTION_ARGS); @@ -402,6 +403,9 @@ extern "C" DLL_PUBLIC Datum Varlena2Text(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(Varlena2Bit); extern "C" DLL_PUBLIC Datum Varlena2Bit(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_binaryout); +extern "C" DLL_PUBLIC Datum dolphin_binaryout(PG_FUNCTION_ARGS); + #endif /***************************************************************************** @@ -698,7 +702,7 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */ result = (bytea*)palloc(bc); } else { - if (len > (size_t)(atttypmod - VARHDRSZ)) { + if (BINARY_LEN(len) > (size_t)(atttypmod - VARHDRSZ)) { ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); } @@ -10943,4 +10947,70 @@ Datum binary_length(PG_FUNCTION_ARGS) bytea* vlena = PG_GETARG_BYTEA_PP(0); PG_RETURN_INT32(VARSIZE_ANY_EXHDR(vlena)); } + +static Datum normal_dolphin_binaryout(PG_FUNCTION_ARGS) +{ + bytea* vlena = PG_GETARG_BYTEA_PP(0); + char* result = NULL; + char* rp = NULL; + char* vp = NULL; + int len; + int i; + + len = 1; + vp = VARDATA_ANY(vlena); + for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { + if (*vp == '\0') { + len += 2; + } else { + len++; + } + } + rp = result = (char*)palloc(len); + vp = 
VARDATA_ANY(vlena); + for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { + if (*vp == '\0') { + *rp++ = '\\'; + *rp++ = '0'; + } else { + *rp++ = *vp; + } + } + *rp = '\0'; + + /* free memory if allocated by the toaster */ + PG_FREE_IF_COPY(vlena, 0); + + PG_RETURN_CSTRING(result); +} + +static Datum hex_dolphin_binaryout(PG_FUNCTION_ARGS) +{ + bytea* vlena = PG_GETARG_BYTEA_PP(0); + char* result = NULL; + char* rp = NULL; + + /* Print hex format */ + rp = result = (char*)palloc(VARSIZE_ANY_EXHDR(vlena) * 2 + 2 + 1); + *rp++ = '\\'; + *rp++ = 'x'; + rp += hex_encode(VARDATA_ANY(vlena), VARSIZE_ANY_EXHDR(vlena), rp); + *rp = '\0'; + + /* free memory if allocated by the toaster */ + PG_FREE_IF_COPY(vlena, 0); + + PG_RETURN_CSTRING(result); +} + +Datum dolphin_binaryout(PG_FUNCTION_ARGS) +{ + if (strcmp(u_sess->attr.attr_common.application_name, "gs_dump") == 0 || + strcmp(u_sess->attr.attr_common.application_name, "gs_dumpall") == 0 || + GetSessionContext()->isDoCopy) { + return hex_dolphin_binaryout(fcinfo); + } else { + return normal_dolphin_binaryout(fcinfo); + } +} #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index acc4b67ad..40bbc5032 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -258,3 +258,7 @@ DROP FUNCTION IF EXISTS pg_catalog.hex(tinyblob); DROP FUNCTION IF EXISTS pg_catalog.hex(blob); DROP FUNCTION IF EXISTS pg_catalog.hex(mediumblob); DROP FUNCTION IF EXISTS pg_catalog.hex(longblob); + +CREATE OR REPLACE FUNCTION pg_catalog.binary_out (binary) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; + +CREATE OR REPLACE FUNCTION pg_catalog.varbinary_out (varbinary) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 116323c2e..626a70177 
100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -436,3 +436,6 @@ CREATE OR REPLACE FUNCTION pg_catalog.hex(tinyblob) RETURNS text LANGUAGE C IMMU CREATE OR REPLACE FUNCTION pg_catalog.hex(blob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; CREATE OR REPLACE FUNCTION pg_catalog.hex(mediumblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; CREATE OR REPLACE FUNCTION pg_catalog.hex(longblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; + +CREATE OR REPLACE FUNCTION pg_catalog.binary_out (binary) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_binaryout'; +CREATE OR REPLACE FUNCTION pg_catalog.varbinary_out (varbinary) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_binaryout'; \ No newline at end of file -- Gitee From 871e29261eb23771b130ccfd30203879ca337f47 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Fri, 15 Dec 2023 17:02:28 +0800 Subject: [PATCH 129/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E6=97=A0=E6=B3=95=E7=BC=96=E8=AF=91=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/plugin_utils/adt/format_type.cpp | 4 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 110 +++++- .../whale/plugin_utils/adt/format_type.cpp | 10 +- contrib/whale/plugin_utils/adt/ruleutils.cpp | 349 ++++++++++++++---- 4 files changed, 384 insertions(+), 89 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/format_type.cpp b/contrib/dolphin/plugin_utils/adt/format_type.cpp index 80436124d..a4ba9a48d 100644 --- a/contrib/dolphin/plugin_utils/adt/format_type.cpp +++ b/contrib/dolphin/plugin_utils/adt/format_type.cpp @@ -30,7 +30,7 @@ static char* format_type_internal( Oid type_oid, int32 typemod, bool typemod_given, bool allow_invalid, bool 
include_nspname = false); -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout); +char* printTypmod(const char* typname, int32 typmod, Oid typmodout); static char* psnprintf(size_t len, const char* fmt, ...) /* This lets gcc check the format string for consistency. */ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); @@ -360,7 +360,7 @@ static char* format_type_internal( /* * Add typmod decoration to the basic type name */ -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout) +char* printTypmod(const char* typname, int32 typmod, Oid typmodout) { char* res = NULL; diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index f437ed341..4dc571181 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -45,6 +45,7 @@ #include "catalog/pg_partition.h" #include "catalog/pg_partition_fn.h" #include "catalog/pg_proc.h" +#include "catalog/pg_rewrite.h" #include "catalog/pg_synonym.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" @@ -262,9 +263,9 @@ static void decompile_column_index_array(Datum column_index_array, Oid relId, St static char* pg_get_ruledef_worker(Oid ruleoid, int prettyFlags); static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, int prettyFlags, bool dumpSchemaOnly = false, bool showPartitionLocal = true, bool showSubpartitionLocal = true); -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal); -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags); +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, 
bool with_option=false); static text* pg_get_expr_worker(text* expr, Oid relid, const char* relname, int prettyFlags); static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool print_table_args, bool print_defaults); static void print_function_ora_arguments(StringInfo buf, HeapTuple proctup); @@ -333,7 +334,7 @@ static void get_from_clause_coldeflist( List* names, List* types, List* typmods, List* collations, deparse_context* context); static void get_tablesample_def(TableSampleClause* tablesample, deparse_context* context); static void GetTimecapsuleDef(const TimeCapsuleClause* timeCapsule, deparse_context* context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node* processIndirection(Node* node, deparse_context* context, bool printit); static void printSubscripts(ArrayRef* aref, deparse_context* context); static char* get_relation_name(Oid relid); @@ -342,7 +343,6 @@ static char* generate_function_name( Oid funcid, int nargs, List* argnames, Oid* argtypes, bool was_variadic, bool* use_variadic_p); static char* generate_operator_name(Oid operid, Oid arg1, Oid arg2); static text* string_to_text(char* str); -static char* flatten_reloptions(Oid relid); static Oid SearchSysTable(const char* query); static void replace_cl_types_in_argtypes(Oid func_id, int numargs, Oid* argtypes, bool *is_client_logic); @@ -355,6 +355,8 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo tableinfo); +static inline bool IsTableVisible(Oid tableoid); +static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoid, tableInfo tableinfo); /* from pgxcship */ Var* 
get_var_from_node(Node* node, bool (*func)(Oid) = func_oid_check_reject); @@ -1008,6 +1010,75 @@ void GetPartitionExprKeySrc(StringInfo buf, Datum* datum, char* relname, Oid tab pfree_ext(partkeystr); } +char *pg_get_partkeydef_string(Relation relation) +{ + OverrideSearchPath *tmp_search_path = NULL; + StringInfoData buf; + StringInfoData query; + tableInfo tableinfo; + + Form_pg_class classForm = NULL; + Oid tableoid = RelationGetRelid(relation); + if (IsTempTable(tableoid)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Can not get temporary tables partition defination."))); + } + + initStringInfo(&buf); + initStringInfo(&query); + classForm = relation->rd_rel; + tableinfo.relkind = classForm->relkind; + + if (tableinfo.relkind != RELKIND_RELATION) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Not a ordinary table or foreign table."))); + } + + tableinfo.relpersistence = classForm->relpersistence; + tableinfo.tablespace = classForm->reltablespace; + tableinfo.hasPartialClusterKey = classForm->relhasclusterkey; + tableinfo.hasindex = classForm->relhasindex; + tableinfo.relcmpr = classForm->relcmprs; + tableinfo.relrowmovement = classForm->relrowmovement; + tableinfo.parttype = classForm->parttype; + tableinfo.spcid = classForm->relnamespace; + tableinfo.relname = pstrdup(NameStr(classForm->relname)); + tableinfo.autoinc_attnum = 0; + tableinfo.autoinc_consoid = 0; + tableinfo.autoinc_seqoid = 0; + + tmp_search_path = GetOverrideSearchPath(CurrentMemoryContext); + tmp_search_path->schemas = NIL; + tmp_search_path->addCatalog = true; + tmp_search_path->addTemp = true; + PushOverrideSearchPath(tmp_search_path); + + /* + * Connect to SPI manager + */ + SPI_STACK_LOG("connect", NULL, NULL); + if (SPI_connect() != SPI_OK_CONNECT) + ereport(ERROR, (errcode(ERRCODE_SPI_CONNECTION_FAILURE), + errmsg("SPI_connect failed"))); + + PushActiveSnapshot(GetTransactionSnapshot()); + get_table_partitiondef(&query, &buf, tableoid, 
tableinfo); + PopActiveSnapshot(); + + /* + * Disconnect from SPI manager + */ + SPI_STACK_LOG("finish", NULL, NULL); + if (SPI_finish() != SPI_OK_FINISH) + ereport(ERROR, (errcode(ERRCODE_SPI_FINISH_FAILURE), + errmsg("SPI_finish failed"))); + + PopOverrideSearchPath(); + pfree_ext(query.data); + pfree_ext(tableinfo.relname); + return buf.data; +} + /* * @Description: get partition table defination * @in query - append query for SPI_execute. @@ -1253,11 +1324,11 @@ static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, Subpar StringInfo query = makeStringInfo(); appendStringInfo(query, - "SELECT /*+ hashjoin(p t) */ p.relname AS partname, " + "SELECT /*+ hashjoin(p t) */ p.relname AS partName, " "array_to_string(p.boundaries, ',') as partbound, " "array_to_string(p.boundaries, ''',''') as partboundstr, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' AND p.partstrategy = '%c' " "ORDER BY p.boundaries[1]::%s ASC", @@ -1333,7 +1404,7 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table appendStringInfo(query, "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -1416,7 +1487,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "array_to_string(p.boundaries, ''',''') as partboundstr, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY 
p.boundaries[1]::%s ASC", @@ -1453,13 +1524,13 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "pg_catalog.unnest(keys_array)::text AS key_value FROM ( " "SELECT oid, relname, reltablespace, bound_id,key_bounds::cstring[] AS keys_array FROM ( " "SELECT oid, relname, reltablespace, pg_catalog.unnest(boundaries) AS key_bounds, " - "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_partition " + "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c')))) " "GROUP BY oid, relname, reltablespace, bound_id) " "GROUP BY oid, relname, reltablespace " - "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_partition " + "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p " - "LEFT JOIN pg_tablespace t ON p.reltablespace = t.oid " + "LEFT JOIN pg_catalog.pg_tablespace t ON p.reltablespace = t.oid " "ORDER BY p.bound_def ASC", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST); @@ -1523,7 +1594,7 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "p.boundaries[1] AS partboundary, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -3448,7 +3519,7 @@ static void GetIndexdefForIntervalPartTabDumpSchemaOnly(Oid indexrelid, RangePar appendStringInfo(buf, ") "); } -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, 
Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal) { Oid relid = idxrec->indrelid; @@ -3831,7 +3902,12 @@ char* pg_get_constraintdef_string(Oid constraintId) return pg_get_constraintdef_worker(constraintId, true, 0); } -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags) +char* pg_get_constraintdef_part_string(Oid constraintId) +{ + return pg_get_constraintdef_worker(constraintId, false, 0, true); +} + +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, bool with_option) { HeapTuple tup; Form_pg_constraint conForm; @@ -4081,7 +4157,7 @@ static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int indexId = get_constraint_index(constraintId); /* XXX why do we only print these bits if fullCommand? */ - if (fullCommand && OidIsValid(indexId)) { + if ((fullCommand || with_option) && OidIsValid(indexId)) { char* options = flatten_reloptions(indexId); Oid tblspc; @@ -12201,7 +12277,7 @@ static void get_from_clause_coldeflist( * actual_datatype. (If you don't want this behavior, just pass * InvalidOid for actual_datatype.) */ -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) { HeapTuple ht_opc; Form_pg_opclass opcrec; @@ -12723,7 +12799,7 @@ static text* string_to_text(char* str) /* * Generate a C string representing a relation's reloptions, or NULL if none. 
*/ -static char* flatten_reloptions(Oid relid) +char* flatten_reloptions(Oid relid) { char* result = NULL; HeapTuple tuple; diff --git a/contrib/whale/plugin_utils/adt/format_type.cpp b/contrib/whale/plugin_utils/adt/format_type.cpp index 1f2a8379f..a4ba9a48d 100644 --- a/contrib/whale/plugin_utils/adt/format_type.cpp +++ b/contrib/whale/plugin_utils/adt/format_type.cpp @@ -30,7 +30,7 @@ static char* format_type_internal( Oid type_oid, int32 typemod, bool typemod_given, bool allow_invalid, bool include_nspname = false); -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout); +char* printTypmod(const char* typname, int32 typmod, Oid typmodout); static char* psnprintf(size_t len, const char* fmt, ...) /* This lets gcc check the format string for consistency. */ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); @@ -94,7 +94,8 @@ char* format_type_be(Oid type_oid) return format_type_internal(type_oid, -1, false, false); } -char* format_type_be_qualified(Oid type_oid) +char * +format_type_be_qualified(Oid type_oid) { return format_type_internal(type_oid, -1, false, false, true); } @@ -359,7 +360,7 @@ static char* format_type_internal( /* * Add typmod decoration to the basic type name */ -static char* printTypmod(const char* typname, int32 typmod, Oid typmodout) +char* printTypmod(const char* typname, int32 typmod, Oid typmodout) { char* res = NULL; @@ -460,7 +461,8 @@ Datum oidvectortypes(PG_FUNCTION_ARGS) left -= slen; } - if ((!strcmp(result, "")) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !RETURN_NS) { + if ((!strcmp(result, "")) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && + !ACCEPT_EMPTY_STR && !RETURN_NS) { pfree_ext(result); PG_RETURN_NULL(); } diff --git a/contrib/whale/plugin_utils/adt/ruleutils.cpp b/contrib/whale/plugin_utils/adt/ruleutils.cpp index 87113f898..26a873a4a 100644 --- a/contrib/whale/plugin_utils/adt/ruleutils.cpp +++ b/contrib/whale/plugin_utils/adt/ruleutils.cpp @@ -43,6 +43,7 @@ #include 
"catalog/pg_partition.h" #include "catalog/pg_partition_fn.h" #include "catalog/pg_proc.h" +#include "catalog/pg_rewrite.h" #include "catalog/pg_synonym.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" @@ -246,9 +247,9 @@ static void decompile_column_index_array(Datum column_index_array, Oid relId, St static char* pg_get_ruledef_worker(Oid ruleoid, int prettyFlags); static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, int prettyFlags, bool dumpSchemaOnly = false, bool showPartitionLocal = true, bool showSubpartitionLocal = true); -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal); -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags); +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, bool with_option=false); static text* pg_get_expr_worker(text* expr, Oid relid, const char* relname, int prettyFlags); static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool print_table_args, bool print_defaults); static void print_function_ora_arguments(StringInfo buf, HeapTuple proctup); @@ -276,6 +277,9 @@ static void get_basic_select_query(Query* query, deparse_context* context, Tuple static void get_target_list(Query* query, List* targetList, deparse_context* context, TupleDesc resultDesc); static void get_setop_query(Node* setOp, Query* query, deparse_context* context, TupleDesc resultDesc); static Node* get_rule_sortgroupclause(Index ref, List* tlist, bool force_colno, deparse_context* context); +#ifdef USE_SPQ +static Node* get_rule_sortgroupclause_spq(Index ref, bool force_colno, deparse_context* context); +#endif static void 
get_rule_groupingset(GroupingSet* gset, List* targetlist, deparse_context* context); static void get_rule_orderby(List* orderList, List* targetList, bool force_colno, deparse_context* context); static void get_rule_windowclause(Query* query, deparse_context* context); @@ -314,7 +318,7 @@ static void get_from_clause_coldeflist( List* names, List* types, List* typmods, List* collations, deparse_context* context); static void get_tablesample_def(TableSampleClause* tablesample, deparse_context* context); static void GetTimecapsuleDef(const TimeCapsuleClause* timeCapsule, deparse_context* context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node* processIndirection(Node* node, deparse_context* context, bool printit); static void printSubscripts(ArrayRef* aref, deparse_context* context); static char* get_relation_name(Oid relid); @@ -323,7 +327,6 @@ static char* generate_function_name( Oid funcid, int nargs, List* argnames, Oid* argtypes, bool was_variadic, bool* use_variadic_p); static char* generate_operator_name(Oid operid, Oid arg1, Oid arg2); static text* string_to_text(char* str); -static char* flatten_reloptions(Oid relid); static Oid SearchSysTable(const char* query); static void replace_cl_types_in_argtypes(Oid func_id, int numargs, Oid* argtypes, bool *is_client_logic); @@ -336,6 +339,8 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo tableinfo); +static inline bool IsTableVisible(Oid tableoid); +static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoid, tableInfo tableinfo); /* from pgxcship */ Var* get_var_from_node(Node* node, bool (*func)(Oid) = 
func_oid_check_reject); @@ -985,6 +990,75 @@ void GetPartitionExprKeySrc(StringInfo buf, Datum* datum, char* relname, Oid tab pfree_ext(partkeystr); } +char *pg_get_partkeydef_string(Relation relation) +{ + OverrideSearchPath *tmp_search_path = NULL; + StringInfoData buf; + StringInfoData query; + tableInfo tableinfo; + + Form_pg_class classForm = NULL; + Oid tableoid = RelationGetRelid(relation); + if (IsTempTable(tableoid)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Can not get temporary tables partition defination."))); + } + + initStringInfo(&buf); + initStringInfo(&query); + classForm = relation->rd_rel; + tableinfo.relkind = classForm->relkind; + + if (tableinfo.relkind != RELKIND_RELATION) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Not a ordinary table or foreign table."))); + } + + tableinfo.relpersistence = classForm->relpersistence; + tableinfo.tablespace = classForm->reltablespace; + tableinfo.hasPartialClusterKey = classForm->relhasclusterkey; + tableinfo.hasindex = classForm->relhasindex; + tableinfo.relcmpr = classForm->relcmprs; + tableinfo.relrowmovement = classForm->relrowmovement; + tableinfo.parttype = classForm->parttype; + tableinfo.spcid = classForm->relnamespace; + tableinfo.relname = pstrdup(NameStr(classForm->relname)); + tableinfo.autoinc_attnum = 0; + tableinfo.autoinc_consoid = 0; + tableinfo.autoinc_seqoid = 0; + + tmp_search_path = GetOverrideSearchPath(CurrentMemoryContext); + tmp_search_path->schemas = NIL; + tmp_search_path->addCatalog = true; + tmp_search_path->addTemp = true; + PushOverrideSearchPath(tmp_search_path); + + /* + * Connect to SPI manager + */ + SPI_STACK_LOG("connect", NULL, NULL); + if (SPI_connect() != SPI_OK_CONNECT) + ereport(ERROR, (errcode(ERRCODE_SPI_CONNECTION_FAILURE), + errmsg("SPI_connect failed"))); + + PushActiveSnapshot(GetTransactionSnapshot()); + get_table_partitiondef(&query, &buf, tableoid, tableinfo); + PopActiveSnapshot(); + + /* + * Disconnect 
from SPI manager + */ + SPI_STACK_LOG("finish", NULL, NULL); + if (SPI_finish() != SPI_OK_FINISH) + ereport(ERROR, (errcode(ERRCODE_SPI_FINISH_FAILURE), + errmsg("SPI_finish failed"))); + + PopOverrideSearchPath(); + pfree_ext(query.data); + pfree_ext(tableinfo.relname); + return buf.data; +} + /* * @Description: get partition table defination * @in query - append query for SPI_execute. @@ -1234,7 +1308,7 @@ static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, Subpar "array_to_string(p.boundaries, ',') as partbound, " "array_to_string(p.boundaries, ''',''') as partboundstr, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' AND p.partstrategy = '%c' " "ORDER BY p.boundaries[1]::%s ASC", @@ -1310,7 +1384,7 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table appendStringInfo(query, "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -1393,7 +1467,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "array_to_string(p.boundaries, ''',''') as partboundstr, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC", @@ -1430,13 +1504,13 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "pg_catalog.unnest(keys_array)::text AS key_value FROM ( " "SELECT oid, relname, reltablespace, 
bound_id,key_bounds::cstring[] AS keys_array FROM ( " "SELECT oid, relname, reltablespace, pg_catalog.unnest(boundaries) AS key_bounds, " - "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_partition " + "pg_catalog.generate_subscripts(boundaries, 1) AS bound_id FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c')))) " "GROUP BY oid, relname, reltablespace, bound_id) " "GROUP BY oid, relname, reltablespace " - "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_partition " + "UNION ALL SELECT oid, relname, reltablespace, 'DEFAULT' AS bound_def FROM pg_catalog.pg_partition " "WHERE parentid = %u AND parttype = '%c' AND partstrategy = '%c' AND boundaries[1] IS NULL) p " - "LEFT JOIN pg_tablespace t ON p.reltablespace = t.oid " + "LEFT JOIN pg_catalog.pg_tablespace t ON p.reltablespace = t.oid " "ORDER BY p.bound_def ASC", tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST); @@ -1500,7 +1574,7 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl "p.boundaries[1] AS partboundary, " "p.oid AS partoid, " "t.spcname AS reltblspc " - "FROM pg_partition p LEFT JOIN pg_tablespace t " + "FROM pg_catalog.pg_partition p LEFT JOIN pg_catalog.pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " "AND p.partstrategy = '%c' ORDER BY ", @@ -3193,9 +3267,11 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) } if (tgfbody != NULL) { - char* tgordername = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgordername, tgrel->rd_att, &isnull)); - char* tgorder = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgorder, tgrel->rd_att, &isnull)); - if (tgorder != NULL) + bool isordernull = false; + bool isordernamenull = false; + char* tgordername = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgordername, tgrel->rd_att, 
&isordernamenull)); + char* tgorder = DatumGetCString(fastgetattr(ht_trig, Anum_pg_trigger_tgorder, tgrel->rd_att, &isordernull)); + if (!isordernull && !isordernamenull) appendStringInfo(&buf, "%s %s ", tgorder, tgordername); appendStringInfo(&buf, "%s;", tgfbody); @@ -3364,7 +3440,7 @@ static void GetIndexdefForIntervalPartTabDumpSchemaOnly(Oid indexrelid, RangePar appendStringInfo(buf, ") "); } -static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, +void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, bool dumpSchemaOnly, bool showPartitionLocal, bool showSubpartitionLocal) { Oid relid = idxrec->indrelid; @@ -3747,7 +3823,12 @@ char* pg_get_constraintdef_string(Oid constraintId) return pg_get_constraintdef_worker(constraintId, true, 0); } -static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags) +char* pg_get_constraintdef_part_string(Oid constraintId) +{ + return pg_get_constraintdef_worker(constraintId, false, 0, true); +} + +static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags, bool with_option) { HeapTuple tup; Form_pg_constraint conForm; @@ -3993,7 +4074,7 @@ static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int indexId = get_constraint_index(constraintId); /* XXX why do we only print these bits if fullCommand? 
*/ - if (fullCommand && OidIsValid(indexId)) { + if ((fullCommand || with_option) && OidIsValid(indexId)) { char* options = flatten_reloptions(indexId); Oid tblspc; @@ -5284,6 +5365,10 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) /* index_tlist is set only if it's an IndexOnlyScan */ if (IsA(ps->plan, IndexOnlyScan)) dpns->index_tlist = ((IndexOnlyScan*)ps->plan)->indextlist; +#ifdef USE_SPQ + else if IsA(ps->plan, SpqIndexOnlyScan) + dpns->index_tlist = ((IndexOnlyScan*)ps->plan)->indextlist; +#endif else if (IsA(ps->plan, ForeignScan)) dpns->index_tlist = ((ForeignScan *)ps->plan)->fdw_scan_tlist; else if (IsA(ps->plan, ExtensiblePlan)) @@ -5797,6 +5882,11 @@ static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, i void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool finalise_aggs, bool sortgroup_colno, void* parserArg, bool qrw_phase, bool is_fqs) { + if (u_sess->hook_cxt.deparseQueryHook != NULL) { + ((deparse_query_func)(u_sess->hook_cxt.deparseQueryHook))(query, buf, parentnamespace, + finalise_aggs, sortgroup_colno, parserArg, qrw_phase, is_fqs); + return; + } OverrideSearchPath* tmp_search_path = NULL; List* schema_list = NIL; ListCell* schema = NULL; @@ -6685,7 +6775,23 @@ static void get_setop_query(Node* setOp, Query* query, deparse_context* context, if (context->qrw_phase) get_setop_query(subquery->setOperations, subquery, context, resultDesc); else - Assert(false); + get_query_def(subquery, + buf, + context->namespaces, + resultDesc, + context->prettyFlags, + context->wrapColumn, + context->indentLevel +#ifdef PGXC + , + context->finalise_aggs, + context->sortgroup_colno, + context->parser_arg +#endif /* PGXC */ + , + context->qrw_phase, + context->viewdef, + context->is_fqs); } if (need_paren) @@ -6782,6 +6888,59 @@ static Node* get_rule_sortgroupclause(Index ref, List* tlist, bool force_colno, return expr; } +#ifdef USE_SPQ +/* + * Display a sort/group clause. 
+ * + * Also returns the expression tree, so caller need not find it again. + */ + +static Node* get_rule_sortgroupclause_spq(Index ref, bool force_colno, deparse_context* context) +{ + StringInfo buf = context->buf; + TargetEntry* tle = NULL; + Node* expr = NULL; + List* tlist; + + deparse_namespace* dpns_spq = (deparse_namespace*)linitial(context->namespaces); + PlanState* ps = dpns_spq->planstate; + WindowAgg* node = NULL; + node = (WindowAgg*)ps->plan; + tlist = node->plan.lefttree->targetlist; + + if (tlist == NULL){ + return expr; + } + + tle = get_sortgroupref_tle_spq(ref, tlist); + expr = (Node*)tle->expr; + + deparse_namespace* dpns = NULL; + deparse_namespace save_dpns; + + dpns = (deparse_namespace*)list_nth(context->namespaces, ((Var*)expr)->varlevelsup); + push_child_plan(dpns, dpns->outer_planstate, &save_dpns); + + + /* + * Use column-number form if requested by caller. Otherwise, if + * expression is a constant, force it to be dumped with an explicit cast + * as decoration --- this is because a simple integer constant is + * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we + * dump it without any decoration. Otherwise, just dump the expression + * normally. + */ + if (force_colno || context->sortgroup_colno) { + Assert(!tle->resjunk); + appendStringInfo(buf, "%d", tle->resno); + } else if (expr && IsA(expr, Var)) + get_rule_expr(expr, context, true); + + pop_child_plan(dpns, &save_dpns); + + return expr; +} +#endif /* * @Description: Display a GroupingSet. 
@@ -6841,6 +7000,24 @@ static void get_rule_groupingset(GroupingSet* gset, List* targetlist, deparse_co appendStringInfoString(buf, ")"); } +static void get_rule_separator(Const* con, StringInfo buf) +{ + Oid typoutput; + char* extval = NULL; + bool typIsVarlena = false; + + appendStringInfoString(buf, "\'"); + if (u_sess->exec_cxt.under_auto_explain) { + appendStringInfoString(buf, "***"); + } else if (!con->constisnull) { + getTypeOutputInfo(con->consttype, &typoutput, &typIsVarlena); + extval = OidOutputFunctionCall(typoutput, con->constvalue); + appendStringInfoString(buf, extval); + pfree_ext(extval); + } + appendStringInfoChar(buf, '\''); +} + /* * Display an ORDER BY list. */ @@ -6858,7 +7035,15 @@ static void get_rule_orderby(List* orderList, List* targetList, bool force_colno TypeCacheEntry* typentry = NULL; appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && (list_length(context->windowClause) > 0) && + lfirst(list_head(context->windowClause)) != NULL && + ((WindowClause *)lfirst(list_head(context->windowClause)))->reOrderSPQ) { + sortexpr = get_rule_sortgroupclause_spq(srt->tleSortGroupRef, force_colno, context); + } else +#endif + sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); + sortcoltype = exprType(sortexpr); /* See whether operator is default < or > for datatype */ typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); @@ -6940,7 +7125,12 @@ static void get_rule_windowspec(WindowClause* wc, List* targetList, deparse_cont SortGroupClause* grp = (SortGroupClause*)lfirst(l); appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && wc->rePartitionSPQ) { + get_rule_sortgroupclause_spq(grp->tleSortGroupRef, false, context); + } else +#endif + 
get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); sep = ", "; } needspace = true; @@ -7039,7 +7229,13 @@ static void get_rule_windowspec_listagg(WindowClause* wc, List* targetList, depa SortGroupClause* grp = (SortGroupClause*)lfirst(l); appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); +#ifdef USE_SPQ + if (IS_SPQ_COORDINATOR && wc->rePartitionSPQ) { + get_rule_sortgroupclause_spq(grp->tleSortGroupRef, false, context); + } else +#endif + get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); + sep = ", "; } needspace = true; @@ -10661,18 +10857,9 @@ static void get_agg_expr(Aggref* aggref, deparse_context* context) } if (pg_strcasecmp(funcname, "group_concat") == 0) { - Oid typoutput; - char* extval = NULL; - bool typIsVarlena = false; - /* parse back the first argument as separator */ - TargetEntry* tle = (TargetEntry*)lfirst(list_head(aggref->args)); - getTypeOutputInfo(((Const*)tle->expr)->consttype, &typoutput, &typIsVarlena); - extval = OidOutputFunctionCall(typoutput, ((Const*)tle->expr)->constvalue); - - appendStringInfoString(buf, " SEPARATOR '"); - appendStringInfoString(buf, extval); - appendStringInfoChar(buf, '\''); - pfree_ext(extval); + appendStringInfoString(buf, " SEPARATOR "); + Const* con = (Const*)(((TargetEntry*)lfirst(list_head(aggref->args)))->expr); + get_rule_separator(con, buf); } } @@ -10699,23 +10886,38 @@ static bool construct_partitionClause(WindowAgg* node, WindowClause* wc) * ressortgroupref refers to windowagg's tlist * partColIdx refers to subplan's tlist */ - ListCell *lc = NULL; - foreach(lc, node->plan.targetlist) { - TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); - if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && - _equalSimpleVar(tle->expr, window_agg_te->expr)) { - if (window_agg_te->ressortgroupref > 0) { - partcl->tleSortGroupRef = window_agg_te->ressortgroupref; - /* found it */ - break; +#ifdef 
USE_SPQ + wc->rePartitionSPQ = false; + if (IS_SPQ_COORDINATOR) { + if (IsA(tle->expr, Var)) { + Var* tle_expr = (Var*)tle->expr; + partcl->tleSortGroupRef = tle_expr->varattno; + wc->rePartitionSPQ = true; + } else { + list_free_ext(partitionClause); + return false; + } + } else +#endif + { + ListCell *lc = NULL; + foreach(lc, node->plan.targetlist) { + TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); + if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && + _equalSimpleVar(tle->expr, window_agg_te->expr)) { + if (window_agg_te->ressortgroupref > 0) { + partcl->tleSortGroupRef = window_agg_te->ressortgroupref; + /* found it */ + break; + } } } - } - if (lc == NULL) { - /* not found */ - list_free_ext(partitionClause); - return false; + if (lc == NULL) { + /* not found */ + list_free_ext(partitionClause); + return false; + } } partcl->eqop = node->partOperators[i]; @@ -10762,26 +10964,41 @@ static void construct_windowClause(deparse_context* context) } /* - * ressortgroupref refers to windowagg's tlist - * partColIdx refers to subplan's tlist - */ - ListCell *lc = NULL; - foreach(lc, node->plan.targetlist) { - TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); - if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) && - _equalSimpleVar(tle->expr, window_agg_te->expr)) { - if (window_agg_te->ressortgroupref > 0) { - sortcl->tleSortGroupRef = window_agg_te->ressortgroupref; - /* found it */ - break; + * ressortgroupref refers to windowagg's tlist + * partColIdx refers to subplan's tlist + */ +#ifdef USE_SPQ + wc->reOrderSPQ = false; + if (IS_SPQ_COORDINATOR) { + if (IsA(tle->expr, Var)) { + Var* tle_expr = (Var*)tle->expr; + sortcl->tleSortGroupRef = tle_expr->varattno; + wc->reOrderSPQ = true; + } else { + list_free_ext(orderClause); + return; + } + } else +#endif + { + ListCell *lc = NULL; + foreach(lc, node->plan.targetlist) { + TargetEntry *window_agg_te = (TargetEntry *)lfirst(lc); + if (IsA(tle->expr, Var) && IsA(window_agg_te->expr, Var) 
&& + _equalSimpleVar(tle->expr, window_agg_te->expr)) { + if (window_agg_te->ressortgroupref > 0) { + sortcl->tleSortGroupRef = window_agg_te->ressortgroupref; + /* found it */ + break; + } } } - } - if (lc == NULL) { - list_free_ext(orderClause); - /* not found */ - return; + if (lc == NULL) { + list_free_ext(orderClause); + /* not found */ + return; + } } sortcl->sortop = node->ordOperators[i]; @@ -11924,7 +12141,7 @@ static void get_from_clause_coldeflist( * actual_datatype. (If you don't want this behavior, just pass * InvalidOid for actual_datatype.) */ -static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) +void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) { HeapTuple ht_opc; Form_pg_opclass opcrec; @@ -12428,7 +12645,7 @@ static text* string_to_text(char* str) /* * Generate a C string representing a relation's reloptions, or NULL if none. */ -static char* flatten_reloptions(Oid relid) +char* flatten_reloptions(Oid relid) { char* result = NULL; HeapTuple tuple; -- Gitee From 43466d5059ffe5459df2422667bca1b31f38c39e Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Thu, 14 Dec 2023 19:26:42 +0800 Subject: [PATCH 130/434] =?UTF-8?q?=E8=A7=A3=E5=86=B3license=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/og-timescaledb1.7.4.sql | 2 - contrib/timescaledb/src/cache.h | 13 +- contrib/timescaledb/src/cache_invalidate.h | 26 +++ contrib/timescaledb/src/compat.h | 3 + contrib/timescaledb/src/init.cpp | 69 ------ contrib/timescaledb/src/loader/CMakeLists.txt | 95 +++++++- contrib/timescaledb/src/loader/loader.cpp | 3 +- .../timescaledb/src/loader/tsdb_loader.cpp | 77 ------- contrib/timescaledb/src/planner.h | 2 + contrib/timescaledb/src/process_utility.h | 2 + contrib/timescaledb/src/tsdb.cpp | 97 ++++++++ contrib/timescaledb/src/tsdb.h | 4 + contrib/timescaledb/tsl/src/CMakeLists.txt | 210 +++++++++--------- 
contrib/timescaledb/tsl/src/tsdb_tsl.cpp | 171 -------------- 14 files changed, 337 insertions(+), 437 deletions(-) create mode 100644 contrib/timescaledb/src/cache_invalidate.h delete mode 100644 contrib/timescaledb/src/loader/tsdb_loader.cpp delete mode 100644 contrib/timescaledb/tsl/src/tsdb_tsl.cpp diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 74040b0e0..3157b2632 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -2422,5 +2422,3 @@ END IF; END; $$; - -select _timescaledb_internal.print_license_expiration_info(); diff --git a/contrib/timescaledb/src/cache.h b/contrib/timescaledb/src/cache.h index 05d9afa7a..4906b60a1 100644 --- a/contrib/timescaledb/src/cache.h +++ b/contrib/timescaledb/src/cache.h @@ -81,9 +81,11 @@ typedef struct tsdb_session_context { struct config_enum_entry tsdb_telemetry_level_options[3]; TelemetryLevel tsdb_on_level; bool tsdb_first_start; -} tsdb_session_context; - + char *tsdb_ts_guc_license_key; + bool tsdb_loaded; + bool tsdb_loader_present; +} tsdb_session_context; extern void ts_cache_init(Cache *cache); extern void ts_cache_invalidate(Cache *cache); @@ -95,14 +97,7 @@ extern MemoryContext ts_cache_memory_ctx(Cache *cache); extern Cache *ts_cache_pin(Cache *cache); extern TSDLLEXPORT int ts_cache_release(Cache *cache); - - extern void _cache_init(void); extern void _cache_fini(void); -extern "C" void set_extension_index(uint32 index); -extern "C" void init_session_vars(void); - -extern tsdb_session_context* get_session_context(bool is_from_PG_init=false); - #endif /* TIMESCALEDB_CACHE_H */ diff --git a/contrib/timescaledb/src/cache_invalidate.h b/contrib/timescaledb/src/cache_invalidate.h new file mode 100644 index 000000000..dbd368e54 --- /dev/null +++ b/contrib/timescaledb/src/cache_invalidate.h @@ -0,0 +1,26 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. 
+ * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ +#ifndef CACHE_INVALIDATE_H +#define CACHE_INVALIDATE_H +#include +#include +#include +#include +#include +#include +#include + +#include "catalog.h" +#include "compat.h" +#include "extension.h" +#include "hypertable_cache.h" + +#include "bgw/scheduler.h" + +extern void _cache_invalidate_init(void); +extern void _cache_invalidate_fini(void); + +#endif /* CACHE_INVALIDATE_H */ \ No newline at end of file diff --git a/contrib/timescaledb/src/compat.h b/contrib/timescaledb/src/compat.h index 5f2274d41..88da9d1ce 100644 --- a/contrib/timescaledb/src/compat.h +++ b/contrib/timescaledb/src/compat.h @@ -66,6 +66,9 @@ #define on_level (get_session_context()->tsdb_on_level) #define telemetry_level_options (get_session_context()->tsdb_telemetry_level_options) #define tsdb_first_start (get_session_context(true)->tsdb_first_start) + +#define loaded (get_session_context()->tsdb_loaded) +#define loader_present (get_session_context()->tsdb_loader_present) /* * The following are compatibility functions for different versions of * PostgreSQL. 
Each compatibility function (or group) has its own logic for diff --git a/contrib/timescaledb/src/init.cpp b/contrib/timescaledb/src/init.cpp index 281ecd727..cde3bb3d2 100644 --- a/contrib/timescaledb/src/init.cpp +++ b/contrib/timescaledb/src/init.cpp @@ -27,7 +27,6 @@ #include "chunk_append/exec.h" -static uint32 tsdb_index; #ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; #endif @@ -66,10 +65,6 @@ extern void _chunk_append_init(); extern void TSDLLEXPORT _PG_init(void); extern void TSDLLEXPORT _PG_fini(void); -extern "C" void set_extension_index(uint32 index); -extern "C" void init_session_vars(void); - -extern tsdb_session_context* get_session_context(bool is_from_PG_init); TS_FUNCTION_INFO_V1(ts_post_load_init); @@ -142,67 +137,3 @@ ts_post_load_init(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - - - -void set_extension_index(uint32 index) { - tsdb_index = index; -} - - - - -tsdb_session_context* get_session_context(bool is_from_PG_init) -{ - if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] == NULL && !is_from_PG_init) { - init_session_vars(); - } - return (tsdb_session_context*)u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]; -} - -void init_session_vars(void) -{ - if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]!=NULL) - return - - RepallocSessionVarsArrayIfNecessary(); - tsdb_session_context* psc = (tsdb_session_context*)MemoryContextAllocZero(u_sess->self_mem_cxt, sizeof(tsdb_session_context)); - u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] = psc; - psc->tsdb_pinned_caches = NIL; - psc->tsdb_pinned_caches_mctx = NULL; - psc->tsdb_hypertable_cache_current = NULL; - psc->tsdb_planner_hcaches = NIL; - psc->tsdb_TS_CTE_EXPAND= "ts_expand"; - - - psc->tsdb_chunk_append_plan_methods = { - .ExtensibleName = "ChunkAppend", - .CreateExtensiblePlanState = ts_chunk_append_state_create, - }; - - psc->tsdb_telemetry_level_options[0] = { "off", TELEMETRY_OFF, false }; - 
psc->tsdb_telemetry_level_options[1] = { "basic", TELEMETRY_BASIC, false }; - psc->tsdb_telemetry_level_options[2] = { NULL, 0, false }; - psc->tsdb_on_level = TELEMETRY_BASIC; - - - psc->tsdb_expect_chunk_modification = false; - _constraint_aware_append_init(); - - - if (!ts_extension_is_loaded()) - return; - - - if (!tsdb_first_start) - { - _cache_init(); - _hypertable_cache_init(); - _cache_invalidate_init(); - - _planner_init(); - _chunk_append_init(); - _process_utility_init(); - } - -} \ No newline at end of file diff --git a/contrib/timescaledb/src/loader/CMakeLists.txt b/contrib/timescaledb/src/loader/CMakeLists.txt index 6c2145398..3ba778850 100644 --- a/contrib/timescaledb/src/loader/CMakeLists.txt +++ b/contrib/timescaledb/src/loader/CMakeLists.txt @@ -5,7 +5,8 @@ set(SOURCES bgw_launcher.cpp bgw_interface.cpp lwlocks.cpp - tsdb_loader.cpp + ${PROJECT_SOURCE_DIR}/src/agg_bookend.cpp + ${PROJECT_SOURCE_DIR}/src/base64_compat.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_dsm.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_shm.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_extension.cpp @@ -13,7 +14,99 @@ set(SOURCES ${PROJECT_SOURCE_DIR}/src/tsdb_static.cpp ${PROJECT_SOURCE_DIR}/src/tsdb_static2.cpp ${PROJECT_SOURCE_DIR}/src/tsdb.cpp + ${PROJECT_SOURCE_DIR}/src/func_cache.cpp + ${PROJECT_SOURCE_DIR}/src/cache.cpp + ${PROJECT_SOURCE_DIR}/src/cache_invalidate.cpp + ${PROJECT_SOURCE_DIR}/src/catalog.cpp + ${PROJECT_SOURCE_DIR}/src/continuous_agg.cpp + ${PROJECT_SOURCE_DIR}/src/chunk.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_adaptive.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_constraint.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch_plan.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch_state.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_index.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_insert_state.cpp + ${PROJECT_SOURCE_DIR}/src/constraint_aware_append.cpp + ${PROJECT_SOURCE_DIR}/src/cross_module_fn.cpp + ${PROJECT_SOURCE_DIR}/src/copy.cpp + 
${PROJECT_SOURCE_DIR}/src/compression_chunk_size.cpp + ${PROJECT_SOURCE_DIR}/src/compression_with_clause.cpp + ${PROJECT_SOURCE_DIR}/src/debug_wait.cpp + ${PROJECT_SOURCE_DIR}/src/dimension.cpp + ${PROJECT_SOURCE_DIR}/src/dimension_slice.cpp + ${PROJECT_SOURCE_DIR}/src/dimension_vector.cpp + ${PROJECT_SOURCE_DIR}/src/estimate.cpp + ${PROJECT_SOURCE_DIR}/src/event_trigger.cpp + ${PROJECT_SOURCE_DIR}/src/extension.cpp + ${PROJECT_SOURCE_DIR}/src/gapfill.cpp + ${PROJECT_SOURCE_DIR}/src/guc.cpp + ${PROJECT_SOURCE_DIR}/src/histogram.cpp + ${PROJECT_SOURCE_DIR}/src/hypercube.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_cache.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_compression.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_insert.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_restrict_info.cpp + ${PROJECT_SOURCE_DIR}/src/indexing.cpp + ${PROJECT_SOURCE_DIR}/src/interval.cpp + ${PROJECT_SOURCE_DIR}/src/metadata.cpp + ${PROJECT_SOURCE_DIR}/src/jsonb_utils.cpp + ${PROJECT_SOURCE_DIR}/src/license_guc.cpp + ${PROJECT_SOURCE_DIR}/src/partitioning.cpp + ${PROJECT_SOURCE_DIR}/src/planner.cpp + ${PROJECT_SOURCE_DIR}/src/plan_expand_hypertable.cpp + ${PROJECT_SOURCE_DIR}/src/plan_add_hashagg.cpp + ${PROJECT_SOURCE_DIR}/src/plan_agg_bookend.cpp + ${PROJECT_SOURCE_DIR}/src/plan_partialize.cpp + ${PROJECT_SOURCE_DIR}/src/process_utility.cpp + ${PROJECT_SOURCE_DIR}/src/scanner.cpp + ${PROJECT_SOURCE_DIR}/src/scan_iterator.cpp + ${PROJECT_SOURCE_DIR}/src/sort_transform.cpp + ${PROJECT_SOURCE_DIR}/src/subspace_store.cpp + ${PROJECT_SOURCE_DIR}/src/tablespace.cpp + ${PROJECT_SOURCE_DIR}/src/time_bucket.cpp + ${PROJECT_SOURCE_DIR}/src/custom_type_cache.cpp + ${PROJECT_SOURCE_DIR}/src/trigger.cpp + ${PROJECT_SOURCE_DIR}/src/utils.cpp + ${PROJECT_SOURCE_DIR}/src/version.cpp + ${PROJECT_SOURCE_DIR}/src/with_clause_parser.cpp + + ${PROJECT_SOURCE_DIR}/src/chunk_append/chunk_append.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/exec.cpp + 
${PROJECT_SOURCE_DIR}/src/chunk_append/explain.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/planner.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/transform.cpp + + ${PROJECT_SOURCE_DIR}/src/bgw_policy/reorder.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/drop_chunks.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/compress_chunks.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/policy.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/chunk_stats.cpp + + ${PROJECT_SOURCE_DIR}/src/import/planner.cpp + + ${PROJECT_SOURCE_DIR}/src/compat/tableam.cpp + ${PROJECT_SOURCE_DIR}/src/compat/tupconvert.cpp + ${PROJECT_SOURCE_DIR}/src/compat/tuptable.cpp + ${PROJECT_SOURCE_DIR}/src/compat/fkeylist.cpp + + ${PROJECT_SOURCE_DIR}/src/bgw/job.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/job_stat.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/launcher_interface.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/scheduler.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/timer.cpp + + ${PROJECT_SOURCE_DIR}/src/telemetry/uuid.cpp + ${PROJECT_SOURCE_DIR}/src/telemetry/telemetry_metadata.cpp + ${PROJECT_SOURCE_DIR}/src/telemetry/telemetry.cpp + + ${PROJECT_SOURCE_DIR}/src/net/conn.cpp + ${PROJECT_SOURCE_DIR}/src/net/conn_plain.cpp + ${PROJECT_SOURCE_DIR}/src/net/http.cpp + ${PROJECT_SOURCE_DIR}/src/net/http_response.cpp + ${PROJECT_SOURCE_DIR}/src/net/http_request.cpp ) set(TEST_SOURCES diff --git a/contrib/timescaledb/src/loader/loader.cpp b/contrib/timescaledb/src/loader/loader.cpp index 3f72601c8..f0c954be6 100644 --- a/contrib/timescaledb/src/loader/loader.cpp +++ b/contrib/timescaledb/src/loader/loader.cpp @@ -25,6 +25,7 @@ #include "extension_utils.cpp" #include "export.h" #include "compat.h" +#include "cache.h" #include "extension_constants.h" #include "loader/loader.h" @@ -103,8 +104,6 @@ extern void TSDLLEXPORT _PG_init(void); extern void TSDLLEXPORT _PG_fini(void); /* was the versioned-extension loaded*/ -static bool loaded = false; -static bool loader_present = true; static char soversion[MAX_VERSION_LEN]; diff --git 
a/contrib/timescaledb/src/loader/tsdb_loader.cpp b/contrib/timescaledb/src/loader/tsdb_loader.cpp deleted file mode 100644 index 2ccf17015..000000000 --- a/contrib/timescaledb/src/loader/tsdb_loader.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "compat-msvc-enter.h" /* To label externs in extension.h and - * miscadmin.h correctly */ -#include -#include -#include "compat-msvc-exit.h" - -#include -#include -#include -#include -#include - -#include "config.h" -#include "catalog.h" -#include "extension.h" -#include "guc.h" -#include "config.h" -#include "extension_utils.cpp" -#include "compat.h" - -#define TS_UPDATE_SCRIPT_CONFIG_VAR "timescaledb.update_script_stage" -#define POST_UPDATE "post" -static Oid extension_proxy_oid = InvalidOid; - -Oid -ts_extension_schema_oid(void) -{ - Datum result; - Relation rel; - SysScanDesc scandesc; - HeapTuple tuple; - ScanKeyData entry[1]; - bool is_null = true; - Oid schema = InvalidOid; - - rel = table_open(ExtensionRelationId, AccessShareLock); - - ScanKeyInit(&entry[0], - Anum_pg_extension_extname, - BTEqualStrategyNumber, - F_NAMEEQ, - DirectFunctionCall1(namein, CStringGetDatum(EXTENSION_NAME))); - - scandesc = systable_beginscan(rel, ExtensionNameIndexId, true, NULL, 1, entry); - - tuple = systable_getnext(scandesc); - - /* We assume that there can be at most one matching tuple */ - if (HeapTupleIsValid(tuple)) - { - result = - heap_getattr(tuple, Anum_pg_extension_extnamespace, RelationGetDescr(rel), &is_null); - - if (!is_null) - schema = DatumGetObjectId(result); - } - - systable_endscan(scandesc); - table_close(rel, AccessShareLock); - - if (schema == InvalidOid) - elog(ERROR, "extension schema not found"); - return schema; -} - -char * -ts_extension_schema_name(void) -{ - return get_namespace_name(ts_extension_schema_oid()); -} \ No newline at end of file diff --git a/contrib/timescaledb/src/planner.h b/contrib/timescaledb/src/planner.h index 
5af211cab..c2d5c33ff 100644 --- a/contrib/timescaledb/src/planner.h +++ b/contrib/timescaledb/src/planner.h @@ -43,4 +43,6 @@ ts_get_private_reloptinfo(const RelOptInfo *rel) return (TimescaleDBPrivate *)rel->fdw_private; } +extern void _planner_init(void); +extern void _planner_fini(void); #endif /* TIMESCALEDB_PLANNER_H */ diff --git a/contrib/timescaledb/src/process_utility.h b/contrib/timescaledb/src/process_utility.h index 072b0222b..7b06a291c 100644 --- a/contrib/timescaledb/src/process_utility.h +++ b/contrib/timescaledb/src/process_utility.h @@ -33,4 +33,6 @@ typedef bool (*ts_process_utility_handler_t)(ProcessUtilityArgs *args); extern void ts_process_utility_set_expect_chunk_modification(bool expect); +extern void _process_utility_init(void); +extern void _process_utility_fini(void); #endif /* TIMESCALEDB_PROCESS_UTILITY_H */ diff --git a/contrib/timescaledb/src/tsdb.cpp b/contrib/timescaledb/src/tsdb.cpp index 915925071..10fbc6181 100644 --- a/contrib/timescaledb/src/tsdb.cpp +++ b/contrib/timescaledb/src/tsdb.cpp @@ -1,5 +1,12 @@ #include "compat.h" +#include "chunk_append/exec.h" +#include "constraint_aware_append.h" +#include "hypertable_cache.h" +#include "cache_invalidate.h" +#include "chunk_append/planner.h" +#include "process_utility.h" + #include "commands/cluster.h" #include "utils.h" #include "plan_agg_bookend.cpp" @@ -3484,3 +3491,93 @@ ts_preprocess_first_last_aggregates(PlannerInfo *root, List *tlist) } +static uint32 tsdb_index; + +void set_extension_index(uint32 index) +{ + tsdb_index = index; +} + +tsdb_session_context* get_session_context(bool is_from_PG_init) +{ + if (NULL == u_sess || NULL == u_sess->attr.attr_common.extension_session_vars_array) + { + tsdb_session_context* psc = (tsdb_session_context*)MemoryContextAllocZero(u_sess->self_mem_cxt, sizeof(tsdb_session_context)); + psc->tsdb_pinned_caches = NIL; + psc->tsdb_pinned_caches_mctx = NULL; + psc->tsdb_hypertable_cache_current = NULL; + psc->tsdb_planner_hcaches = NIL; + 
psc->tsdb_TS_CTE_EXPAND= "ts_expand"; + + psc->tsdb_chunk_append_plan_methods = { + .ExtensibleName = "ChunkAppend", + .CreateExtensiblePlanState = ts_chunk_append_state_create, + }; + + psc->tsdb_telemetry_level_options[0] = { "off", TELEMETRY_OFF, false }; + psc->tsdb_telemetry_level_options[1] = { "basic", TELEMETRY_BASIC, false }; + psc->tsdb_telemetry_level_options[2] = { NULL, 0, false }; + psc->tsdb_on_level = TELEMETRY_BASIC; + + psc->tsdb_expect_chunk_modification = false; + psc->tsdb_ts_guc_license_key = "CommunityLicense"; + + psc->tsdb_loaded = false; + psc->tsdb_loader_present = true; + + return psc; + } + + if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] == NULL && !is_from_PG_init) + { + init_session_vars(); + } + return (tsdb_session_context*)u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]; +} + +void init_session_vars(void) +{ + if (u_sess->attr.attr_common.extension_session_vars_array[tsdb_index]!=NULL) + return + + RepallocSessionVarsArrayIfNecessary(); + tsdb_session_context* psc = (tsdb_session_context*)MemoryContextAllocZero(u_sess->self_mem_cxt, sizeof(tsdb_session_context)); + u_sess->attr.attr_common.extension_session_vars_array[tsdb_index] = psc; + psc->tsdb_pinned_caches = NIL; + psc->tsdb_pinned_caches_mctx = NULL; + psc->tsdb_hypertable_cache_current = NULL; + psc->tsdb_planner_hcaches = NIL; + psc->tsdb_TS_CTE_EXPAND= "ts_expand"; + + psc->tsdb_chunk_append_plan_methods = { + .ExtensibleName = "ChunkAppend", + .CreateExtensiblePlanState = ts_chunk_append_state_create, + }; + + psc->tsdb_telemetry_level_options[0] = { "off", TELEMETRY_OFF, false }; + psc->tsdb_telemetry_level_options[1] = { "basic", TELEMETRY_BASIC, false }; + psc->tsdb_telemetry_level_options[2] = { NULL, 0, false }; + psc->tsdb_on_level = TELEMETRY_BASIC; + + psc->tsdb_expect_chunk_modification = false; + psc->tsdb_ts_guc_license_key = "CommunityLicense"; + + psc->tsdb_loaded = false; + psc->tsdb_loader_present = true; + + 
_constraint_aware_append_init(); + + if (!ts_extension_is_loaded()) + return; + + if (!tsdb_first_start) + { + _cache_init(); + _hypertable_cache_init(); + _cache_invalidate_init(); + + _planner_init(); + _chunk_append_init(); + _process_utility_init(); + } +} \ No newline at end of file diff --git a/contrib/timescaledb/src/tsdb.h b/contrib/timescaledb/src/tsdb.h index c6e040e86..51d85f107 100644 --- a/contrib/timescaledb/src/tsdb.h +++ b/contrib/timescaledb/src/tsdb.h @@ -269,4 +269,8 @@ extern char func_parallel(Oid funcid); extern Relids find_childrel_parents(PlannerInfo *root, RelOptInfo *rel); +extern void set_extension_index(uint32 index); +extern void init_session_vars(void); + +extern tsdb_session_context* get_session_context(bool is_from_PG_init=false); #endif \ No newline at end of file diff --git a/contrib/timescaledb/tsl/src/CMakeLists.txt b/contrib/timescaledb/tsl/src/CMakeLists.txt index c51ab3c73..d81090e21 100644 --- a/contrib/timescaledb/tsl/src/CMakeLists.txt +++ b/contrib/timescaledb/tsl/src/CMakeLists.txt @@ -5,113 +5,111 @@ set(SOURCES telemetry.cpp partialize_finalize.cpp planner.cpp - tsdb_tsl.cpp ${PROJECT_SOURCE_DIR}/src/agg_bookend.cpp -${PROJECT_SOURCE_DIR}/src/base64_compat.cpp -${PROJECT_SOURCE_DIR}/src/cache.cpp -${PROJECT_SOURCE_DIR}/src/cache_invalidate.cpp -${PROJECT_SOURCE_DIR}/src/catalog.cpp -${PROJECT_SOURCE_DIR}/src/chunk.cpp -${PROJECT_SOURCE_DIR}/src/chunk_adaptive.cpp -${PROJECT_SOURCE_DIR}/src/chunk_constraint.cpp -${PROJECT_SOURCE_DIR}/src/chunk_dispatch.cpp -${PROJECT_SOURCE_DIR}/src/chunk_dispatch_plan.cpp -${PROJECT_SOURCE_DIR}/src/chunk_dispatch_state.cpp -${PROJECT_SOURCE_DIR}/src/chunk_index.cpp -${PROJECT_SOURCE_DIR}/src/chunk_insert_state.cpp -${PROJECT_SOURCE_DIR}/src/compression_chunk_size.cpp -${PROJECT_SOURCE_DIR}/src/compression_with_clause.cpp -${PROJECT_SOURCE_DIR}/src/constraint_aware_append.cpp -${PROJECT_SOURCE_DIR}/src/continuous_agg.cpp -${PROJECT_SOURCE_DIR}/src/copy.cpp 
-${PROJECT_SOURCE_DIR}/src/cross_module_fn.cpp -${PROJECT_SOURCE_DIR}/src/custom_type_cache.cpp -${PROJECT_SOURCE_DIR}/src/debug_wait.cpp -${PROJECT_SOURCE_DIR}/src/dimension.cpp -${PROJECT_SOURCE_DIR}/src/dimension_slice.cpp -${PROJECT_SOURCE_DIR}/src/dimension_vector.cpp -${PROJECT_SOURCE_DIR}/src/estimate.cpp -${PROJECT_SOURCE_DIR}/src/event_trigger.cpp -${PROJECT_SOURCE_DIR}/src/extension.cpp -${PROJECT_SOURCE_DIR}/src/extension_utils.cpp -${PROJECT_SOURCE_DIR}/src/func_cache.cpp - -${PROJECT_SOURCE_DIR}/src/guc.cpp -${PROJECT_SOURCE_DIR}/src/histogram.cpp -${PROJECT_SOURCE_DIR}/src/hypercube.cpp -${PROJECT_SOURCE_DIR}/src/hypertable.cpp -${PROJECT_SOURCE_DIR}/src/hypertable_cache.cpp -${PROJECT_SOURCE_DIR}/src/hypertable_compression.cpp -${PROJECT_SOURCE_DIR}/src/hypertable_insert.cpp -${PROJECT_SOURCE_DIR}/src/hypertable_restrict_info.cpp -${PROJECT_SOURCE_DIR}/src/indexing.cpp - -${PROJECT_SOURCE_DIR}/src/interval.cpp -${PROJECT_SOURCE_DIR}/src/jsonb_utils.cpp -${PROJECT_SOURCE_DIR}/src/license_guc.cpp -${PROJECT_SOURCE_DIR}/src/metadata.cpp -${PROJECT_SOURCE_DIR}/src/partitioning.cpp -${PROJECT_SOURCE_DIR}/src/planner.cpp -${PROJECT_SOURCE_DIR}/src/plan_add_hashagg.cpp -${PROJECT_SOURCE_DIR}/src/plan_agg_bookend.cpp -${PROJECT_SOURCE_DIR}/src/plan_expand_hypertable.cpp -${PROJECT_SOURCE_DIR}/src/plan_partialize.cpp -${PROJECT_SOURCE_DIR}/src/process_utility.cpp -${PROJECT_SOURCE_DIR}/src/scanner.cpp -${PROJECT_SOURCE_DIR}/src/scan_iterator.cpp -${PROJECT_SOURCE_DIR}/src/sort_transform.cpp -${PROJECT_SOURCE_DIR}/src/subspace_store.cpp -${PROJECT_SOURCE_DIR}/src/tablespace.cpp -${PROJECT_SOURCE_DIR}/src/time_bucket.cpp - -${PROJECT_SOURCE_DIR}/src/tsdb.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_dsm.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_extension.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_head.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_shm.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_static.cpp -${PROJECT_SOURCE_DIR}/src/tsdb_static2.cpp -${PROJECT_SOURCE_DIR}/src/utils.cpp 
-${PROJECT_SOURCE_DIR}/src/version.cpp -${PROJECT_SOURCE_DIR}/src/with_clause_parser.cpp -${PROJECT_SOURCE_DIR}/src/bgw/job.cpp -${PROJECT_SOURCE_DIR}/src/bgw/job_stat.cpp -${PROJECT_SOURCE_DIR}/src/bgw/launcher_interface.cpp -${PROJECT_SOURCE_DIR}/src/bgw/scheduler.cpp -${PROJECT_SOURCE_DIR}/src/bgw/timer.cpp -${PROJECT_SOURCE_DIR}/src/bgw_policy/chunk_stats.cpp -${PROJECT_SOURCE_DIR}/src/bgw_policy/compress_chunks.cpp -${PROJECT_SOURCE_DIR}/src/bgw_policy/drop_chunks.cpp -${PROJECT_SOURCE_DIR}/src/bgw_policy/policy.cpp -${PROJECT_SOURCE_DIR}/src/bgw_policy/reorder.cpp -${PROJECT_SOURCE_DIR}/src/chunk_append/chunk_append.cpp -${PROJECT_SOURCE_DIR}/src/chunk_append/exec.cpp -${PROJECT_SOURCE_DIR}/src/chunk_append/explain.cpp -${PROJECT_SOURCE_DIR}/src/chunk_append/planner.cpp -${PROJECT_SOURCE_DIR}/src/chunk_append/transform.cpp -${PROJECT_SOURCE_DIR}/src/compat/fkeylist.cpp -${PROJECT_SOURCE_DIR}/src/compat/tableam.cpp -${PROJECT_SOURCE_DIR}/src/compat/tupconvert.cpp -${PROJECT_SOURCE_DIR}/src/compat/tuptable.cpp - -${PROJECT_SOURCE_DIR}/src/import/planner.cpp -${PROJECT_SOURCE_DIR}/src/loader/bgw_counter.cpp -${PROJECT_SOURCE_DIR}/src/loader/bgw_interface.cpp -${PROJECT_SOURCE_DIR}/src/loader/bgw_launcher.cpp -${PROJECT_SOURCE_DIR}/src/loader/bgw_message_queue.cpp - -${PROJECT_SOURCE_DIR}/src/loader/lwlocks.cpp - -${PROJECT_SOURCE_DIR}/src/net/conn.cpp -${PROJECT_SOURCE_DIR}/src/net/conn_plain.cpp - -${PROJECT_SOURCE_DIR}/src/net/http.cpp -${PROJECT_SOURCE_DIR}/src/net/http_request.cpp -${PROJECT_SOURCE_DIR}/src/net/http_response.cpp -${PROJECT_SOURCE_DIR}/src/telemetry/telemetry.cpp -${PROJECT_SOURCE_DIR}/src/telemetry/telemetry_metadata.cpp -${PROJECT_SOURCE_DIR}/src/telemetry/uuid.cpp - + ${PROJECT_SOURCE_DIR}/src/base64_compat.cpp + ${PROJECT_SOURCE_DIR}/src/cache.cpp + ${PROJECT_SOURCE_DIR}/src/cache_invalidate.cpp + ${PROJECT_SOURCE_DIR}/src/catalog.cpp + ${PROJECT_SOURCE_DIR}/src/chunk.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_adaptive.cpp + 
${PROJECT_SOURCE_DIR}/src/chunk_constraint.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch_plan.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_dispatch_state.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_index.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_insert_state.cpp + ${PROJECT_SOURCE_DIR}/src/compression_chunk_size.cpp + ${PROJECT_SOURCE_DIR}/src/compression_with_clause.cpp + ${PROJECT_SOURCE_DIR}/src/constraint_aware_append.cpp + ${PROJECT_SOURCE_DIR}/src/continuous_agg.cpp + ${PROJECT_SOURCE_DIR}/src/copy.cpp + ${PROJECT_SOURCE_DIR}/src/cross_module_fn.cpp + ${PROJECT_SOURCE_DIR}/src/custom_type_cache.cpp + ${PROJECT_SOURCE_DIR}/src/debug_wait.cpp + ${PROJECT_SOURCE_DIR}/src/dimension.cpp + ${PROJECT_SOURCE_DIR}/src/dimension_slice.cpp + ${PROJECT_SOURCE_DIR}/src/dimension_vector.cpp + ${PROJECT_SOURCE_DIR}/src/estimate.cpp + ${PROJECT_SOURCE_DIR}/src/event_trigger.cpp + ${PROJECT_SOURCE_DIR}/src/extension.cpp + ${PROJECT_SOURCE_DIR}/src/extension_utils.cpp + ${PROJECT_SOURCE_DIR}/src/func_cache.cpp + + ${PROJECT_SOURCE_DIR}/src/guc.cpp + ${PROJECT_SOURCE_DIR}/src/histogram.cpp + ${PROJECT_SOURCE_DIR}/src/hypercube.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_cache.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_compression.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_insert.cpp + ${PROJECT_SOURCE_DIR}/src/hypertable_restrict_info.cpp + ${PROJECT_SOURCE_DIR}/src/indexing.cpp + + ${PROJECT_SOURCE_DIR}/src/interval.cpp + ${PROJECT_SOURCE_DIR}/src/jsonb_utils.cpp + ${PROJECT_SOURCE_DIR}/src/license_guc.cpp + ${PROJECT_SOURCE_DIR}/src/metadata.cpp + ${PROJECT_SOURCE_DIR}/src/partitioning.cpp + ${PROJECT_SOURCE_DIR}/src/planner.cpp + ${PROJECT_SOURCE_DIR}/src/plan_add_hashagg.cpp + ${PROJECT_SOURCE_DIR}/src/plan_agg_bookend.cpp + ${PROJECT_SOURCE_DIR}/src/plan_expand_hypertable.cpp + ${PROJECT_SOURCE_DIR}/src/plan_partialize.cpp + ${PROJECT_SOURCE_DIR}/src/process_utility.cpp + 
${PROJECT_SOURCE_DIR}/src/scanner.cpp + ${PROJECT_SOURCE_DIR}/src/scan_iterator.cpp + ${PROJECT_SOURCE_DIR}/src/sort_transform.cpp + ${PROJECT_SOURCE_DIR}/src/subspace_store.cpp + ${PROJECT_SOURCE_DIR}/src/tablespace.cpp + ${PROJECT_SOURCE_DIR}/src/time_bucket.cpp + + ${PROJECT_SOURCE_DIR}/src/tsdb.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_dsm.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_extension.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_head.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_shm.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_static.cpp + ${PROJECT_SOURCE_DIR}/src/tsdb_static2.cpp + ${PROJECT_SOURCE_DIR}/src/utils.cpp + ${PROJECT_SOURCE_DIR}/src/version.cpp + ${PROJECT_SOURCE_DIR}/src/with_clause_parser.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/job.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/job_stat.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/launcher_interface.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/scheduler.cpp + ${PROJECT_SOURCE_DIR}/src/bgw/timer.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/chunk_stats.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/compress_chunks.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/drop_chunks.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/policy.cpp + ${PROJECT_SOURCE_DIR}/src/bgw_policy/reorder.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/chunk_append.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/exec.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/explain.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/planner.cpp + ${PROJECT_SOURCE_DIR}/src/chunk_append/transform.cpp + ${PROJECT_SOURCE_DIR}/src/compat/fkeylist.cpp + ${PROJECT_SOURCE_DIR}/src/compat/tableam.cpp + ${PROJECT_SOURCE_DIR}/src/compat/tupconvert.cpp + ${PROJECT_SOURCE_DIR}/src/compat/tuptable.cpp + + ${PROJECT_SOURCE_DIR}/src/import/planner.cpp + ${PROJECT_SOURCE_DIR}/src/loader/bgw_counter.cpp + ${PROJECT_SOURCE_DIR}/src/loader/bgw_interface.cpp + ${PROJECT_SOURCE_DIR}/src/loader/bgw_launcher.cpp + ${PROJECT_SOURCE_DIR}/src/loader/bgw_message_queue.cpp + + ${PROJECT_SOURCE_DIR}/src/loader/lwlocks.cpp + + 
${PROJECT_SOURCE_DIR}/src/net/conn.cpp + ${PROJECT_SOURCE_DIR}/src/net/conn_plain.cpp + + ${PROJECT_SOURCE_DIR}/src/net/http.cpp + ${PROJECT_SOURCE_DIR}/src/net/http_request.cpp + ${PROJECT_SOURCE_DIR}/src/net/http_response.cpp + ${PROJECT_SOURCE_DIR}/src/telemetry/telemetry.cpp + ${PROJECT_SOURCE_DIR}/src/telemetry/telemetry_metadata.cpp + ${PROJECT_SOURCE_DIR}/src/telemetry/uuid.cpp ) # Add test source code in Debug builds diff --git a/contrib/timescaledb/tsl/src/tsdb_tsl.cpp b/contrib/timescaledb/tsl/src/tsdb_tsl.cpp deleted file mode 100644 index 36194ba86..000000000 --- a/contrib/timescaledb/tsl/src/tsdb_tsl.cpp +++ /dev/null @@ -1,171 +0,0 @@ -/* - * This file and its contents are licensed under the Apache License 2.0. - * Please see the included NOTICE for copyright information and - * LICENSE-APACHE for a copy of the license. - */ -#include -#include -#include -#include -#include "../src/compat-msvc-enter.h" -#include -#include -#include -#include -#include -#include -#include "../src/compat-msvc-exit.h" -#include -#include -#include -#include -#include -#include "parallel/parallel.h" - -#include "extension_utils.cpp" -#include "export.h" -#include "compat.h" -#include "extension_constants.h" - -#include "loader/loader.h" -#include "loader/bgw_counter.h" -#include "loader/bgw_interface.h" -#include "loader/bgw_launcher.h" -#include "loader/bgw_message_queue.h" -#include "loader/lwlocks.h" - -static bool loaded = false; -static char soversion[MAX_VERSION_LEN]; -static post_parse_analyze_hook_type extension_post_parse_analyze_hook = NULL; - -#define POST_LOAD_INIT_FN "ts_post_load_init" -#if PG96 -#ifdef WIN32 -#define CalledInParallelWorker() false -#else -#define CalledInParallelWorker() false -#endif /* WIN32 */ -#else -#define CalledInParallelWorker() \ - (MyBgworkerEntry != NULL && (MyBgworkerEntry->bgw_flags & BGWORKER_CLASS_PARALLEL) != 0) -#endif /* PG96 */ - - - -static void inline do_load() -{ - char *version = extension_version(); - char 
soname[MAX_SO_NAME_LEN]; - post_parse_analyze_hook_type old_hook; - - StrNCpy(soversion, version, MAX_VERSION_LEN); - - /* - * An inval_relcache callback can be called after previous checks of - * loaded had found it to be false. But the inval_relcache callback may - * load the extension setting it to true. Thus it needs to be rechecked - * here again by the outer call after inval_relcache completes. This is - * double-check locking, in effect. - */ - if (loaded) - return; - - snprintf(soname, MAX_SO_NAME_LEN, "%s-%s", EXTENSION_SO, version); - - /* - * Set to true whether or not the load succeeds to prevent reloading if - * failure happened after partial load. - */ - loaded = true; - - /* - * In a parallel worker, we're not responsible for loading libraries, it's - * handled by the parallel worker infrastructure which restores the - * library state. - */ - if (CalledInParallelWorker()) - return; - - /* - * Set the config option to let versions 0.9.0 and 0.9.1 know that the - * loader was preloaded, newer versions use rendezvous variables instead. - */ - if (strcmp(version, "0.9.0") == 0 || strcmp(version, "0.9.1") == 0) - SetConfigOption("timescaledb.loader_present", "on", PGC_USERSET, PGC_S_SESSION); - - /* - * we need to capture the loaded extension's post analyze hook, giving it - * a NULL as previous - */ - old_hook = post_parse_analyze_hook; - post_parse_analyze_hook = NULL; - - /* - * We want to call the post_parse_analyze_hook from the versioned - * extension after we've loaded the versioned so. 
When the file is loaded - * it sets post_parse_analyze_hook, which we capture and store in - * extension_post_parse_analyze_hook to call at the end _PG_init - */ - PG_TRY(); - { - CFunInfo temp_for_tsdb = load_external_function(soname, POST_LOAD_INIT_FN, false, NULL); - PGFunction ts_post_load_init = temp_for_tsdb.user_fn; - - if (ts_post_load_init != NULL) - DirectFunctionCall1(ts_post_load_init, CharGetDatum(0)); - } - PG_CATCH(); - { - extension_post_parse_analyze_hook = post_parse_analyze_hook; - post_parse_analyze_hook = old_hook; - PG_RE_THROW(); - } - PG_END_TRY(); - - extension_post_parse_analyze_hook = post_parse_analyze_hook; - post_parse_analyze_hook = old_hook; -} - -static void inline extension_check() -{ - if (!loaded) - { - enum ExtensionState state = extension_current_state(); - - switch (state) - { - case EXTENSION_STATE_TRANSITIONING: - - /* - * Always load as soon as the extension is transitioning. This - * is necessary so that the extension load before any CREATE - * FUNCTION calls. Otherwise, the CREATE FUNCTION calls will - * load the .so without capturing the post_parse_analyze_hook. 
- */ - case EXTENSION_STATE_CREATED: - do_load(); - return; - case EXTENSION_STATE_UNKNOWN: - case EXTENSION_STATE_NOT_INSTALLED: - return; - } - } -} - -extern void -ts_loader_extension_check(void) -{ - extension_check(); -} - -extern bool -ts_loader_extension_exists(void) -{ - return extension_exists(); -} - -extern char * -ts_loader_extension_version(void) -{ - return extension_version(); -} \ No newline at end of file -- Gitee From bcede5d3383ed3f8d60c68c0a87d5696470ba0ba Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Sat, 16 Dec 2023 17:23:20 +0800 Subject: [PATCH 131/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E5=87=BD=E6=95=B0=5Ftimescaledb=5Finternal.get=5Fcreate=5Fcomm?= =?UTF-8?q?and()=E6=8F=90=E7=A4=BA=E4=BF=A1=E6=81=AF=E4=B8=8D=E5=87=86?= =?UTF-8?q?=E7=A1=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 5 ++++- contrib/timescaledb/og-timescaledb1.7.4.sql | 13 +++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index df89cf53a..a2dda2cc3 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -23,12 +23,15 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ ### 1.3.1. 
一般性é™åˆ¶ +- 在兼容pg库下创建æ’ä»¶ +- chunkåŠŸèƒ½æš‚ä¸æ”¯æŒ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› - TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› + # **2.** TimescaleDB安装方法 @@ -128,7 +131,7 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, | 12 | create_hypertable()创建超表 | 创建超表 | | 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | | 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš” | +| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | | 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | | 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | | 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 3157b2632..28b5b1086 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -813,16 +813,21 @@ DECLARE dimension_row record; ret TEXT; BEGIN - SELECT h.id, h.schema_name + SELECT COUNT(*) + INTO v_count FROM _timescaledb_catalog.hypertable AS h - WHERE h.table_name = get_create_command.table_name - INTO h_id, schema_name; + WHERE h.table_name = get_create_command.table_name; - IF h_id IS NULL THEN + IF v_count = 0 THEN RAISE EXCEPTION 'hypertable "%" not found', table_name USING ERRCODE = 'TS101'; END IF; + SELECT h.id, h.schema_name + FROM _timescaledb_catalog.hypertable AS h + WHERE h.table_name = get_create_command.table_name + INTO h_id, schema_name; + SELECT COUNT(*) FROM 
_timescaledb_catalog.dimension d WHERE d.hypertable_id = h_id -- Gitee From 268f5fb3aa63abf83822015038669a848da4e301 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Sat, 16 Dec 2023 17:23:20 +0800 Subject: [PATCH 132/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20get=5Fpartition=5F?= =?UTF-8?q?for=5Fkey()=E8=8E=B7=E5=8F=96=E7=9A=84=E5=80=BC=E4=B8=8D?= =?UTF-8?q?=E5=87=86=E7=A1=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 5 ++++- contrib/timescaledb/og-timescaledb1.7.4.sql | 13 +++++++++---- contrib/timescaledb/src/partitioning.cpp | 3 +-- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index df89cf53a..a2dda2cc3 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -23,12 +23,15 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ ### 1.3.1. 一般性é™åˆ¶ +- 在兼容pg库下创建æ’ä»¶ +- chunkåŠŸèƒ½æš‚ä¸æ”¯æŒ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› - TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› + # **2.** TimescaleDB安装方法 @@ -128,7 +131,7 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, | 12 | create_hypertable()创建超表 | 创建超表 | | 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | | 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš” | +| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | | 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | | 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | | 18 | 
timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 3157b2632..28b5b1086 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -813,16 +813,21 @@ DECLARE dimension_row record; ret TEXT; BEGIN - SELECT h.id, h.schema_name + SELECT COUNT(*) + INTO v_count FROM _timescaledb_catalog.hypertable AS h - WHERE h.table_name = get_create_command.table_name - INTO h_id, schema_name; + WHERE h.table_name = get_create_command.table_name; - IF h_id IS NULL THEN + IF v_count = 0 THEN RAISE EXCEPTION 'hypertable "%" not found', table_name USING ERRCODE = 'TS101'; END IF; + SELECT h.id, h.schema_name + FROM _timescaledb_catalog.hypertable AS h + WHERE h.table_name = get_create_command.table_name + INTO h_id, schema_name; + SELECT COUNT(*) FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = h_id diff --git a/contrib/timescaledb/src/partitioning.cpp b/contrib/timescaledb/src/partitioning.cpp index c2a55ba08..49b496a52 100644 --- a/contrib/timescaledb/src/partitioning.cpp +++ b/contrib/timescaledb/src/partitioning.cpp @@ -161,8 +161,7 @@ find_text_coercion_func(Oid type) */ cpt = find_coercion_pathway(TEXTOID, type, COERCION_EXPLICIT, &funcid); - if (cpt != COERCION_PATH_FUNC) - getTypeOutputInfo(type, &funcid, &is_varlena); + getTypeOutputInfo(type, &funcid, &is_varlena); return funcid; } -- Gitee From 46f86e5cf4a87fcef537b88d6b9fc7d9c4d7efcd Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Sat, 16 Dec 2023 17:23:20 +0800 Subject: [PATCH 133/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=A0=87=E5=87=86?= =?UTF-8?q?=E8=A1=A8=E6=8C=87=E5=AE=9A=E8=A1=A8=E7=A9=BA=E9=97=B4=E5=90=8E?= =?UTF-8?q?=E5=88=9B=E5=BB=BA=E8=B6=85=E8=A1=A8=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
contrib/timescaledb/README.md | 5 ++++- contrib/timescaledb/og-timescaledb1.7.4.sql | 13 +++++++++---- contrib/timescaledb/src/hypertable.cpp | 3 --- contrib/timescaledb/src/partitioning.cpp | 3 +-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index df89cf53a..a2dda2cc3 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -23,12 +23,15 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ ### 1.3.1. 一般性é™åˆ¶ +- 在兼容pg库下创建æ’ä»¶ +- chunkåŠŸèƒ½æš‚ä¸æ”¯æŒ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› - TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› + # **2.** TimescaleDB安装方法 @@ -128,7 +131,7 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, | 12 | create_hypertable()创建超表 | 创建超表 | | 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | | 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš” | +| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | | 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | | 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | | 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 3157b2632..28b5b1086 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -813,16 +813,21 @@ DECLARE dimension_row record; ret TEXT; BEGIN - SELECT h.id, h.schema_name + SELECT 
COUNT(*) + INTO v_count FROM _timescaledb_catalog.hypertable AS h - WHERE h.table_name = get_create_command.table_name - INTO h_id, schema_name; + WHERE h.table_name = get_create_command.table_name; - IF h_id IS NULL THEN + IF v_count = 0 THEN RAISE EXCEPTION 'hypertable "%" not found', table_name USING ERRCODE = 'TS101'; END IF; + SELECT h.id, h.schema_name + FROM _timescaledb_catalog.hypertable AS h + WHERE h.table_name = get_create_command.table_name + INTO h_id, schema_name; + SELECT COUNT(*) FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = h_id diff --git a/contrib/timescaledb/src/hypertable.cpp b/contrib/timescaledb/src/hypertable.cpp index 6f4e563c3..2df5127ba 100644 --- a/contrib/timescaledb/src/hypertable.cpp +++ b/contrib/timescaledb/src/hypertable.cpp @@ -1516,9 +1516,6 @@ insert_blocker_trigger_add(Oid relid) */ objaddr = CreateTriggerCompat(&stmt, NULL, relid, InvalidOid, InvalidOid, InvalidOid, false,0); - if (!OidIsValid(objaddr.objectId)) - elog(ERROR, "could not create insert blocker trigger"); - return objaddr.objectId; } diff --git a/contrib/timescaledb/src/partitioning.cpp b/contrib/timescaledb/src/partitioning.cpp index c2a55ba08..49b496a52 100644 --- a/contrib/timescaledb/src/partitioning.cpp +++ b/contrib/timescaledb/src/partitioning.cpp @@ -161,8 +161,7 @@ find_text_coercion_func(Oid type) */ cpt = find_coercion_pathway(TEXTOID, type, COERCION_EXPLICIT, &funcid); - if (cpt != COERCION_PATH_FUNC) - getTypeOutputInfo(type, &funcid, &is_varlena); + getTypeOutputInfo(type, &funcid, &is_varlena); return funcid; } -- Gitee From c056a8bb0058195f27b87bc366a92aeeb72f9c09 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Sat, 16 Dec 2023 17:23:20 +0800 Subject: [PATCH 134/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=88=A0=E9=99=A4?= =?UTF-8?q?=E8=B6=85=E8=A1=A8=E5=90=8E=EF=BC=8C=E7=B3=BB=E7=BB=9F=E8=A1=A8?= =?UTF-8?q?=E4=B8=AD=E8=AF=A5=E8=B6=85=E8=A1=A8=E7=9A=84=E4=BF=A1=E6=81=AF?= =?UTF-8?q?=E5=AD=98=E5=9C=A8?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 5 ++++- contrib/timescaledb/og-timescaledb1.7.4.sql | 13 +++++++++---- contrib/timescaledb/src/hypertable.cpp | 3 --- contrib/timescaledb/src/partitioning.cpp | 3 +-- contrib/timescaledb/src/process_utility.cpp | 10 +--------- 5 files changed, 15 insertions(+), 19 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index df89cf53a..a2dda2cc3 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -23,12 +23,15 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ ### 1.3.1. 一般性é™åˆ¶ +- 在兼容pg库下创建æ’ä»¶ +- chunkåŠŸèƒ½æš‚ä¸æ”¯æŒ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› - TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› + # **2.** TimescaleDB安装方法 @@ -128,7 +131,7 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, | 12 | create_hypertable()创建超表 | 创建超表 | | 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | | 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš” | +| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | | 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | | 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | | 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 3157b2632..28b5b1086 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ 
b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -813,16 +813,21 @@ DECLARE dimension_row record; ret TEXT; BEGIN - SELECT h.id, h.schema_name + SELECT COUNT(*) + INTO v_count FROM _timescaledb_catalog.hypertable AS h - WHERE h.table_name = get_create_command.table_name - INTO h_id, schema_name; + WHERE h.table_name = get_create_command.table_name; - IF h_id IS NULL THEN + IF v_count = 0 THEN RAISE EXCEPTION 'hypertable "%" not found', table_name USING ERRCODE = 'TS101'; END IF; + SELECT h.id, h.schema_name + FROM _timescaledb_catalog.hypertable AS h + WHERE h.table_name = get_create_command.table_name + INTO h_id, schema_name; + SELECT COUNT(*) FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = h_id diff --git a/contrib/timescaledb/src/hypertable.cpp b/contrib/timescaledb/src/hypertable.cpp index 6f4e563c3..2df5127ba 100644 --- a/contrib/timescaledb/src/hypertable.cpp +++ b/contrib/timescaledb/src/hypertable.cpp @@ -1516,9 +1516,6 @@ insert_blocker_trigger_add(Oid relid) */ objaddr = CreateTriggerCompat(&stmt, NULL, relid, InvalidOid, InvalidOid, InvalidOid, false,0); - if (!OidIsValid(objaddr.objectId)) - elog(ERROR, "could not create insert blocker trigger"); - return objaddr.objectId; } diff --git a/contrib/timescaledb/src/partitioning.cpp b/contrib/timescaledb/src/partitioning.cpp index c2a55ba08..49b496a52 100644 --- a/contrib/timescaledb/src/partitioning.cpp +++ b/contrib/timescaledb/src/partitioning.cpp @@ -161,8 +161,7 @@ find_text_coercion_func(Oid type) */ cpt = find_coercion_pathway(TEXTOID, type, COERCION_EXPLICIT, &funcid); - if (cpt != COERCION_PATH_FUNC) - getTypeOutputInfo(type, &funcid, &is_varlena); + getTypeOutputInfo(type, &funcid, &is_varlena); return funcid; } diff --git a/contrib/timescaledb/src/process_utility.cpp b/contrib/timescaledb/src/process_utility.cpp index 73f2926e0..bafa622d7 100644 --- a/contrib/timescaledb/src/process_utility.cpp +++ b/contrib/timescaledb/src/process_utility.cpp @@ -912,15 +912,7 @@ 
process_drop_hypertable(ProcessUtilityArgs *args, DropStmt *stmt) errmsg("dropping compressed hypertables not supported"), errhint("Please drop the corresponding uncompressed hypertable " "instead."))); - - if (stmt->behavior != DROP_CASCADE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Dropping hypertables without CASCADE is not supported"), - errhint("Please use drop hypertable.... CASCADE " - "instead."))); - /* * We need to drop hypertable chunks before the hypertable to avoid the need * to CASCADE such drops; @@ -937,7 +929,7 @@ process_drop_hypertable(ProcessUtilityArgs *args, DropStmt *stmt) ts_hypertable_drop(compressed_hypertable, DROP_CASCADE); } #ifdef OG30 - if (stmt->behavior == DROP_CASCADE && !TS_HYPERTABLE_HAS_COMPRESSION(ht)) + if (!TS_HYPERTABLE_HAS_COMPRESSION(ht)) { ts_hypertable_drop(ht, DROP_CASCADE); handled = true; -- Gitee From 5e6676c0c0efcc2fa8753b160bbdb1a110855c39 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Sat, 16 Dec 2023 17:23:20 +0800 Subject: [PATCH 135/434] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=9C=AA=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=8E=9F=E5=9B=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 92 ++++++++++++++++++++- contrib/timescaledb/og-timescaledb1.7.4.sql | 13 ++- contrib/timescaledb/src/hypertable.cpp | 3 - contrib/timescaledb/src/partitioning.cpp | 3 +- contrib/timescaledb/src/process_utility.cpp | 10 +-- 5 files changed, 102 insertions(+), 19 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index df89cf53a..05fdba25f 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -23,6 +23,8 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ ### 1.3.1. 
一般性é™åˆ¶ +- 在兼容pg库下创建æ’ä»¶ +- chunkåŠŸèƒ½æš‚ä¸æ”¯æŒ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› @@ -133,4 +135,92 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, | 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | | 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | | 19 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | -| 20 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | \ No newline at end of file +| 20 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | + + +# **4.** TimescaleDBä¸å¯ç”¨æŽ¥å£ + +## **4.1** 压缩相关函数 + +**æè¿°ï¼š** 在TimescaleDBå†…éƒ¨ä½¿ç”¨ï¼Œç”¨äºŽç®¡ç†æ—¶åºæ•°æ®çš„压缩和解压缩,当数æ®è¢«æ‘„å–ã€å­˜å‚¨ã€æ£€ç´¢æˆ–在系统内部传输时。TimescaleDB使用压缩æ¥é™ä½Žå­˜å‚¨æˆæœ¬å¹¶æé«˜æŸ¥è¯¢æ€§èƒ½ã€‚ +**ä¸å¯å®žçŽ°åŽŸå› ï¼š** compress相关函数中使用到了B 树索引ã€JSONB 和范围类型以åŠè¡¨åˆ†åŒºåŠŸèƒ½ï¼Œå¯¹äºŽ TimescaleDB 的压缩æ¥è¯´æ˜¯å¿…需的。 PostgreSQL 9.6 没有必è¦çš„åŸºç¡€å‡½æ•°æ¥æ”¯æŒè¿™äº›é«˜çº§åŠŸèƒ½ã€‚ 因此,TimescaleDB çš„åŽ‹ç¼©åŠŸèƒ½éœ€è¦ PostgreSQL 11 或更高版本æ‰èƒ½è¿è¡Œã€‚从而在适é…openGauss时也没有这些基本函数 + + +| Functions | +|---------------------------------------------| +| _timescaledb_internal.compressed_data_in | +| _timescaledb_internal.compressed_data_out | +| _timescaledb_internal.compressed_data_send | +| _timescaledb_internal.compressed_data_recv | +| _timescaledb_catalog.compression_algorithm | +| _timescaledb_catalog.compression_chunk_size | +| _timescaledb_catalog.hypertable_compression | + +## **4.2** 触å‘器相关函数 + +**æè¿°ï¼š** 触å‘器功能相关 +**ä¸å¯å®žçŽ°åŽŸå› ï¼š** ä¸€é˜¶æ®µé€‚é…æ—¶è§¦å‘器功能缺少的函数和结构体太多,在二阶段æ‰èƒ½æ”¯æŒ + +| Functions | +|---------------------------------------------| +| _timescaledb_internal.insert_blocker | +| _timescaledb_internal.continuous_agg_invalidation_trigger | +| _timescaledb_internal.process_ddl_event | + +## **4.3** èšåˆå‡½æ•° + +**æè¿°ï¼š** 
这些函数是TimescaleDB内部使用的èšåˆå‡½æ•°ç»„件,主è¦ç”¨äºŽè¿žç»­èšåˆæŸ¥è¯¢çš„实现。 +**ä¸å¯å®žçŽ°åŽŸå› ï¼š** pgçš„èšåˆå‡½æ•°ä¸Žogçš„ç›¸å…³å‚æ•°ä¸åŒï¼Œå¦‚æžœè¦ä¿è¯å’ŒPG一致,对于OG内核的修改太大,对于具体ä¸åŒç‚¹ï¼Œå·²ç»åœ¨ã€Šèšåˆå‡½æ•°ç›¸å…³é—®é¢˜.docx》中进行了详细说明 + +| Functions | +|---------------------------------------------| +| _timescaledb_internal.first_sfunc | +| _timescaledb_internal.first_combinefunc | +| _timescaledb_internal.last_sfunc | +| _timescaledb_internal.last_combinefunc | +| _timescaledb_internal.bookend_finalfunc | +| _timescaledb_internal.bookend_serializefunc | +| _timescaledb_internal.bookend_deserializefunc | +| _timescaledb_internal.hist_sfunc | +| _timescaledb_internal.hist_combinefunc | +| _timescaledb_internal.hist_serializefunc | +| _timescaledb_internal.hist_deserializefunc | +| _timescaledb_internal.hist_finalfunc | +| _timescaledb_internal.partialize_agg | +| _timescaledb_internal.finalize_agg_sfunc | +| _timescaledb_internal.finalize_agg_ffunc | +| _timescaledb_internal.cagg_watermark | +| _timescaledb_internal.finalize_agg | + +## **4.4** License相关 + +**æè¿°ï¼š** ä¼ä¸šçº§TimescaleDB相关功能函数 +**ä¸å¯å®žçŽ°åŽŸå› ï¼š** 这些函数需è¦license支æŒï¼Œæˆ‘们适é…çš„ä»£ç æ˜¯ç¤¾åŒºç‰ˆæœ¬çš„TimescaleDBï¼Œå¦‚æžœè¦æ”¯æŒè¿™äº›å‡½æ•°éœ€è¦ä¼ä¸šç‰ˆæœ¬çš„TimescaleDBæºç  + +| Functions | +|---------------------------------------------| +| _timescaledb_internal.current_db_set_license_key | +| _timescaledb_internal.hypertable_constraint_add_table_fk_constraint | +| _timescaledb_internal.enterprise_enabled | +| _timescaledb_internal.tsl_loaded | + +## **4.5** BGW(Background Worker)相关 + +**æè¿°ï¼š** 这些函数与TimescaleDB中的åŽå°å·¥ä½œå’Œç­–略管ç†ç›¸å…³ï¼Œæ¯”如é‡å¯åŽå°å·¥ä½œè¿›ç¨‹ï¼Œæä¾›æœ‰å…³å—策略执行统计的信æ¯ï¼Œä¸»è¦ç”¨äºŽTimescaleDB内部维护和æ“作数æ®åº“çš„åŽå°ä»»åŠ¡å’Œç›¸å…³ç­–ç•¥ï¼Œé€šå¸¸ä¸ç›´æŽ¥æš´éœ²ç»™æœ€ç»ˆç”¨æˆ· +**ä¸å¯å®žçŽ°åŽŸå› ï¼š** PostgreSQL采用的是基于进程的架构,而openGauss采用的是基于线程的架构。在基于进程的架构中,独立的åŽå°è¿›ç¨‹ï¼ˆå¦‚åŽå°å·¥ä½œè€…,或 
BGW)å¯ä»¥ç”¨äºŽæ‰§è¡Œå®šæ—¶ä»»åŠ¡å’Œå¹¶è¡Œæ“ä½œã€‚è€Œåœ¨åŸºäºŽçº¿ç¨‹çš„æž¶æž„ä¸­ï¼Œè¿™äº›ä»»åŠ¡é€šå¸¸ç”±çº¿ç¨‹è€Œä¸æ˜¯è¿›ç¨‹æ¥å¤„ç†ï¼Œå› æ­¤äºŒè€…差别较大。 + +| Functions | +|----------------------------------------------------| +| _timescaledb_internal.restart_background_workers | +| _timescaledb_internal.stop_background_workers | +| _timescaledb_internal.start_background_workers | +| timescaledb_information.continuous_aggregate_stats | +| timescaledb_information.continuous_aggregates | +| _timescaledb_config.bgw_job | +| _timescaledb_config.bgw_policy_compress_chunks | +| _timescaledb_config.bgw_policy_drop_chunks | +| _timescaledb_config.bgw_policy_reorder | +| _timescaledb_cache.cache_inval_bgw_job | +| _timescaledb_config.bgw_job_id_seq | +| _timescaledb_internal.bgw_job_stat | +| _timescaledb_internal.bgw_policy_chunk_stats | \ No newline at end of file diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 3157b2632..28b5b1086 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -813,16 +813,21 @@ DECLARE dimension_row record; ret TEXT; BEGIN - SELECT h.id, h.schema_name + SELECT COUNT(*) + INTO v_count FROM _timescaledb_catalog.hypertable AS h - WHERE h.table_name = get_create_command.table_name - INTO h_id, schema_name; + WHERE h.table_name = get_create_command.table_name; - IF h_id IS NULL THEN + IF v_count = 0 THEN RAISE EXCEPTION 'hypertable "%" not found', table_name USING ERRCODE = 'TS101'; END IF; + SELECT h.id, h.schema_name + FROM _timescaledb_catalog.hypertable AS h + WHERE h.table_name = get_create_command.table_name + INTO h_id, schema_name; + SELECT COUNT(*) FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = h_id diff --git a/contrib/timescaledb/src/hypertable.cpp b/contrib/timescaledb/src/hypertable.cpp index 6f4e563c3..2df5127ba 100644 --- a/contrib/timescaledb/src/hypertable.cpp +++ b/contrib/timescaledb/src/hypertable.cpp @@ 
-1516,9 +1516,6 @@ insert_blocker_trigger_add(Oid relid) */ objaddr = CreateTriggerCompat(&stmt, NULL, relid, InvalidOid, InvalidOid, InvalidOid, false,0); - if (!OidIsValid(objaddr.objectId)) - elog(ERROR, "could not create insert blocker trigger"); - return objaddr.objectId; } diff --git a/contrib/timescaledb/src/partitioning.cpp b/contrib/timescaledb/src/partitioning.cpp index c2a55ba08..49b496a52 100644 --- a/contrib/timescaledb/src/partitioning.cpp +++ b/contrib/timescaledb/src/partitioning.cpp @@ -161,8 +161,7 @@ find_text_coercion_func(Oid type) */ cpt = find_coercion_pathway(TEXTOID, type, COERCION_EXPLICIT, &funcid); - if (cpt != COERCION_PATH_FUNC) - getTypeOutputInfo(type, &funcid, &is_varlena); + getTypeOutputInfo(type, &funcid, &is_varlena); return funcid; } diff --git a/contrib/timescaledb/src/process_utility.cpp b/contrib/timescaledb/src/process_utility.cpp index 73f2926e0..bafa622d7 100644 --- a/contrib/timescaledb/src/process_utility.cpp +++ b/contrib/timescaledb/src/process_utility.cpp @@ -912,15 +912,7 @@ process_drop_hypertable(ProcessUtilityArgs *args, DropStmt *stmt) errmsg("dropping compressed hypertables not supported"), errhint("Please drop the corresponding uncompressed hypertable " "instead."))); - - if (stmt->behavior != DROP_CASCADE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Dropping hypertables without CASCADE is not supported"), - errhint("Please use drop hypertable.... 
CASCADE " - "instead."))); - /* * We need to drop hypertable chunks before the hypertable to avoid the need * to CASCADE such drops; @@ -937,7 +929,7 @@ process_drop_hypertable(ProcessUtilityArgs *args, DropStmt *stmt) ts_hypertable_drop(compressed_hypertable, DROP_CASCADE); } #ifdef OG30 - if (stmt->behavior == DROP_CASCADE && !TS_HYPERTABLE_HAS_COMPRESSION(ht)) + if (!TS_HYPERTABLE_HAS_COMPRESSION(ht)) { ts_hypertable_drop(ht, DROP_CASCADE); handled = true; -- Gitee From c0c75f1f8b8d220de4f4a4da7bc83380d2c43513 Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Mon, 18 Dec 2023 19:26:26 +0800 Subject: [PATCH 136/434] sync ef69af7604dfbbca67e9986803a9ca5e9e9593f3 --- contrib/dolphin/include/builtin_funcs.ini | 12 ++++++++ .../dolphin/include/plugin_nodes/parsenodes.h | 2 ++ contrib/dolphin/include/plugin_utils/fmgr.h | 3 +- contrib/dolphin/plugin_parser/analyze.cpp | 29 +++++++++---------- .../dolphin/plugin_parser/parse_utilcmd.cpp | 7 ++++- contrib/dolphin/plugin_utility.cpp | 9 +++++- .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 27 +++++++++++++++-- contrib/dolphin/plugin_utils/fmgr/fmgr.cpp | 5 ++-- 8 files changed, 71 insertions(+), 23 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index b4101a0f2..67af70fd5 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -9788,6 +9788,18 @@ AddFuncGroup( "pt_contained_poly", 1, AddBuiltinFunc(_0(1429), _1("pt_contained_poly"), _2(2), _3(true), _4(false), _5(pt_contained_poly), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 600, 604), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pt_contained_poly"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + 
AddFuncGroup( + "pubddl_decode", 1, + AddBuiltinFunc(_0(4648), _1("pubddl_decode"), _2(1), _3(true), _4(false), _5(pubddl_decode), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 20), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pubddl_decode"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "publication_deparse_ddl_command_end", 1, + AddBuiltinFunc(_0(4642), _1("publication_deparse_ddl_command_end"), _2(2), _3(true), _4(false), _5(publication_deparse_ddl_command_end), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_ddl_command_end"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "publication_deparse_ddl_command_start", 1, + AddBuiltinFunc(_0(4643), _1("publication_deparse_ddl_command_start"), _2(2), _3(true), _4(false), _5(publication_deparse_ddl_command_start), _6(3838), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("publication_deparse_ddl_command_start"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "pv_builtin_functions", 1, AddBuiltinFunc(_0(5345), _1("pv_builtin_functions"), _2(0), _3(false), _4(true), _5(pv_builtin_functions), 
_6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(3100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(32, 19, 26, 26, 26, 700, 700, 26, 24, 16, 16, 16, 16, 16, 16, 18, 21, 21, 26, 30, 1007, 1002, 1009, 194, 25, 25, 1009, 1034, 22, 16, 16, 16, 26), _22(32, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(32, "proname", "pronamespace", "proowner", "prolang", "procost", "prorows", "provariadic", "protransform", "proisagg", "proiswindow", "prosecdef", "proleakproof", "proisstrict", "proretset", "provolatile", "pronargs", "pronargdefaults", "prorettype", "proargtypes", "proallargtypes", "proargmodes", "proargnames", "proargdefaults", "prosrc", "probin", "proconfig", "proacl", "prodefaultargpos", "fencedmode", "proshippable", "propackage", "oid"), _24(NULL), _25("pv_builtin_functions"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 9d7936c37..68a5c8119 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -1143,6 +1143,7 @@ typedef struct CreateSeqStmt { bool canCreateTempSeq; /* create sequence when "create table (like )" */ bool is_large; bool missing_ok; /* skip error if a Sequence is exists */ + bool is_autoinc; } CreateSeqStmt; typedef struct AlterSeqStmt { @@ -1154,6 +1155,7 @@ typedef struct AlterSeqStmt { bool is_serial; /* Indicates if this sequence is part of SERIAL process */ #endif bool is_large; /* Indicates if this is a large or normal sequence */ + bool is_autoinc; } AlterSeqStmt; /* ---------------------- diff --git 
a/contrib/dolphin/include/plugin_utils/fmgr.h b/contrib/dolphin/include/plugin_utils/fmgr.h index bc464b799..63cd6bb7e 100644 --- a/contrib/dolphin/include/plugin_utils/fmgr.h +++ b/contrib/dolphin/include/plugin_utils/fmgr.h @@ -457,8 +457,7 @@ typedef const Pg_magic_struct* (*PGModuleMagicFunction)(void); * are allowed to be NULL. */ extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1, bool can_ignore = false); -extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, - bool can_ignore = false); +extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, bool can_ignore = false); extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, bool can_ignore = false); extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index 6236235d2..0a7fc7e59 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -4370,30 +4370,29 @@ static void MergeTargetList(List** targetLists, RangeTblEntry* rte1, int rtindex targetLists[rtindex2 - 1] = NULL; } -static void transformMultiTargetList(List* target_rangetblentry, List** targetLists) +static void transformMultiTargetList(List* target_rangetblentry, List** targetLists, List* result_relations) { - int rtindex1 = 1, rtindex2 = 1; - ListCell* l1; - ListCell* l2; - if (list_length(target_rangetblentry) <= 1) { return; } - foreach (l1, target_rangetblentry) { + + ListCell *l1 = NULL; + ListCell *l2 = NULL; + forboth (l1, target_rangetblentry, l2, result_relations) { RangeTblEntry* rte1 = (RangeTblEntry*)lfirst(l1); - rtindex2 = 0; + int rtindex1 = lfirst_int(l2); + ListCell *l3 = lnext(l1); + ListCell *l4 = lnext(l2); - l2 = lnext(l1); - rtindex2 = rtindex1 + 1; - while (l2 != NULL) { - 
RangeTblEntry* rte2 = (RangeTblEntry*)lfirst(l2); + while (l3 && l4) { + RangeTblEntry* rte2 = (RangeTblEntry*)lfirst(l3); + int rtindex2 = lfirst_int(l4); if (rte2->relid == rte1->relid) { MergeTargetList(targetLists, rte1, rtindex1, rte2, rtindex2); } - rtindex2++; - l2 = lnext(l2); + l3 = lnext(l3); + l4 = lnext(l4); } - rtindex1++; } } @@ -4820,7 +4819,7 @@ static List* transformUpdateTargetList(ParseState* pstate, List* qryTlist, List* * If there are actually the same result relations by different alias * or synonym in multiple update, merge their targetLists. */ - transformMultiTargetList(pstate->p_target_rangetblentry, new_tle); + transformMultiTargetList(pstate->p_target_rangetblentry, new_tle, resultRelations); if (targetRelationNum == 1) { int i = linitial_int(resultRelations); diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index cab39c9ee..25f96ee83 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -1150,7 +1150,7 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo seqstmt = makeNode(CreateSeqStmt); seqstmt->sequence = makeRangeVar(snamespace, sname, -1); seqstmt->options = is_autoinc ? 
GetAutoIncSeqOptions(cxt) : NULL; - + seqstmt->is_autoinc = is_autoinc; #ifdef PGXC seqstmt->is_serial = true; #endif @@ -1199,6 +1199,7 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo attnamelist = list_make3(makeString(snamespace), makeString(cxt->relation->relname), makeString(column->colname)); altseqstmt->options = list_make1(makeDefElem("owned_by", (Node*)attnamelist)); altseqstmt->is_large = large; + altseqstmt->is_autoinc = is_autoinc; cxt->alist = lappend(cxt->alist, altseqstmt); @@ -4165,6 +4166,10 @@ static IndexStmt* transformIndexConstraint(Constraint* constraint, CreateStmtCon ReleaseSysCache(tup_idx); index->indexOid = index_oid; + + /* save the original index name, it wll be replace by constraint */ + DefElem *def = makeDefElem("origin_indexname", (Node*)makeString(constraint->indexname)); + index->options = lappend(index->options, def); } /* diff --git a/contrib/dolphin/plugin_utility.cpp b/contrib/dolphin/plugin_utility.cpp index f9b717e81..78a773a14 100644 --- a/contrib/dolphin/plugin_utility.cpp +++ b/contrib/dolphin/plugin_utility.cpp @@ -2328,7 +2328,8 @@ void CreateCommand(CreateStmt *parse_tree, const char *query_string, ParamListIn true, #endif /* PGXC */ NULL, - is_top_level ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY, + //is_top_level ? 
PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY, + PROCESS_UTILITY_SUBCOMMAND, isCTAS); } @@ -5145,6 +5146,12 @@ ProcessUtilitySlow(Node *parse_tree, ObjectAddress address; ObjectAddress secondaryObject = InvalidObjectAddress; + if (T_CreateStmt == nodeTag(parse_tree) && isCTAS) { + isCompleteQuery = true; + } else if (T_AlterTableStmt == nodeTag(parse_tree) && ((AlterTableStmt*)parse_tree)->fromCreate) { + isCompleteQuery = false; + } + /* All event trigger calls are done only when isCompleteQuery is true */ needCleanup = isCompleteQuery && EventTriggerBeginCompleteQuery(); diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index 268f3ff91..b879b7f60 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -32,6 +32,7 @@ #include "catalog/pg_type.h" #include "catalog/pg_partition_fn.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_publication.h" #include "commands/dbcommands.h" #include "commands/user.h" #include "commands/vacuum.h" @@ -3421,6 +3422,28 @@ Datum locktag_decode(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(result); } +Datum pubddl_decode(PG_FUNCTION_ARGS) +{ + int64 pubddl = DatumGetInt64(PG_GETARG_DATUM(0)); + StringInfoData tmpbuf; + initStringInfo(&tmpbuf); + if (pubddl == PUBDDL_NONE) { + appendStringInfo(&tmpbuf, "%s", "none"); + } else if (pubddl == PUBDDL_ALL) { + appendStringInfo(&tmpbuf, "%s", "all"); + } else { + bool first = true; + if (ENABLE_PUBDDL_TYPE(pubddl, PUBDDL_TABLE)) { + appendStringInfo(&tmpbuf, "%s", "table"); + first = false; + } + } + text *result = cstring_to_text(tmpbuf.data); + + FreeStringInfo(&tmpbuf); + PG_RETURN_TEXT_P(result); +} + Datum working_version_num(PG_FUNCTION_ARGS) { PG_RETURN_UINT32(t_thrd.proc->workingVersionNum); @@ -15419,8 +15442,8 @@ void fill_drc_info_to_values(dv_drc_buf_info *drc_info, Datum *values) values[13] = UInt32GetDatum((uint32)drc_info->recovery_skip); values[14] = 
UInt32GetDatum((uint32)drc_info->recycling); values[15] = UInt32GetDatum((uint32)drc_info->converting_req_info_inst_id); - values[16] = UInt32GetDatum((uint32)drc_info->converting_req_info_curr_mod); - values[17] = UInt32GetDatum((uint32)drc_info->converting_req_info_req_mod); + values[16] = UInt32GetDatum((uint32)drc_info->converting_req_info_curr_mode); + values[17] = UInt32GetDatum((uint32)drc_info->converting_req_info_req_mode); } Datum query_all_drc_info(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp index 434a55e44..98f49f0e2 100644 --- a/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp +++ b/contrib/dolphin/plugin_utils/fmgr/fmgr.cpp @@ -1322,7 +1322,7 @@ Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum fcinfo.argnull[0] = false; fcinfo.argnull[1] = false; fcinfo.can_ignore = can_ignore; - + result = (*func)(&fcinfo); /* Check for null result, since caller is clearly not expecting one */ @@ -1390,7 +1390,8 @@ Datum DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum } Datum DirectFunctionCall5Coll( - PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, bool can_ignore) + PGFunction func, Oid collation, Datum arg1, Datum arg2, Datum arg3, Datum arg4, Datum arg5, + bool can_ignore) { FunctionCallInfoData fcinfo; Datum result; -- Gitee From aa76eef657ca4a20bcc65e4a9e486595e3219edf Mon Sep 17 00:00:00 2001 From: Mijamind Date: Mon, 18 Dec 2023 15:35:05 +0800 Subject: [PATCH 137/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E6=B7=BB=E5=8A=A0remoteQuery=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E8=8E=B7=E5=8F=96libcomm=E7=AB=AF=E5=8F=A3=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/src/guc_spq.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git 
a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 529c7330c..96866de82 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1559,7 +1559,7 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) if (!SplitIdentifierString(rawname, ',', &nodelist)) { pfree(rawname); /* syntax error in name list */ - GUC_check_errdetail("spq cluster map is invalid, name|ip|port|oid|cport|sport,..."); + GUC_check_errdetail("spq cluster map is invalid, name|ip|port,..."); MemoryContextSwitchTo(oldContext); return false; } @@ -1569,11 +1569,11 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) foreach_cell(lnode, nodelist) { node = &nodesDefinition[idx]; List *itemlist; - char *name, *ip, *port, *nodeoid, *ctlport, *sctpport; + char *name, *ip, *port; char *nodestring = pstrdup((char *)lfirst(lnode)); (void)SplitIdentifierString(nodestring, '|', &itemlist); - if (list_length(itemlist) != 6) { - GUC_check_errdetail("spq cluster map is invalid, name|ip|port|oid|cport|sport,..."); + if (list_length(itemlist) != 3) { + GUC_check_errdetail("spq cluster map is invalid, name|ip|port,..."); pfree(rawname); pfree(nodestring); list_free(nodelist); @@ -1585,9 +1585,6 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) name = (char *)list_nth(itemlist, 0); ip = (char *)list_nth(itemlist, 1); port = (char *)list_nth(itemlist, 2); - nodeoid = (char *)list_nth(itemlist, 3); - ctlport = (char *)list_nth(itemlist, 4); - sctpport = (char *)list_nth(itemlist, 5); node->nodeid = idx; rc = strncpy_s(node->nodename.data, NAMEDATALEN, name, NAMEDATALEN); securec_check_c(rc, "\0", "\0"); @@ -1596,9 +1593,6 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) rc = strncpy_s(node->nodehost1.data, NAMEDATALEN, ip, NAMEDATALEN); securec_check_c(rc, "\0", "\0"); node->nodeport = (int)strtol(port, NULL, 10); - node->nodeoid = (Oid)strtol(nodeoid, NULL, 10); - 
node->nodectlport = (int)strtol(ctlport, NULL, 10); - node->nodesctpport = (int)strtol(sctpport, NULL, 10); idx++; pfree(nodestring); list_free(itemlist); -- Gitee From 2002c48435420ff59cd1ddd4ed20d228899465e6 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Wed, 20 Dec 2023 14:57:23 +0800 Subject: [PATCH 138/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D5.0.1=E5=8D=87?= =?UTF-8?q?=E7=BA=A75.1.1=E5=A4=B1=E8=B4=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_parser/parse_coerce.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index 56c01e46e..f2eed8fda 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -3371,7 +3371,9 @@ int findNumTimeFunctionIdx(Oid typeId) Oid findNumTimeExplicitCastFunction(Oid sourceTypeId, Oid funcid) { int idx = findNumTimeFunctionIdx(sourceTypeId); - return (idx == INVALID_IDX) ? funcid : get_func_oid(numCastTimeFunction[idx], PG_CATALOG_NAMESPACE, NULL); + Oid cast_oid = (idx == INVALID_IDX) ? InvalidOid : + get_func_oid(numCastTimeFunction[idx], PG_CATALOG_NAMESPACE, NULL); + return (cast_oid != InvalidOid) ? 
cast_oid : funcid; } int findEnumFunctionIdx(Oid typeId) -- Gitee From 74a4d5526bfa3bb6526b033f90cbf2014ed9bbda Mon Sep 17 00:00:00 2001 From: lukeman Date: Wed, 20 Dec 2023 10:44:30 +0800 Subject: [PATCH 139/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5server=E4=BB=93?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/include/builtin_funcs.ini | 2 +- .../include/plugin_parser/parse_relation.h | 2 +- contrib/dolphin/plugin_catalog/heap.cpp | 42 +++- .../dolphin/plugin_parser/parse_relation.cpp | 215 +++++++++++++++++ .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 74 ++---- contrib/dolphin/tablecmds.cpp | 166 +++++++++++++- .../include/plugin_parser/parse_relation.h | 2 +- .../whale/plugin_parser/parse_relation.cpp | 216 ++++++++++++++++++ contrib/whale/tablecmds.cpp | 166 +++++++++++++- 9 files changed, 809 insertions(+), 76 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index 67af70fd5..5affd20ce 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -12973,7 +12973,7 @@ AddFuncGroup( ), AddFuncGroup( "query_node_reform_info", 1, - AddBuiltinFunc(_0(2867), _1("query_node_reform_info"), _2(3), _3(true), _4(true), _5(query_node_reform_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(64), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(10, INT4OID, TEXTOID, TEXTOID, TEXTOID, BOOLOID, TEXTOID, TEXTOID, INT4OID, TEXTOID, TEXTOID), _22(10,'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "reform_node_id", "reform_type", "reform_start_time", "reform_end_time", "is_reform_success", "redo_start_time", "redo_end_time", "xlog_total_bytes", "hashmap_construct_time", "action"), _24(NULL), _25("query_node_reform_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), 
_31(false), _32(false), _33("query node reform information"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(2867), _1("query_node_reform_info"), _2(3), _3(true), _4(true), _5(query_node_reform_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(64), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(10, INT4OID, TEXTOID, TIMESTAMPTZOID, TIMESTAMPTZOID, BOOLOID, TIMESTAMPTZOID, TIMESTAMPTZOID, INT4OID, TIMESTAMPTZOID, TEXTOID), _22(10,'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "reform_node_id", "reform_type", "reform_start_time", "reform_end_time", "is_reform_success", "redo_start_time", "redo_end_time", "xlog_total_bytes", "hashmap_construct_time", "action"), _24(NULL), _25("query_node_reform_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33("query node reform information"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "query_page_distribution_info", 1, diff --git a/contrib/dolphin/include/plugin_parser/parse_relation.h b/contrib/dolphin/include/plugin_parser/parse_relation.h index fba5f35f1..92abe771e 100644 --- a/contrib/dolphin/include/plugin_parser/parse_relation.h +++ b/contrib/dolphin/include/plugin_parser/parse_relation.h @@ -58,7 +58,7 @@ extern Oid attnumTypeId(Relation rd, int attid); extern Oid attnumCollationId(Relation rd, int attid); extern bool GetPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); extern bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); - +extern bool ValidateDependView(Oid view_oid, char objType); #ifdef PGXC extern int specialAttNum(const char* attname); #endif diff --git a/contrib/dolphin/plugin_catalog/heap.cpp b/contrib/dolphin/plugin_catalog/heap.cpp index 415bfbc79..2fa14edaf 100644 --- 
a/contrib/dolphin/plugin_catalog/heap.cpp +++ b/contrib/dolphin/plugin_catalog/heap.cpp @@ -1012,6 +1012,7 @@ void InsertPgAttributeTuple(Relation pg_attribute_rel, Form_pg_attribute new_att /* at default, new fileld attinitdefval of pg_attribute is null. */ nulls[Anum_pg_attribute_attinitdefval - 1] = true; + nulls[Anum_pg_attribute_attdroppedname - 1] = true; tup = heap_form_tuple(RelationGetDescr(pg_attribute_rel), values, nulls); @@ -3390,6 +3391,13 @@ void RemoveAttributeById(Oid relid, AttrNumber attnum) Form_pg_attribute attStruct; char newattname[NAMEDATALEN]; bool isRedisDropColumn = false; + Datum values[Natts_pg_attribute] = { 0 }; + bool nulls[Natts_pg_attribute] = { 0 }; + bool replaces[Natts_pg_attribute] = { 0 }; + const int keyNum = 2; + ScanKeyData key[keyNum]; + SysScanDesc scan; + HeapTuple newatttuple; /* * Grab an exclusive lock on the target table, which we will NOT release @@ -3402,7 +3410,10 @@ void RemoveAttributeById(Oid relid, AttrNumber attnum) attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); - atttuple = SearchSysCacheCopy2(ATTNUM, ObjectIdGetDatum(relid), Int16GetDatum(attnum)); + ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); + ScanKeyInit(&key[1], Anum_pg_attribute_attnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(attnum)); + scan = systable_beginscan(attr_rel, AttributeRelidNumIndexId, true, SnapshotSelf, keyNum, key); + atttuple = systable_getnext(scan); if (!HeapTupleIsValid(atttuple)) /* shouldn't happen */ { Assert(0); @@ -3410,7 +3421,6 @@ void RemoveAttributeById(Oid relid, AttrNumber attnum) (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for attribute %d of relation %u", attnum, relid))); } - attStruct = (Form_pg_attribute)GETSTRUCT(atttuple); if (RelationIsRedistributeDest(rel) && (attnum > 0 && attnum == rel->rd_att->natts)) isRedisDropColumn = true; @@ -3422,9 +3432,10 @@ void RemoveAttributeById(Oid relid, AttrNumber attnum) } 
else { errno_t rc; /* Dropping user attributes is lots harder */ + attStruct = (Form_pg_attribute)GETSTRUCT(atttuple); /* Mark the attribute as dropped */ - attStruct->attisdropped = true; + values[Anum_pg_attribute_attisdropped - 1] = BoolGetDatum(true); /* * Set the type OID to invalid. A dropped attribute's type link @@ -3435,27 +3446,39 @@ void RemoveAttributeById(Oid relid, AttrNumber attnum) * atttypid to zero here as a means of catching code that incorrectly * expects it to be valid. */ - attStruct->atttypid = InvalidOid; + values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(InvalidOid); /* Remove any NOT NULL constraint the column may have */ - attStruct->attnotnull = false; + values[Anum_pg_attribute_attnotnull - 1] = BoolGetDatum(false); /* We don't want to keep stats for it anymore */ - attStruct->attstattarget = 0; + values[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(0); + + values[Anum_pg_attribute_attdroppedname - 1] = NameGetDatum(&(attStruct->attname)); /* * Change the column name to something that isn't likely to conflict */ + rc = snprintf_s(newattname, sizeof(newattname), sizeof(newattname) - 1, "........pg.dropped.%d........", attnum); securec_check_ss(rc, "\0", "\0"); - (void)namestrcpy(&(attStruct->attname), newattname); + values[Anum_pg_attribute_attname - 1] = NameGetDatum(newattname); + replaces[Anum_pg_attribute_attisdropped - 1] = true; + replaces[Anum_pg_attribute_atttypid - 1] = true; + replaces[Anum_pg_attribute_attnotnull - 1] = true; + replaces[Anum_pg_attribute_attstattarget - 1] = true; + replaces[Anum_pg_attribute_attdroppedname - 1] = true; + replaces[Anum_pg_attribute_attname - 1] = true; - simple_heap_update(attr_rel, &atttuple->t_self, atttuple); + newatttuple = heap_modify_tuple(atttuple, RelationGetDescr(attr_rel), values, nulls, replaces); + simple_heap_update(attr_rel, &newatttuple->t_self, newatttuple); /* keep the system catalog indexes current */ - CatalogUpdateIndexes(attr_rel, atttuple); + 
CatalogUpdateIndexes(attr_rel, newatttuple); + heap_freetuple_ext(newatttuple); } + systable_endscan(scan); /* * Because updating the pg_attribute row will trigger a relcache flush for @@ -8317,6 +8340,7 @@ HeapTuple heaptuple_from_pg_attribute(Relation pg_attribute_rel, /* at default, new fileld attinitdefval of pg_attribute is null. */ nulls[Anum_pg_attribute_attinitdefval - 1] = true; + nulls[Anum_pg_attribute_attdroppedname - 1] = true; return heap_form_tuple(RelationGetDescr(pg_attribute_rel), values, nulls); } diff --git a/contrib/dolphin/plugin_parser/parse_relation.cpp b/contrib/dolphin/plugin_parser/parse_relation.cpp index 7a96bf06d..c16bc876a 100644 --- a/contrib/dolphin/plugin_parser/parse_relation.cpp +++ b/contrib/dolphin/plugin_parser/parse_relation.cpp @@ -58,6 +58,9 @@ #include "plugin_optimizer/planner.h" #include "storage/tcap.h" #include "gs_ledger/ledger_utils.h" +#include "catalog/pg_object.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_rewrite.h" #ifdef ENABLE_MOT #include "storage/mot/jit_def.h" #endif @@ -997,7 +1000,210 @@ static void buildScalarFunctionAlias(Node* funcexpr, char* funcname, Alias* alia eref->colnames = list_make1(makeString(eref->aliasname)); } +static void CopyAttributeInfo(Form_pg_attribute newtuple, Form_pg_attribute oldtuple) +{ + newtuple->attnum = oldtuple->attnum; + newtuple->atttypid = oldtuple->atttypid; + newtuple->attlen = oldtuple->attlen; + newtuple->atttypmod = oldtuple->atttypmod; + // for matview + newtuple->attcollation = oldtuple->attcollation; + newtuple->attbyval = oldtuple->attbyval; + newtuple->attstorage = oldtuple->attstorage; +} + +static void CheckViewColumnExists(Oid view_oid, int2 attnum, Form_pg_attribute newtuple) +{ + HeapTuple tuple; + Form_pg_attribute form_attribute; + tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(view_oid), Int16GetDatum(attnum)); + if (!HeapTupleIsValid(tuple)) { + elog(ERROR, "catalog lookup failed for column %d of relation %u", attnum, view_oid); + } + 
form_attribute = (Form_pg_attribute)GETSTRUCT(tuple); + CopyAttributeInfo(newtuple, form_attribute); + ReleaseSysCache(tuple); +} +static bool CheckRelationColumnExists(Oid rel_oid, int2 attnum, Form_pg_attribute attrtuple) +{ + HeapTuple tuple; + Form_pg_attribute attForm; + tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(rel_oid), Int16GetDatum(attnum)); + if (!HeapTupleIsValid(tuple)) { + elog(ERROR, "catalog lookup failed for column %d of relation %u", attnum, rel_oid); + } + attForm = (Form_pg_attribute)GETSTRUCT(tuple); + if (!attForm->attisdropped) { + CopyAttributeInfo(attrtuple, attForm); + ReleaseSysCache(tuple); + return true; + } + const char* droppedname = attForm->attdroppedname.data; + HeapTuple tuple_drop; + Form_pg_attribute attForm_drop; + tuple_drop = SearchSysCache2(ATTNAME, ObjectIdGetDatum(rel_oid), CStringGetDatum(droppedname)); + ReleaseSysCache(tuple); + if (!HeapTupleIsValid(tuple_drop)) { + return false; + } + attForm_drop = (Form_pg_attribute)GETSTRUCT(tuple_drop); + CopyAttributeInfo(attrtuple, attForm_drop); + ReleaseSysCache(tuple_drop); + return true; +} + +static void CheckPgAttribute(Oid obj_oid, char* attName, Form_pg_attribute new_attribute) +{ + const int keyNum = 2; + Relation rel; + ScanKeyData key[keyNum]; + SysScanDesc scan; + HeapTuple tuple; + HeapTuple new_dep_tuple; + Form_pg_attribute attForm; + rel = heap_open(AttributeRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(obj_oid)); + ScanKeyInit(&key[1], Anum_pg_attribute_attname, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(attName)); + scan = systable_beginscan(rel, AttributeRelidNameIndexId, true, SnapshotSelf, keyNum, &key[0]); + tuple = systable_getnext(scan); + if (!HeapTupleIsValid(tuple)) { + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); + elog(ERROR, "catalog lookup failed for column %s of relation %u", attName, obj_oid); + } + attForm = 
(Form_pg_attribute)GETSTRUCT(tuple); + Datum values[Natts_pg_attribute] = { 0 }; + bool nulls[Natts_pg_attribute] = { 0 }; + bool replaces[Natts_pg_attribute] = { 0 }; + values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(new_attribute->atttypid); + values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); + values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(new_attribute->atttypmod); + values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); + values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); + values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(new_attribute->attcollation); + replaces[Anum_pg_attribute_atttypid - 1] = true; + replaces[Anum_pg_attribute_attlen - 1] = true; + replaces[Anum_pg_attribute_atttypmod - 1] = true; + replaces[Anum_pg_attribute_attbyval - 1] = true; + replaces[Anum_pg_attribute_attstorage - 1] = true; + replaces[Anum_pg_attribute_attcollation - 1] = true; + new_dep_tuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, replaces); + simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); +} + +bool ValidateDependView(Oid view_oid, char objType) +{ + bool isValid = true; + Oid rw_objid = InvalidOid; + // 1. filter the valid view + if (GetPgObjectValid(view_oid, objType)) { + return isValid; + } + // 2. 
find pg_rewrite entry which this view depends on internally + const int keyNum = 2; + ScanKeyData key[keyNum]; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + Relation rel = heap_open(DependRelationId, AccessShareLock); + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(view_oid)); + scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, keyNum, key); + while (HeapTupleIsValid((tup = systable_getnext(scan)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); + if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_INTERNAL) { + rw_objid = depform->objid; + break; + } + } + systable_endscan(scan); + heap_close(rel, AccessShareLock); + if (!OidIsValid(rw_objid)) { + elog(ERROR, "cannot find the internal dependent pg_rewrite entry."); + } + // 3. find all columns of parent views and tables which this view depends on directly, + // and check their validity recursively. 
+ List *query_str = NIL; + ScanKeyData key_dep[keyNum]; + SysScanDesc scan_dep = NULL; + HeapTuple tup_dep = NULL; + Relation rel_dep = heap_open(DependRelationId, RowExclusiveLock); + ScanKeyInit(&key_dep[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RewriteRelationId)); + ScanKeyInit(&key_dep[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_objid)); + scan_dep = systable_beginscan(rel_dep, DependDependerIndexId, true, NULL, keyNum, key_dep); + Form_pg_attribute newtuple = (Form_pg_attribute)palloc0(sizeof(FormData_pg_attribute)); + while (HeapTupleIsValid((tup_dep = systable_getnext(scan_dep)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup_dep); + if (depform->refclassid != RelationRelationId || depform->deptype != DEPENDENCY_NORMAL || + depform->refobjsubid == 0) { + continue; + } + Oid dep_objid = depform->refobjid; + int2 dep_objsubid = depform->refobjsubid; + char relkind = get_rel_relkind(dep_objid); + char* attName = NULL; + if (relkind == RELKIND_RELATION) { + // the column exists, and its type may have changed or it mat have been deleted and recreated + isValid &= CheckRelationColumnExists(dep_objid, dep_objsubid, newtuple); + if (newtuple->attnum > 0) { + // change pg_depend + Datum values[Natts_pg_depend] = { 0 }; + bool nulls[Natts_pg_depend] = { 0 }; + bool replaces[Natts_pg_depend] = { 0 }; + HeapTuple new_dep_tuple; + values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newtuple->attnum); + replaces[Anum_pg_depend_refobjsubid - 1] = true; + new_dep_tuple = heap_modify_tuple(tup_dep, RelationGetDescr(rel_dep), values, nulls, replaces); + simple_heap_update(rel_dep, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel_dep, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + // change pg_rewrite targetEntry + CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); + // change 
pg_attribute + CheckPgAttribute(view_oid, attName, newtuple); + } + } else if (relkind == RELKIND_VIEW || relkind == RELKIND_MATVIEW) { + isValid &= ValidateDependView(dep_objid, + relkind == RELKIND_VIEW ? OBJECT_TYPE_VIEW : OBJECT_TYPE_MATVIEW); + if (isValid) { + // here means dep_objid is valid, we should keep the same view_oid.attr with dep_objid.dep_objsubid + // find dep_objid.dep_objsubid + CheckViewColumnExists(dep_objid, dep_objsubid, newtuple); + // change pg_rewrite targetEntry + CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); + // change pg_attribute + CheckPgAttribute(view_oid, attName, newtuple); + } + } + errno_t rc = memset_s(newtuple, sizeof(FormData_pg_attribute), 0, sizeof(FormData_pg_attribute)); + securec_check_c(rc, "\0", "\0"); + pfree_ext(attName); + if (!isValid) { + pfree_ext(newtuple); + systable_endscan(scan_dep); + heap_close(rel_dep, RowExclusiveLock); + return false; + } + } + pfree_ext(newtuple); + systable_endscan(scan_dep); + heap_close(rel_dep, RowExclusiveLock); + // 4. 
mark the current view valid + SetPgObjectValid(view_oid, objType, true); + /* create or replace view */ + if (objType == OBJECT_TYPE_VIEW) { + ReplaceViewQueryFirstAfter(query_str); + } + return isValid; +} /* * Open a table during parse analysis @@ -1154,6 +1360,15 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm errmsg("relation \"%s\" has data only in database \"postgres\"", relation->relname), errhint("please use database \"postgres\""))); } + if (RelationGetRelkind(rel) == RELKIND_VIEW && + RelationGetRelid(rel) >= FirstNormalObjectId && + !ValidateDependView(RelationGetRelid(rel), OBJECT_TYPE_VIEW)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The view %s is invalid, please make it valid before operation.", + RelationGetRelationName(rel)), + errhint("Please re-add missing table fields."))); + } if (!u_sess->attr.attr_common.XactReadOnly && rel->rd_id == UserStatusRelationId) { TryUnlockAllAccounts(); diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index b879b7f60..1595958e9 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -14952,38 +14952,18 @@ TupleDesc create_query_node_reform_info_tupdesc() TupleDesc tupdesc = CreateTemplateTupleDesc(column, false); TupleDescInitEntry(tupdesc, (AttrNumber)1, "reform_node_id", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)2, "reform_type", TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "reform_start_time", TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)4, "reform_end_time", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "reform_start_time", TIMESTAMPTZOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "reform_end_time", TIMESTAMPTZOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)5, "is_reform_success", BOOLOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)6, 
"redo_start_time", TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)7, "rode_end_time", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "redo_start_time", TIMESTAMPTZOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "rode_end_time", TIMESTAMPTZOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)8, "xlog_total_bytes", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)9, "hashmap_construct_time", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "hashmap_construct_time", TIMESTAMPTZOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)10, "action", TEXTOID, -1, 0); BlessTupleDesc(tupdesc); return tupdesc; } -/* -* @Description : Convert timeval to string -* @in : time -* @out : buffer -*/ -void timeval_to_string(timeval time, char* buffer, int buf_size) -{ - if (buffer == NULL || buf_size == 0 || time.tv_sec == 0) { - return; - } - time_t format_time = time.tv_sec; - struct tm *p_time = localtime(&format_time); - - char tmp_buf[32] = {0}; - strftime(tmp_buf, sizeof(tmp_buf), "%Y-%m-%d %H:%M:%S", p_time); - errno_t rc = sprintf_s(buffer, buf_size - 1, "%s.%ld ", tmp_buf, time.tv_usec / 1000); - securec_check_ss(rc, "\0", "\0"); -} - - typedef struct { uint64 changed_inst_list; uint64 stable_inst_list; @@ -15034,6 +15014,7 @@ Datum query_node_reform_info(PG_FUNCTION_ARGS) #define MAX_BUF_SIZE 256 char tmp_buf[MAX_BUF_SIZE] = {0}; Datum values[10]; + bool nulls[10] = {false}; values[0] = UInt16GetDatum(i); if (i == (uint64)SS_MY_INST_ID) { switch (reform_info.reform_type) { @@ -15050,41 +15031,34 @@ Datum query_node_reform_info(PG_FUNCTION_ARGS) values[1] = CStringGetTextDatum("NULL"); } - timeval_to_string(reform_info.reform_start_time, tmp_buf, MAX_BUF_SIZE); - values[2] = CStringGetTextDatum(tmp_buf); - - timeval_to_string(reform_info.reform_end_time, tmp_buf, MAX_BUF_SIZE); - values[3] = CStringGetTextDatum(tmp_buf); + values[2] = TimestampTzGetDatum(reform_info.reform_start_time); + values[3] = 
TimestampTzGetDatum(reform_info.reform_end_time); values[4] = BoolGetDatum(reform_info.reform_success); if (reform_info.reform_type == DMS_REFORM_TYPE_FOR_FAILOVER_OPENGAUSS) { - timeval_to_string(reform_info.redo_start_time, tmp_buf, MAX_BUF_SIZE); - values[5] = CStringGetTextDatum(tmp_buf); - - timeval_to_string(reform_info.redo_end_time, tmp_buf, MAX_BUF_SIZE); - values[6] = CStringGetTextDatum(tmp_buf); - + values[5] = TimestampTzGetDatum(reform_info.redo_start_time); + if (reform_info.redo_start_time > reform_info.redo_end_time) { + nulls[6] = true; + } else { + values[6] = TimestampTzGetDatum(reform_info.redo_end_time); + } values[7] = UInt64GetDatum(reform_info.redo_total_bytes); - - timeval_to_string(reform_info.construct_hashmap, tmp_buf, MAX_BUF_SIZE); - values[8] = CStringGetTextDatum(tmp_buf); + values[8] = TimestampTzGetDatum(reform_info.construct_hashmap); } else { - sprintf_s(tmp_buf, MAX_BUF_SIZE, "-"); - values[5] = CStringGetTextDatum(tmp_buf); - values[6] = CStringGetTextDatum(tmp_buf); values[7] = UInt64GetDatum(-1); - values[8] = CStringGetTextDatum(tmp_buf); + nulls[5] = true; + nulls[6] = true; + nulls[8] = true; } } else { values[1] = CStringGetTextDatum("-"); - sprintf_s(tmp_buf, MAX_BUF_SIZE, "-"); - values[2] = CStringGetTextDatum(tmp_buf); - values[3] = CStringGetTextDatum(tmp_buf); values[4] = BoolGetDatum(reform_info.reform_success); - values[5] = CStringGetTextDatum(tmp_buf); - values[6] = CStringGetTextDatum(tmp_buf); values[7] = UInt64GetDatum(-1); - values[8] = CStringGetTextDatum(tmp_buf); + nulls[2] = true; + nulls[3] = true; + nulls[5] = true; + nulls[6] = true; + nulls[8] = true; } if (iterate->changed_inst_list & (1 << i)) { @@ -15098,7 +15072,7 @@ Datum query_node_reform_info(PG_FUNCTION_ARGS) } values[9] = CStringGetTextDatum(tmp_buf); - bool nulls[10] = {false}; + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); if (tuple != NULL) { iterate->iterate_idx++; diff --git a/contrib/dolphin/tablecmds.cpp 
b/contrib/dolphin/tablecmds.cpp index d301f293e..22c09bc69 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -546,7 +546,6 @@ static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattn static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static ViewInfoForAdd *GetViewInfoFirstAfter(const char *rel_name, Oid objid, bool keep_star = false); static List *CheckPgRewriteFirstAfter(Relation rel); -static void ReplaceViewQueryFirstAfter(List *query_str); static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, bool *has_depend); static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum); @@ -12198,12 +12197,94 @@ static List *CheckPgRewriteFirstAfter(Relation rel) return query_str; } +void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute attForm, + int2 old_attnum, char** attName, List **old_query_str) +{ + List *query_str = NIL; + ScanKeyData entry; + ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_oid)); + Relation rewrite_rel = heap_open(RewriteRelationId, RowExclusiveLock); + SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry); + HeapTuple rewrite_tup = systable_getnext(rewrite_scan); + if (!HeapTupleIsValid(rewrite_tup)) { + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); + return; + } + Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup); + if (strcmp(NameStr(rewrite_form->rulename), ViewSelectRuleName) != 0) { + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); + return; + } + bool is_null = false; + Datum evActiomDatum = fastgetattr(rewrite_tup, Anum_pg_rewrite_ev_action, rewrite_rel->rd_att, &is_null); + if (!is_null) { + Datum values[Natts_pg_rewrite] = { 0 }; + bool 
nulls[Natts_pg_rewrite] = { 0 }; + bool replaces[Natts_pg_rewrite] = { 0 }; + char *evActionString = TextDatumGetCString(evActiomDatum); + List *evAction = (List *)stringToNode(evActionString); + Query* query = (Query*)linitial(evAction); + // change query targetEntry + ListCell* lc = NULL; + foreach (lc, query->targetList) { + TargetEntry* tle = (TargetEntry*)lfirst(lc); + if (nodeTag((Node*)tle->expr) == T_Var && tle->resorigtbl == rel_oid && + ((Var*)tle->expr)->varoattno == old_attnum) { + Var *var = (Var *)tle->expr; + var->varattno = attForm->attnum; + var->varoattno = attForm->attnum; + var->vartype = attForm->atttypid; + var->vartypmod = attForm->atttypmod; + *attName = pstrdup(tle->resname); + } + } + char* actiontree = nodeToString((Node*)evAction); + HeapTuple new_dep_tuple; + values[Anum_pg_rewrite_ev_action - 1] = CStringGetTextDatum(actiontree); + replaces[Anum_pg_rewrite_ev_action - 1] = true; + new_dep_tuple = heap_modify_tuple(rewrite_tup, RelationGetDescr(rewrite_rel), values, nulls, replaces); + simple_heap_update(rewrite_rel, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rewrite_rel, new_dep_tuple); + CommandCounterIncrement(); + StringInfoData buf; + initStringInfo(&buf); + Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock); + get_query_def(query, + &buf, + NIL, + RelationGetDescr(ev_relation), + 0, + -1, + 0, + false, + false, + NULL, + false, + false); + appendStringInfo(&buf, ";"); + ViewInfoForAdd * info = static_cast(palloc(sizeof(ViewInfoForAdd))); + info->ev_class = rewrite_form->ev_class; + info->query_string = pstrdup(buf.data); + heap_close(ev_relation, AccessShareLock); + FreeStringInfo(&buf); + query_str = lappend(query_str, info); + *old_query_str = query_str; + heap_freetuple_ext(new_dep_tuple); + pfree_ext(evActionString); + pfree_ext(actiontree); + } + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); +} + /* * create or replace view when the table has view. * 1. 
add column with first or after col_name. * 2. modify column to first or after column. */ -static void ReplaceViewQueryFirstAfter(List *query_str) +void ReplaceViewQueryFirstAfter(List *query_str) { if (query_str != NIL) { ListCell* viewinfo = NULL; @@ -16752,6 +16833,69 @@ static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableC } } +bool InvalidateDependView(Oid viewOid, char objType) +{ + List* view_oid_list = NIL; + List *dep_oid_list = NIL; + // 1. filter the invalid view + if (!GetPgObjectValid(viewOid, objType)) { + return false; + } + // 2. find all views which depend on this view directly or indirectly + view_oid_list = lappend_oid(view_oid_list, viewOid); + dep_oid_list = lappend_oid(dep_oid_list, viewOid); + const int keyNum = 2; + ScanKeyData key[keyNum]; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + Relation dep_rel = heap_open(DependRelationId, AccessShareLock); + while (list_length(dep_oid_list) > 0) { + // (1) get dependent view oid + Oid objid = linitial_oid(dep_oid_list); + dep_oid_list = list_delete_first(dep_oid_list); + List *rw_oid_list = NIL; + ListCell *rw_cell = NULL; + // (2) find rw_objid of pg_rewrite entry from pg_depend by objid + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid)); + scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, keyNum, key); + while (HeapTupleIsValid((tup = systable_getnext(scan)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); + Oid rw_objid = depform->objid; + if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_NORMAL + && !list_member_oid(rw_oid_list, rw_objid)) { + rw_oid_list = lappend_oid(rw_oid_list, rw_objid); + } + } + // (3) find dependent view oid from pg_rewrite by rw_objid + foreach(rw_cell, rw_oid_list) { + Oid rw_objid = 
lfirst_oid(rw_cell); + Oid dep_view_oid = get_rewrite_relid(rw_objid, true); + if (!OidIsValid(dep_view_oid) || dep_view_oid == objid) { + continue; + } + char relkind = get_rel_relkind(dep_view_oid); + if (relkind != RELKIND_VIEW && relkind != RELKIND_MATVIEW) { + continue; + } + dep_oid_list = lappend_oid(dep_oid_list, dep_view_oid); + view_oid_list = lappend_oid(view_oid_list, dep_view_oid); + } + list_free_ext(rw_oid_list); + systable_endscan(scan); + } + heap_close(dep_rel, AccessShareLock); + // 3. mark all dependent view invalid + ListCell *dep_cell = NULL; + foreach(dep_cell, view_oid_list) { + Oid depoid = lfirst_oid(dep_cell); + SetPgObjectValid(depoid, objType, false); + } + list_free_ext(view_oid_list); + list_free_ext(dep_oid_list); + return true; +} static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) { @@ -17020,12 +17164,20 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, } break; - case OCLASS_REWRITE: + case OCLASS_REWRITE: { /* XXX someday see if we can cope with revising views */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type of a column used by a view or rule"), - errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); - break; + Oid objOid = get_rewrite_relid(foundObject.objectId, false); + char relKind = get_rel_relkind(objOid); + if (relKind == RELKIND_VIEW || relKind == RELKIND_MATVIEW) { + (void)InvalidateDependView(objOid, + relKind == RELKIND_VIEW ? 
OBJECT_TYPE_VIEW : OBJECT_TYPE_MATVIEW); + } else { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot alter type of a column used by a relation kind %c", relKind), + errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); + } + break; + } case OCLASS_TRIGGER: diff --git a/contrib/whale/include/plugin_parser/parse_relation.h b/contrib/whale/include/plugin_parser/parse_relation.h index fba5f35f1..92abe771e 100644 --- a/contrib/whale/include/plugin_parser/parse_relation.h +++ b/contrib/whale/include/plugin_parser/parse_relation.h @@ -58,7 +58,7 @@ extern Oid attnumTypeId(Relation rd, int attid); extern Oid attnumCollationId(Relation rd, int attid); extern bool GetPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); extern bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); - +extern bool ValidateDependView(Oid view_oid, char objType); #ifdef PGXC extern int specialAttNum(const char* attname); #endif diff --git a/contrib/whale/plugin_parser/parse_relation.cpp b/contrib/whale/plugin_parser/parse_relation.cpp index 8c6441dbc..0b4f64431 100644 --- a/contrib/whale/plugin_parser/parse_relation.cpp +++ b/contrib/whale/plugin_parser/parse_relation.cpp @@ -58,6 +58,9 @@ #include "optimizer/planner.h" #include "storage/tcap.h" #include "gs_ledger/ledger_utils.h" +#include "catalog/pg_object.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_rewrite.h" #ifdef ENABLE_MOT #include "storage/mot/jit_def.h" #endif @@ -978,7 +981,210 @@ static void buildScalarFunctionAlias(Node* funcexpr, char* funcname, Alias* alia eref->colnames = list_make1(makeString(eref->aliasname)); } +static void CopyAttributeInfo(Form_pg_attribute newtuple, Form_pg_attribute oldtuple) +{ + newtuple->attnum = oldtuple->attnum; + newtuple->atttypid = oldtuple->atttypid; + newtuple->attlen = oldtuple->attlen; + newtuple->atttypmod = oldtuple->atttypmod; + 
// for matview + newtuple->attcollation = oldtuple->attcollation; + newtuple->attbyval = oldtuple->attbyval; + newtuple->attstorage = oldtuple->attstorage; +} + +static void CheckViewColumnExists(Oid view_oid, int2 attnum, Form_pg_attribute newtuple) +{ + HeapTuple tuple; + Form_pg_attribute form_attribute; + tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(view_oid), Int16GetDatum(attnum)); + if (!HeapTupleIsValid(tuple)) { + elog(ERROR, "catalog lookup failed for column %d of relation %u", attnum, view_oid); + } + form_attribute = (Form_pg_attribute)GETSTRUCT(tuple); + CopyAttributeInfo(newtuple, form_attribute); + ReleaseSysCache(tuple); +} + +static bool CheckRelationColumnExists(Oid rel_oid, int2 attnum, Form_pg_attribute attrtuple) +{ + HeapTuple tuple; + Form_pg_attribute attForm; + tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(rel_oid), Int16GetDatum(attnum)); + if (!HeapTupleIsValid(tuple)) { + elog(ERROR, "catalog lookup failed for column %d of relation %u", attnum, rel_oid); + } + attForm = (Form_pg_attribute)GETSTRUCT(tuple); + if (!attForm->attisdropped) { + CopyAttributeInfo(attrtuple, attForm); + ReleaseSysCache(tuple); + return true; + } + const char* droppedname = attForm->attdroppedname.data; + HeapTuple tuple_drop; + Form_pg_attribute attForm_drop; + tuple_drop = SearchSysCache2(ATTNAME, ObjectIdGetDatum(rel_oid), CStringGetDatum(droppedname)); + ReleaseSysCache(tuple); + if (!HeapTupleIsValid(tuple_drop)) { + return false; + } + attForm_drop = (Form_pg_attribute)GETSTRUCT(tuple_drop); + CopyAttributeInfo(attrtuple, attForm_drop); + ReleaseSysCache(tuple_drop); + return true; +} +static void CheckPgAttribute(Oid obj_oid, char* attName, Form_pg_attribute new_attribute) +{ + const int keyNum = 2; + Relation rel; + ScanKeyData key[keyNum]; + SysScanDesc scan; + HeapTuple tuple; + HeapTuple new_dep_tuple; + Form_pg_attribute attForm; + rel = heap_open(AttributeRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], Anum_pg_attribute_attrelid, 
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(obj_oid)); + ScanKeyInit(&key[1], Anum_pg_attribute_attname, BTEqualStrategyNumber, F_NAMEEQ, NameGetDatum(attName)); + scan = systable_beginscan(rel, AttributeRelidNameIndexId, true, SnapshotSelf, keyNum, &key[0]); + tuple = systable_getnext(scan); + if (!HeapTupleIsValid(tuple)) { + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); + elog(ERROR, "catalog lookup failed for column %s of relation %u", attName, obj_oid); + } + attForm = (Form_pg_attribute)GETSTRUCT(tuple); + Datum values[Natts_pg_attribute] = { 0 }; + bool nulls[Natts_pg_attribute] = { 0 }; + bool replaces[Natts_pg_attribute] = { 0 }; + values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(new_attribute->atttypid); + values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); + values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(new_attribute->atttypmod); + values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); + values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); + values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(new_attribute->attcollation); + replaces[Anum_pg_attribute_atttypid - 1] = true; + replaces[Anum_pg_attribute_attlen - 1] = true; + replaces[Anum_pg_attribute_atttypmod - 1] = true; + replaces[Anum_pg_attribute_attbyval - 1] = true; + replaces[Anum_pg_attribute_attstorage - 1] = true; + replaces[Anum_pg_attribute_attcollation - 1] = true; + new_dep_tuple = heap_modify_tuple(tuple, RelationGetDescr(rel), values, nulls, replaces); + simple_heap_update(rel, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + systable_endscan(scan); + heap_close(rel, RowExclusiveLock); +} + +bool ValidateDependView(Oid view_oid, char objType) +{ + bool isValid = true; + Oid rw_objid = InvalidOid; + // 1. 
filter the valid view + if (GetPgObjectValid(view_oid, objType)) { + return isValid; + } + // 2. find pg_rewrite entry which this view depends on internally + const int keyNum = 2; + ScanKeyData key[keyNum]; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + Relation rel = heap_open(DependRelationId, AccessShareLock); + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(view_oid)); + scan = systable_beginscan(rel, DependReferenceIndexId, true, NULL, keyNum, key); + while (HeapTupleIsValid((tup = systable_getnext(scan)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); + if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_INTERNAL) { + rw_objid = depform->objid; + break; + } + } + systable_endscan(scan); + heap_close(rel, AccessShareLock); + if (!OidIsValid(rw_objid)) { + elog(ERROR, "cannot find the internal dependent pg_rewrite entry."); + } + // 3. find all columns of parent views and tables which this view depends on directly, + // and check their validity recursively. 
+ List *query_str = NIL; + ScanKeyData key_dep[keyNum]; + SysScanDesc scan_dep = NULL; + HeapTuple tup_dep = NULL; + Relation rel_dep = heap_open(DependRelationId, RowExclusiveLock); + ScanKeyInit(&key_dep[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RewriteRelationId)); + ScanKeyInit(&key_dep[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_objid)); + scan_dep = systable_beginscan(rel_dep, DependDependerIndexId, true, NULL, keyNum, key_dep); + Form_pg_attribute newtuple = (Form_pg_attribute)palloc0(sizeof(FormData_pg_attribute)); + while (HeapTupleIsValid((tup_dep = systable_getnext(scan_dep)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup_dep); + if (depform->refclassid != RelationRelationId || depform->deptype != DEPENDENCY_NORMAL || + depform->refobjsubid == 0) { + continue; + } + Oid dep_objid = depform->refobjid; + int2 dep_objsubid = depform->refobjsubid; + char relkind = get_rel_relkind(dep_objid); + char* attName = NULL; + if (relkind == RELKIND_RELATION) { + // the column exists, and its type may have changed or it mat have been deleted and recreated + isValid &= CheckRelationColumnExists(dep_objid, dep_objsubid, newtuple); + if (newtuple->attnum > 0) { + // change pg_depend + Datum values[Natts_pg_depend] = { 0 }; + bool nulls[Natts_pg_depend] = { 0 }; + bool replaces[Natts_pg_depend] = { 0 }; + HeapTuple new_dep_tuple; + values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(newtuple->attnum); + replaces[Anum_pg_depend_refobjsubid - 1] = true; + new_dep_tuple = heap_modify_tuple(tup_dep, RelationGetDescr(rel_dep), values, nulls, replaces); + simple_heap_update(rel_dep, &new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rel_dep, new_dep_tuple); + heap_freetuple_ext(new_dep_tuple); + CommandCounterIncrement(); + // change pg_rewrite targetEntry + CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); + // change 
pg_attribute + CheckPgAttribute(view_oid, attName, newtuple); + } + } else if (relkind == RELKIND_VIEW || relkind == RELKIND_MATVIEW) { + isValid &= ValidateDependView(dep_objid, + relkind == RELKIND_VIEW ? OBJECT_TYPE_VIEW : OBJECT_TYPE_MATVIEW); + if (isValid) { + // here means dep_objid is valid, we should keep the same view_oid.attr with dep_objid.dep_objsubid + // find dep_objid.dep_objsubid + CheckViewColumnExists(dep_objid, dep_objsubid, newtuple); + // change pg_rewrite targetEntry + CheckPgRewriteWithDroppedColumn(dep_objid, rw_objid, newtuple, dep_objsubid, &attName, &query_str); + // change pg_attribute + CheckPgAttribute(view_oid, attName, newtuple); + } + } + errno_t rc = memset_s(newtuple, sizeof(FormData_pg_attribute), 0, sizeof(FormData_pg_attribute)); + securec_check_c(rc, "\0", "\0"); + pfree_ext(attName); + if (!isValid) { + pfree_ext(newtuple); + systable_endscan(scan_dep); + heap_close(rel_dep, RowExclusiveLock); + return false; + } + } + pfree_ext(newtuple); + systable_endscan(scan_dep); + heap_close(rel_dep, RowExclusiveLock); + // 4. 
mark the current view valid + SetPgObjectValid(view_oid, objType, true); + /* create or replace view */ + if (objType == OBJECT_TYPE_VIEW) { + ReplaceViewQueryFirstAfter(query_str); + } + return isValid; +} /* * Open a table during parse analysis @@ -1136,6 +1342,16 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm errhint("please use database \"postgres\""))); } + if (RelationGetRelkind(rel) == RELKIND_VIEW && + RelationGetRelid(rel) >= FirstNormalObjectId && + !ValidateDependView(RelationGetRelid(rel), OBJECT_TYPE_VIEW)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The view %s is invalid, please make it valid before operation.", + RelationGetRelationName(rel)), + errhint("Please re-add missing table fields."))); + } + if (!u_sess->attr.attr_common.XactReadOnly && rel->rd_id == UserStatusRelationId) { TryUnlockAllAccounts(); } diff --git a/contrib/whale/tablecmds.cpp b/contrib/whale/tablecmds.cpp index 6e67f727d..4df677356 100644 --- a/contrib/whale/tablecmds.cpp +++ b/contrib/whale/tablecmds.cpp @@ -515,7 +515,6 @@ static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattn static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static ViewInfoForAdd *GetViewInfoFirstAfter(const char *rel_name, Oid objid, bool keep_star = false); static List *CheckPgRewriteFirstAfter(Relation rel); -static void ReplaceViewQueryFirstAfter(List *query_str); static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, bool *has_depend); static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum); @@ -11863,12 +11862,94 @@ static List *CheckPgRewriteFirstAfter(Relation rel) return query_str; } +void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute attForm, + int2 old_attnum, char** attName, List **old_query_str) +{ + List *query_str = NIL; + 
ScanKeyData entry; + ScanKeyInit(&entry, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(rw_oid)); + Relation rewrite_rel = heap_open(RewriteRelationId, RowExclusiveLock); + SysScanDesc rewrite_scan = systable_beginscan(rewrite_rel, RewriteOidIndexId, true, NULL, 1, &entry); + HeapTuple rewrite_tup = systable_getnext(rewrite_scan); + if (!HeapTupleIsValid(rewrite_tup)) { + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); + return; + } + Form_pg_rewrite rewrite_form = (Form_pg_rewrite)GETSTRUCT(rewrite_tup); + if (strcmp(NameStr(rewrite_form->rulename), ViewSelectRuleName) != 0) { + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); + return; + } + bool is_null = false; + Datum evActiomDatum = fastgetattr(rewrite_tup, Anum_pg_rewrite_ev_action, rewrite_rel->rd_att, &is_null); + if (!is_null) { + Datum values[Natts_pg_rewrite] = { 0 }; + bool nulls[Natts_pg_rewrite] = { 0 }; + bool replaces[Natts_pg_rewrite] = { 0 }; + char *evActionString = TextDatumGetCString(evActiomDatum); + List *evAction = (List *)stringToNode(evActionString); + Query* query = (Query*)linitial(evAction); + // change query targetEntry + ListCell* lc = NULL; + foreach (lc, query->targetList) { + TargetEntry* tle = (TargetEntry*)lfirst(lc); + if (nodeTag((Node*)tle->expr) == T_Var && tle->resorigtbl == rel_oid && + ((Var*)tle->expr)->varoattno == old_attnum) { + Var *var = (Var *)tle->expr; + var->varattno = attForm->attnum; + var->varoattno = attForm->attnum; + var->vartype = attForm->atttypid; + var->vartypmod = attForm->atttypmod; + *attName = pstrdup(tle->resname); + } + } + char* actiontree = nodeToString((Node*)evAction); + HeapTuple new_dep_tuple; + values[Anum_pg_rewrite_ev_action - 1] = CStringGetTextDatum(actiontree); + replaces[Anum_pg_rewrite_ev_action - 1] = true; + new_dep_tuple = heap_modify_tuple(rewrite_tup, RelationGetDescr(rewrite_rel), values, nulls, replaces); + simple_heap_update(rewrite_rel, 
&new_dep_tuple->t_self, new_dep_tuple); + CatalogUpdateIndexes(rewrite_rel, new_dep_tuple); + CommandCounterIncrement(); + StringInfoData buf; + initStringInfo(&buf); + Relation ev_relation = heap_open(rewrite_form->ev_class, AccessShareLock); + get_query_def(query, + &buf, + NIL, + RelationGetDescr(ev_relation), + 0, + -1, + 0, + false, + false, + NULL, + false, + false); + appendStringInfo(&buf, ";"); + ViewInfoForAdd * info = static_cast(palloc(sizeof(ViewInfoForAdd))); + info->ev_class = rewrite_form->ev_class; + info->query_string = pstrdup(buf.data); + heap_close(ev_relation, AccessShareLock); + FreeStringInfo(&buf); + query_str = lappend(query_str, info); + *old_query_str = query_str; + heap_freetuple_ext(new_dep_tuple); + pfree_ext(evActionString); + pfree_ext(actiontree); + } + systable_endscan(rewrite_scan); + heap_close(rewrite_rel, RowExclusiveLock); +} + /* * create or replace view when the table has view. * 1. add column with first or after col_name. * 2. modify column to first or after column. */ -static void ReplaceViewQueryFirstAfter(List *query_str) +void ReplaceViewQueryFirstAfter(List *query_str) { if (query_str != NIL) { ListCell* viewinfo = NULL; @@ -16372,6 +16453,69 @@ static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableC } } +bool InvalidateDependView(Oid viewOid, char objType) +{ + List* view_oid_list = NIL; + List *dep_oid_list = NIL; + // 1. filter the invalid view + if (!GetPgObjectValid(viewOid, objType)) { + return false; + } + // 2. 
find all views which depend on this view directly or indirectly + view_oid_list = lappend_oid(view_oid_list, viewOid); + dep_oid_list = lappend_oid(dep_oid_list, viewOid); + const int keyNum = 2; + ScanKeyData key[keyNum]; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + Relation dep_rel = heap_open(DependRelationId, AccessShareLock); + while (list_length(dep_oid_list) > 0) { + // (1) get dependent view oid + Oid objid = linitial_oid(dep_oid_list); + dep_oid_list = list_delete_first(dep_oid_list); + List *rw_oid_list = NIL; + ListCell *rw_cell = NULL; + // (2) find rw_objid of pg_rewrite entry from pg_depend by objid + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(objid)); + scan = systable_beginscan(dep_rel, DependReferenceIndexId, true, NULL, keyNum, key); + while (HeapTupleIsValid((tup = systable_getnext(scan)))) { + Form_pg_depend depform = (Form_pg_depend)GETSTRUCT(tup); + Oid rw_objid = depform->objid; + if (depform->classid == RewriteRelationId && depform->deptype == DEPENDENCY_NORMAL + && !list_member_oid(rw_oid_list, rw_objid)) { + rw_oid_list = lappend_oid(rw_oid_list, rw_objid); + } + } + // (3) find dependent view oid from pg_rewrite by rw_objid + foreach(rw_cell, rw_oid_list) { + Oid rw_objid = lfirst_oid(rw_cell); + Oid dep_view_oid = get_rewrite_relid(rw_objid, true); + if (!OidIsValid(dep_view_oid) || dep_view_oid == objid) { + continue; + } + char relkind = get_rel_relkind(dep_view_oid); + if (relkind != RELKIND_VIEW && relkind != RELKIND_MATVIEW) { + continue; + } + dep_oid_list = lappend_oid(dep_oid_list, dep_view_oid); + view_oid_list = lappend_oid(view_oid_list, dep_view_oid); + } + list_free_ext(rw_oid_list); + systable_endscan(scan); + } + heap_close(dep_rel, AccessShareLock); + // 3. 
mark all dependent view invalid + ListCell *dep_cell = NULL; + foreach(dep_cell, view_oid_list) { + Oid depoid = lfirst_oid(dep_cell); + SetPgObjectValid(depoid, objType, false); + } + list_free_ext(view_oid_list); + list_free_ext(dep_oid_list); + return true; +} static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) { @@ -16640,12 +16784,20 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, } break; - case OCLASS_REWRITE: + case OCLASS_REWRITE: { /* XXX someday see if we can cope with revising views */ - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot alter type of a column used by a view or rule"), - errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); - break; + Oid objOid = get_rewrite_relid(foundObject.objectId, false); + char relKind = get_rel_relkind(objOid); + if (relKind == RELKIND_VIEW || relKind == RELKIND_MATVIEW) { + (void)InvalidateDependView(objOid, + relKind == RELKIND_VIEW ? 
OBJECT_TYPE_VIEW : OBJECT_TYPE_MATVIEW); + } else { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot alter type of a column used by a relation kind %c", relKind), + errdetail("%s depends on column \"%s\"", getObjectDescription(&foundObject), colName))); + } + break; + } case OCLASS_TRIGGER: -- Gitee From d599bf15f312c96f2df007b16e0599fe8f3dc13c Mon Sep 17 00:00:00 2001 From: chenxiaobin19 <1025221611@qq.com> Date: Wed, 20 Dec 2023 19:52:49 +0800 Subject: [PATCH 140/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Djson=5Fobjectagg?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E5=AD=98=E5=9C=A8=E9=87=8D=E5=A4=8D=E9=94=AE?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/json_objectagg.out | 46 ++++++++++++++++++- .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 13 +++++- contrib/dolphin/sql/json_objectagg.sql | 15 ++++++ 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/json_objectagg.out b/contrib/dolphin/expected/json_objectagg.out index 5df884b4e..54dfbf723 100644 --- a/contrib/dolphin/expected/json_objectagg.out +++ b/contrib/dolphin/expected/json_objectagg.out @@ -190,6 +190,50 @@ select pg_catalog.json_objectagg(1, cast(3765 as regdictionary)); {"1": "simple"} (1 row) +drop table if exists t_json0030; +NOTICE: table "t_json0030" does not exist, skipping +create table t_json0030(col01 int, col02 json not null); +insert ignore into t_json0030 values(1,'{"id": "3", "name": "Barney"}'), +(2,'{"id": "4", "name": "Betty"}'),(3,'{"id": "2", "name": "Wilma"}'), +(10,'[3.65,10,"true","x","44"]'),(10,'[3.65,10,"true",17,[22,"y",66]]'),(10,'[3.65,10,"true",7,{"x":"y"}]'), +(20,'"qwi<>23,。"'),(20,'"123â€â€œï¼#ï¿¥"'),(20,'"按时间JIwqe22"'), +(30,cast('true' as json)),(30,null),(30,cast('656964' as json)),(30,cast('56848.695' as json)); +WARNING: null value in column "col02" violates not-null constraint +DETAIL: Failing row contains (30, 
null). +select * from t_json0030 where col02 is null; + col01 | col02 +-------+------- +(0 rows) + +select json_objectagg(col01, col02) from t_json0030 group by col01; + json_objectagg +------------------------------------------- + {"1": {"id": "3", "name": "Barney"}} + {"3": {"id": "2", "name": "Wilma"}} + {"20": "按时间JIwqe22"} + {"10": [3.65, 10, "true", 7, {"x": "y"}]} + {"30": 56848.695} + {"2": {"id": "4", "name": "Betty"}} +(6 rows) + +create table t1 (a int, b int); +insert into t1 values (1,1), (2,2), (1,2); +select json_objectagg(a, b) from t1; + json_objectagg +------------------ + {"1": 2, "2": 2} +(1 row) + +select json_objectagg(a, b) from t1 group by a; + json_objectagg +---------------- + {"1": 2} + {"2": 2} +(2 rows) + drop schema json_objectagg_test cascade; -NOTICE: drop cascades to table city +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table city +drop cascades to table t_json0030 +drop cascades to table t1 reset current_schema; diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 4997773b9..65f715b42 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -6896,7 +6896,18 @@ Datum json_objectagg_mysql_transfn(PG_FUNCTION_ARGS) arg = PG_GETARG_DATUM(2); value = get_json_value(val_type, arg, typIsVarlena, typOutput); } - cJSON_AddItemToObject(state->root, keyString, value); + + char* pathString = (char *)palloc((strlen(keyString) + 5) * sizeof(char)); + rc = snprintf_s(pathString, strlen(keyString) + 5, strlen(keyString) + 4, "$.\"%s\"", keyString); + securec_check_ss_c(rc, "\0", "\0"); + int error_pos = -1; + cJSON_JsonPath *jp = jp_parse(pathString, error_pos); + pfree(pathString); + bool invalidPath = false; + /* If there are duplicate keys, replace value with new one */ + if (!cJSON_JsonReplace(state->root, jp, value, invalidPath)) { + cJSON_AddItemToObject(state->root, keyString, value); + } 
pfree(keyString); MemoryContextSwitchTo(oldcontext); diff --git a/contrib/dolphin/sql/json_objectagg.sql b/contrib/dolphin/sql/json_objectagg.sql index afd8a9c1b..2927871b7 100644 --- a/contrib/dolphin/sql/json_objectagg.sql +++ b/contrib/dolphin/sql/json_objectagg.sql @@ -71,5 +71,20 @@ select pg_catalog.json_objectagg(1, cast(23 as regtype)); select pg_catalog.json_objectagg(1, cast(3748 as regconfig)); select pg_catalog.json_objectagg(1, cast(3765 as regdictionary)); +drop table if exists t_json0030; +create table t_json0030(col01 int, col02 json not null); +insert ignore into t_json0030 values(1,'{"id": "3", "name": "Barney"}'), +(2,'{"id": "4", "name": "Betty"}'),(3,'{"id": "2", "name": "Wilma"}'), +(10,'[3.65,10,"true","x","44"]'),(10,'[3.65,10,"true",17,[22,"y",66]]'),(10,'[3.65,10,"true",7,{"x":"y"}]'), +(20,'"qwi<>23,。"'),(20,'"123â€â€œï¼#ï¿¥"'),(20,'"按时间JIwqe22"'), +(30,cast('true' as json)),(30,null),(30,cast('656964' as json)),(30,cast('56848.695' as json)); +select * from t_json0030 where col02 is null; +select json_objectagg(col01, col02) from t_json0030 group by col01; + +create table t1 (a int, b int); +insert into t1 values (1,1), (2,2), (1,2); +select json_objectagg(a, b) from t1; +select json_objectagg(a, b) from t1 group by a; + drop schema json_objectagg_test cascade; reset current_schema; -- Gitee From f9c69b1096da38710a3a36c2f9e2f30a78368d21 Mon Sep 17 00:00:00 2001 From: "wei.liu" Date: Thu, 21 Dec 2023 08:57:26 +0800 Subject: [PATCH 141/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=8D=E6=94=AF?= =?UTF-8?q?=E6=8C=81cidr=E7=B1=BB=E5=9E=8B=20cidr=20=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E8=BD=AC=E6=8D=A2=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/include/plugin_parser/kwlist.h | 1 + contrib/dolphin/plugin_parser/gram.y | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git 
a/contrib/dolphin/include/plugin_parser/kwlist.h b/contrib/dolphin/include/plugin_parser/kwlist.h index 5b2375bc1..8291313ff 100644 --- a/contrib/dolphin/include/plugin_parser/kwlist.h +++ b/contrib/dolphin/include/plugin_parser/kwlist.h @@ -139,6 +139,7 @@ PG_KEYWORD("check", CHECK, RESERVED_KEYWORD) PG_KEYWORD("checkpoint", CHECKPOINT, UNRESERVED_KEYWORD) #ifdef DOLPHIN PG_KEYWORD("checksum", CHECKSUM, UNRESERVED_KEYWORD) +PG_KEYWORD("cidr", DB_B_CIDR, UNRESERVED_KEYWORD) PG_KEYWORD("circle", DB_B_CIRCLE, UNRESERVED_KEYWORD) #endif PG_KEYWORD("class", CLASS, UNRESERVED_KEYWORD) diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 5d6ce486f..1b51b12db 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -1271,7 +1271,7 @@ static inline Node* MakeSubLinkWithOp(SubLinkType subType, Node* testExpr, char* ZEROFILL ZONE - AST DB_B_JSON DB_B_JSONB DB_B_BOX DB_B_CIRCLE DB_B_POLYGON DB_B_BYTEA DB_B_TIMETZ DB_B_TIMESTAMPTZ DB_B_POINT + AST DB_B_JSON DB_B_JSONB DB_B_BOX DB_B_CIRCLE DB_B_POLYGON DB_B_BYTEA DB_B_TIMETZ DB_B_TIMESTAMPTZ DB_B_POINT DB_B_CIDR WEIGHT_STRING REVERSE %token ALGORITHM_UNDEFINED ALGORITHM_MERGE ALGORITHM_TEMPTABLE @@ -1370,7 +1370,7 @@ static inline Node* MakeSubLinkWithOp(SubLinkType subType, Node* testExpr, char* * blame any funny behavior of UNBOUNDED on the SQL standard, though. 
*/ %nonassoc UNBOUNDED /* ideally should have same precedence as IDENT */ -%nonassoc IDENT GENERATED NULL_P PARTITION SUBPARTITION RANGE ROWS PRECEDING FOLLOWING CUBE ROLLUP DB_B_JSON DB_B_JSONB DB_B_BOX DB_B_CIRCLE DB_B_POLYGON DB_B_BYTEA DB_B_TIMETZ DB_B_TIMESTAMPTZ DB_B_POINT +%nonassoc IDENT GENERATED NULL_P PARTITION SUBPARTITION RANGE ROWS PRECEDING FOLLOWING CUBE ROLLUP DB_B_JSON DB_B_JSONB DB_B_BOX DB_B_CIRCLE DB_B_POLYGON DB_B_BYTEA DB_B_TIMETZ DB_B_TIMESTAMPTZ DB_B_POINT DB_B_CIDR %left Op OPERATOR '@' /* multi-character ops and user-defined operators */ %nonassoc NOTNULL %nonassoc ISNULL @@ -36820,6 +36820,10 @@ DOLPHINIDENT: IDENT { $$ = CreateDolphinIdent(pstrdup($1), false); } + | DB_B_CIDR + { + $$ = CreateDolphinIdent(pstrdup($1), false); + } ; AexprConst: @@ -37017,6 +37021,12 @@ AexprConst_without_Sconst: Iconst tmp->location = @1; $$ = makeStringConstCast($2, @2, tmp); } + | DB_B_CIDR SCONST + { + TypeName * tmp = SystemTypeName("point"); + tmp->location = @1; + $$ = makeStringConstCast($2, @2, tmp); + } /* promote and expand CharacterWithoutLength to AexprConst */ | CHARACTER VARYING SCONST { -- Gitee From c83fae8d98bf58a0c94064f54c49e9fa2be69b85 Mon Sep 17 00:00:00 2001 From: liruixiang <461834084@qq.com> Date: Tue, 12 Dec 2023 23:28:57 +0800 Subject: [PATCH 142/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dtreat=5Fbxconst=5Fas?= =?UTF-8?q?=5Fbinary=E9=80=89=E9=A1=B9=E4=B8=8B|=E6=93=8D=E4=BD=9C?= =?UTF-8?q?=E7=AC=A6=E4=BD=BF=E7=94=A8bxconst=E6=97=B6=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/bxconst_test.out | 464 ++++++++++++++++-- .../dolphin/expected/charset_utf8mb4_b_db.out | 13 +- .../rollback_script/dolphin--3.0--2.0.sql | 11 + contrib/dolphin/sql/bxconst_test.sql | 52 ++ contrib/dolphin/sql/charset_utf8mb4_b_db.sql | 2 +- .../upgrade_script/dolphin--2.0--3.0.sql | 18 +- 6 files changed, 502 insertions(+), 58 deletions(-) diff --git 
a/contrib/dolphin/expected/bxconst_test.out b/contrib/dolphin/expected/bxconst_test.out index 76ee124d9..8467042ad 100644 --- a/contrib/dolphin/expected/bxconst_test.out +++ b/contrib/dolphin/expected/bxconst_test.out @@ -1,105 +1,467 @@ -create schema bxconst_test; -set current_schema to 'bxconst_test'; - -set dolphin.b_compatibility_mode to on; - -create table t_bit(a bit(16)); -create table t_bin(a binary(6)); - -select b'11100000111000'; - ?column? +create schema bxconst_test; +set current_schema to 'bxconst_test'; + +set dolphin.b_compatibility_mode to on; + +-- å°†bxconst当作bitå¤„ç† +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select b'11100000111000'; + ?column? ---------------- 11100000111000 (1 row) -select pg_typeof(b'11100000111000'); - pg_typeof +select pg_typeof(b'11100000111000'); + pg_typeof ----------- bit (1 row) -select x'4c'; - ?column? +select x'4c'; + ?column? ---------- 01001100 (1 row) -select pg_typeof(x'4c'); - pg_typeof +select pg_typeof(x'4c'); + pg_typeof ----------- bit (1 row) -insert into t_bit values(b'11100000111000'), (x'4c'); -insert into t_bin values(b'11100000111000'), (x'4c'); - -select * from t_bit; - a + +insert into t_bit values(b'11100000111000'), (x'4c'); +insert into t_bin values(b'11100000111000'), (x'4c'); + +select * from t_bit; + a ------------------ 0011100000111000 0000000001001100 (2 rows) -select * from t_bin; - a +select * from t_bin; + a ---------------- \x313433393200 \x373600000000 (2 rows) -drop table t_bit; -drop table t_bin; + +drop table t_bit; +drop table t_bin; + +select 0x01 | 2; + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::int1); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::int2); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::int4); + ?column? 
+---------- + 3 +(1 row) + +select 0x01 | (2::int8); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::uint1); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::uint2); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::uint4); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::uint8); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::float4); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::float8); + ?column? +---------- + 3 +(1 row) + +select 0x01 | (2::numeric); + ?column? +---------- + 3 +(1 row) + +select 2 | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::int1) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::int2) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::int4) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::int8) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::uint1) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::uint2) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::uint4) | 0x01; + ?column? +---------- + 3 +(1 row) -set dolphin.sql_mode = treat_bxconst_as_binary; +select (2::uint8) | 0x01; + ?column? +---------- + 3 +(1 row) -create table t_bit(a bit(16)); -create table t_bin(a binary(6)); +select (2::float4) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::float8) | 0x01; + ?column? +---------- + 3 +(1 row) + +select (2::numeric) | 0x01; + ?column? +---------- + 3 +(1 row) + +select 0x01 | 0x02; + ?column? +---------- + 00000011 +(1 row) -select b'11100000111000'; - ?column? + +-- å°†bxconst当作binaryå¤„ç† +set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary; + +create table t_bit(a bit(16)); +create table t_bin(a binary(6)); + +select b'11100000111000'; + ?column? 
---------- \x3838 (1 row) -select pg_typeof(b'11100000111000'); - pg_typeof +select pg_typeof(b'11100000111000'); + pg_typeof ----------- "binary" (1 row) -select x'4c'; - ?column? +select x'4c'; + ?column? ---------- \x4c (1 row) -select pg_typeof(x'4c'); - pg_typeof +select pg_typeof(x'4c'); + pg_typeof ----------- "binary" (1 row) -insert into t_bit values(b'11000100110001'), (x'3130'); -insert into t_bin values(b'11000100110001'), (x'3130'); - -select * from t_bit; - a + +insert into t_bit values(b'11100000111000'), (x'4c'); +insert into t_bin values(b'11100000111000'), (x'4c'); + +select * from t_bit; + a ------------------ - 0000000000000011 - 0000000000000010 + 0011100000111000 + 0000000001001100 (2 rows) -select * from t_bin; - a +select * from t_bin; + a ---------------- - \x313100000000 - \x313000000000 + \x383800000000 + \x4c0000000000 (2 rows) -drop table t_bit; -drop table t_bin; + +drop table t_bit; +drop table t_bin; + +select 0x01 | 2; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::int1); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::int2); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::int4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::int8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::uint1); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_uint8" statement 1 + ?column? 
+---------- + 2 +(1 row) + +select 0x01 | (2::uint2); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_uint8" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::uint4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_uint8" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::uint8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_uint8" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::float4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::float8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | (2::numeric); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_double" statement 1 + ?column? +---------- + 2 +(1 row) + +select 2 | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::int1) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::int2) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::int4) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::int8) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? 
+---------- + 2 +(1 row) + +select (2::uint1) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "uint8_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::uint2) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "uint8_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::uint4) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "uint8_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) -reset dolphin.sql_mode; +select (2::uint8) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "uint8_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::float4) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::float8) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select (2::numeric) | 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "double_or_binary" statement 1 + ?column? +---------- + 2 +(1 row) + +select 0x01 | 0x02; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_binary" statement 1 +WARNING: invalid input syntax for type numeric: "" +CONTEXT: SQL function "binary_or_binary" statement 1 + ?column? 
+---------- + 0 +(1 row) -drop schema bxconst_test cascade; + +reset dolphin.sql_mode; + +drop schema bxconst_test cascade; reset current_schema; diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index 8ff2c2c3b..338b9993c 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -145,11 +145,14 @@ SELECT _utf8mb4 B'111010011010101110011000111001101001011010101111高斯'; -- ER ERROR: "é" is not a valid binary digit LINE 1: SELECT _utf8mb4 B'111010011010101110011000111001101001011010... ^ -SELECT _binary X'000D' | X'0BC0'; -- ERROR not support yet -ERROR: operator does not exist: "varbinary" | bit -LINE 1: SELECT _binary X'000D' | X'0BC0'; - ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +SELECT _binary X'000D' | X'0BC0'; +WARNING: invalid input syntax for numeric: "" +CONTEXT: SQL function "binary_or_double" during inlining + ?column? 
+---------- + 3008 +(1 row) + -- ------------------------------------------ -- SET NAMES utf8mb4; SET NAMES 'utf8mb4'; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 40bbc5032..4a8b5d1a4 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -262,3 +262,14 @@ DROP FUNCTION IF EXISTS pg_catalog.hex(longblob); CREATE OR REPLACE FUNCTION pg_catalog.binary_out (binary) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; CREATE OR REPLACE FUNCTION pg_catalog.varbinary_out (varbinary) RETURNS cstring LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteaout'; + +DROP OPERATOR IF EXISTS pg_catalog.|(double precision, binary) CASCADE; +DROP OPERATOR IF EXISTS pg_catalog.|(binary, double precision) CASCADE; +DROP OPERATOR IF EXISTS pg_catalog.|(uint8, binary) CASCADE; +DROP OPERATOR IF EXISTS pg_catalog.|(binary, uint8) CASCADE; +DROP OPERATOR IF EXISTS pg_catalog.|(binary, binary) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.double_or_binary(double precision, binary) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.binary_or_double(binary, double precision) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.uint8_or_binary(uint8, binary) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.binary_or_uint8(binary, uint8) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.binary_or_binary(binary, binary) CASCADE; \ No newline at end of file diff --git a/contrib/dolphin/sql/bxconst_test.sql b/contrib/dolphin/sql/bxconst_test.sql index dfba4b06e..6aeb790e5 100644 --- a/contrib/dolphin/sql/bxconst_test.sql +++ b/contrib/dolphin/sql/bxconst_test.sql @@ -23,6 +23,32 @@ select * from t_bin; drop table t_bit; drop table t_bin; +select 0x01 | 2; +select 0x01 | (2::int1); +select 0x01 | (2::int2); +select 0x01 | (2::int4); +select 0x01 | (2::int8); +select 0x01 | (2::uint1); +select 0x01 | (2::uint2); +select 0x01 | (2::uint4); +select 0x01 | 
(2::uint8); +select 0x01 | (2::float4); +select 0x01 | (2::float8); +select 0x01 | (2::numeric); +select 2 | 0x01; +select (2::int1) | 0x01; +select (2::int2) | 0x01; +select (2::int4) | 0x01; +select (2::int8) | 0x01; +select (2::uint1) | 0x01; +select (2::uint2) | 0x01; +select (2::uint4) | 0x01; +select (2::uint8) | 0x01; +select (2::float4) | 0x01; +select (2::float8) | 0x01; +select (2::numeric) | 0x01; +select 0x01 | 0x02; + -- å°†bxconst当作binaryå¤„ç† set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary; @@ -43,6 +69,32 @@ select * from t_bin; drop table t_bit; drop table t_bin; +select 0x01 | 2; +select 0x01 | (2::int1); +select 0x01 | (2::int2); +select 0x01 | (2::int4); +select 0x01 | (2::int8); +select 0x01 | (2::uint1); +select 0x01 | (2::uint2); +select 0x01 | (2::uint4); +select 0x01 | (2::uint8); +select 0x01 | (2::float4); +select 0x01 | (2::float8); +select 0x01 | (2::numeric); +select 2 | 0x01; +select (2::int1) | 0x01; +select (2::int2) | 0x01; +select (2::int4) | 0x01; +select (2::int8) | 0x01; +select (2::uint1) | 0x01; +select (2::uint2) | 0x01; +select (2::uint4) | 0x01; +select (2::uint8) | 0x01; +select (2::float4) | 0x01; +select (2::float8) | 0x01; +select (2::numeric) | 0x01; +select 0x01 | 0x02; + reset dolphin.sql_mode; drop schema bxconst_test cascade; diff --git a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql index 2ec8d971f..86fee6963 100644 --- a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql +++ b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql @@ -37,7 +37,7 @@ SELECT _binary B'111010011010101110011000111001101001011010101111'; SELECT _utf8mb4 B'111010011010101110011000111001101001011010101111'; SELECT _gbk B'111010011010101110011000111001101001011010101111'; SELECT _utf8mb4 B'111010011010101110011000111001101001011010101111高斯'; -- ERROR -SELECT 
_binary X'000D' | X'0BC0'; -- ERROR not support yet +SELECT _binary X'000D' | X'0BC0'; -- ------------------------------------------ -- SET NAMES utf8mb4; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 626a70177..9937ddc4c 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -438,4 +438,20 @@ CREATE OR REPLACE FUNCTION pg_catalog.hex(mediumblob) RETURNS text LANGUAGE C IM CREATE OR REPLACE FUNCTION pg_catalog.hex(longblob) RETURNS text LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'bytea_to_hex'; CREATE OR REPLACE FUNCTION pg_catalog.binary_out (binary) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_binaryout'; -CREATE OR REPLACE FUNCTION pg_catalog.varbinary_out (varbinary) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_binaryout'; \ No newline at end of file +CREATE OR REPLACE FUNCTION pg_catalog.varbinary_out (varbinary) RETURNS cstring LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_binaryout'; + +DROP FUNCTION IF EXISTS pg_catalog.double_or_binary(double precision, binary) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.double_or_binary(double precision, binary) RETURNS bigint LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1 | $2::numeric $$; +CREATE OPERATOR pg_catalog.|(leftarg = double precision, rightarg = binary, procedure = pg_catalog.double_or_binary); +DROP FUNCTION IF EXISTS pg_catalog.binary_or_double(binary, double precision) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.binary_or_double(binary, double precision) RETURNS bigint LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1::numeric | $2 $$; +CREATE OPERATOR pg_catalog.|(leftarg = binary, rightarg = double precision, procedure = pg_catalog.binary_or_double); +DROP FUNCTION IF EXISTS pg_catalog.uint8_or_binary(uint8, binary) CASCADE; +CREATE OR REPLACE FUNCTION 
pg_catalog.uint8_or_binary(uint8, binary) RETURNS uint8 LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1 | $2::numeric $$; +CREATE OPERATOR pg_catalog.|(leftarg = uint8, rightarg = binary, procedure = pg_catalog.uint8_or_binary); +DROP FUNCTION IF EXISTS pg_catalog.binary_or_uint8(binary, uint8) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.binary_or_uint8(binary, uint8) RETURNS uint8 LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1::numeric | $2 $$; +CREATE OPERATOR pg_catalog.|(leftarg = binary, rightarg = uint8, procedure = pg_catalog.binary_or_uint8); +DROP FUNCTION IF EXISTS pg_catalog.binary_or_binary(binary, binary) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.binary_or_binary(binary, binary) RETURNS bigint LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1::numeric | $2::numeric $$; +CREATE OPERATOR pg_catalog.|(leftarg = binary, rightarg = binary, procedure = pg_catalog.binary_or_binary); \ No newline at end of file -- Gitee From b2a29c9b75ba7f3750c73a4d8d409b60cd935330 Mon Sep 17 00:00:00 2001 From: Julong-Li <584147810@qq.com> Date: Thu, 21 Dec 2023 14:30:43 +0800 Subject: [PATCH 143/434] =?UTF-8?q?=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B?= =?UTF-8?q?=E5=90=8C=E6=AD=A5server=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/input/view_definer_test.source | 8 ++++---- contrib/dolphin/output/view_definer_test.source | 10 ++++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/contrib/dolphin/input/view_definer_test.source b/contrib/dolphin/input/view_definer_test.source index 4304ea61a..72fe406c0 100644 --- a/contrib/dolphin/input/view_definer_test.source +++ b/contrib/dolphin/input/view_definer_test.source @@ -33,12 +33,12 @@ end; SELECT c.relname as view_name, u.usename as rolname FROM pg_class c, pg_user u WHERE u.usesysid = c.relowner AND relname like '%definer_test_view%' order by view_name; -- dump all views -\! 
@abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 +\! @abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self --disable-progress | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 \! cat @abs_bindir@/definer_view_dump.sql -\! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp +\! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp --disable-progress CREATE DATABASE target DBCOMPATIBILITY 'B'; -\! @abs_bindir@/gs_restore -d target -p @portstring@ @abs_bindir@/definer_view_dump.dmp +\! @abs_bindir@/gs_restore -d target -p @portstring@ @abs_bindir@/definer_view_dump.dmp --disable-progress SELECT * FROM definer_test_view1; SELECT * FROM definer_test_view2; @@ -53,7 +53,7 @@ create database test_1; create table startwith_t(id int, level int, connect_by_isleaf int, connect_by_iscycle int); create view startwith_v as select id, connect_by_isleaf as level, level as connect_by_isleaf from startwith_t; -\! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql +\! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql --disable-progress drop database if exists test_2; create database test_2; \! @abs_bindir@/gsql -d test_2 -p @portstring@ -f @abs_bindir@/dump_postgres.sql diff --git a/contrib/dolphin/output/view_definer_test.source b/contrib/dolphin/output/view_definer_test.source index 61a1bfe62..d0455f956 100644 --- a/contrib/dolphin/output/view_definer_test.source +++ b/contrib/dolphin/output/view_definer_test.source @@ -61,7 +61,7 @@ SELECT c.relname as view_name, u.usename as rolname FROM pg_class c, pg_user u W (3 rows) -- dump all views -\! 
@abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 +\! @abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self --disable-progress | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 Begin scanning database. Finish scanning database. Start dumping objects @@ -99,17 +99,16 @@ ALTER TABLE public.tab_1107262 OWNER TO "Root_Test"; COPY public.tab_1107262 (id, c1) FROM stdin; \. ; -\! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp +\! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp --disable-progress Begin scanning database. Finish scanning database. ---?gs_dump[port='@portstring@'][test_db].* Start dumping objects Finish dumping objects --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* CREATE DATABASE target DBCOMPATIBILITY 'B'; -\! @abs_bindir@/gs_restore -d target -p @portstring@ @abs_bindir@/definer_view_dump.dmp +\! @abs_bindir@/gs_restore -d target -p @portstring@ @abs_bindir@/definer_view_dump.dmp --disable-progress start restore operation ... end restore operation ... restore operation successful @@ -145,10 +144,9 @@ create database test_1; \c test_1 create table startwith_t(id int, level int, connect_by_isleaf int, connect_by_iscycle int); create view startwith_v as select id, connect_by_isleaf as level, level as connect_by_isleaf from startwith_t; -\! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql +\! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql --disable-progress Begin scanning database. Finish scanning database. 
---?gs_dump[port='@portstring@'][test_1].* Start dumping objects Finish dumping objects --?gs_dump[port='@portstring@'][test_1].* -- Gitee From b3e0136f5bea769d0a9802204b9c06234bfc117b Mon Sep 17 00:00:00 2001 From: "wei.liu" Date: Thu, 21 Dec 2023 16:49:47 +0800 Subject: [PATCH 144/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=8D=E6=94=AF?= =?UTF-8?q?=E6=8C=81cidr=E7=B1=BB=E5=9E=8B=20cidr=20=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E8=BD=AC=E6=8D=A2=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=20#1=20=E5=A2=9E=E5=8A=A0=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/column_quote_alias.out | 6 ++++++ contrib/dolphin/plugin_parser/gram.y | 2 +- contrib/dolphin/sql/column_quote_alias.sql | 3 ++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/contrib/dolphin/expected/column_quote_alias.out b/contrib/dolphin/expected/column_quote_alias.out index a1dbbad1f..6460eded2 100644 --- a/contrib/dolphin/expected/column_quote_alias.out +++ b/contrib/dolphin/expected/column_quote_alias.out @@ -896,6 +896,12 @@ LINE 1: select hex(compress(point '(1,1)')); ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
CONTEXT: referenced column: hex +select cidr'10.1.0.0/16'; + cidr +------------- + 10.1.0.0/16 +(1 row) + DROP TABLE t_alias_case0001_1; drop schema column_quote_alias cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 1b51b12db..da45aef9e 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -37023,7 +37023,7 @@ AexprConst_without_Sconst: Iconst } | DB_B_CIDR SCONST { - TypeName * tmp = SystemTypeName("point"); + TypeName * tmp = SystemTypeName("cidr"); tmp->location = @1; $$ = makeStringConstCast($2, @2, tmp); } diff --git a/contrib/dolphin/sql/column_quote_alias.sql b/contrib/dolphin/sql/column_quote_alias.sql index 523cf2db2..4bb8785d8 100644 --- a/contrib/dolphin/sql/column_quote_alias.sql +++ b/contrib/dolphin/sql/column_quote_alias.sql @@ -140,6 +140,7 @@ SELECT a as 'SSSS' from t_alias_case0001_1; SELECT a 'SSSS' from t_alias_case0001_1; select atan(point '(1,1)',point '(2,1)'); select hex(compress(point '(1,1)')); +select cidr'10.1.0.0/16'; DROP TABLE t_alias_case0001_1; drop schema column_quote_alias cascade; -reset current_schema; \ No newline at end of file +reset current_schema; -- Gitee From e659e6099852cac7b9c5a3e2e4982c7c4b838a20 Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Fri, 22 Dec 2023 14:44:13 +0800 Subject: [PATCH 145/434] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=8E=A7=E5=88=B6?= =?UTF-8?q?=EF=BC=9Ab=5Fcompatibility=5Fmode=E6=8E=A7=E5=88=B6=E6=98=AF?= =?UTF-8?q?=E5=90=A6null=20first=EF=BC=88=E9=80=9A=E8=BF=87=E6=8C=87?= =?UTF-8?q?=E5=AE=9Aorderby=E6=8E=A7=E5=88=B6=EF=BC=8C=E5=BD=B1=E5=93=8D?= =?UTF-8?q?=E9=83=A8=E5=88=86=E6=89=A7=E8=A1=8C=E8=AE=A1=E5=88=92=EF=BC=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/test_charset_collate.out | 2 +- .../dolphin/expected/test_mysql_operator.out | 4 ++-- .../dolphin/expected/test_mysql_prepare.out | 18 +++++++++--------- 
contrib/dolphin/plugin_parser/gram.y | 18 +++++++++++++++++- contrib/dolphin/sql/test_mysql_operator.sql | 4 ++-- 5 files changed, 31 insertions(+), 15 deletions(-) diff --git a/contrib/dolphin/expected/test_charset_collate.out b/contrib/dolphin/expected/test_charset_collate.out index cb273e81d..879c602d2 100644 --- a/contrib/dolphin/expected/test_charset_collate.out +++ b/contrib/dolphin/expected/test_charset_collate.out @@ -180,6 +180,7 @@ insert into test_binary1 (f2) values('ppp'),('PpP'),('PPP'),('AAA'),('Aaa'),('aa select distinct f2 from test_binary1 order by f2; f2 ---------- + AAA Aaa PPP @@ -188,7 +189,6 @@ select distinct f2 from test_binary1 order by f2; aaa ppp \305\240 - (9 rows) select f2 from test_binary1 where f2 = 'ppp'::blob collate 'binary'; diff --git a/contrib/dolphin/expected/test_mysql_operator.out b/contrib/dolphin/expected/test_mysql_operator.out index 2f955d0ae..c955a5af2 100644 --- a/contrib/dolphin/expected/test_mysql_operator.out +++ b/contrib/dolphin/expected/test_mysql_operator.out @@ -608,7 +608,7 @@ select count(tem) from (select a&&b tem from testforboolean); 5 (1 row) -select a&&b,b from testforboolean order by b; +select a&&b,b from testforboolean order by b nulls last; ?column? | b ----------+--- f | f @@ -650,7 +650,7 @@ select count(tem) from (select a||b tem from testforboolean); 5 (1 row) -select a||b,b from testforboolean order by b; +select a||b,b from testforboolean order by b nulls last; ?column? 
| b ----------+--- f | f diff --git a/contrib/dolphin/expected/test_mysql_prepare.out b/contrib/dolphin/expected/test_mysql_prepare.out index d31f14a10..dfd73d026 100644 --- a/contrib/dolphin/expected/test_mysql_prepare.out +++ b/contrib/dolphin/expected/test_mysql_prepare.out @@ -192,7 +192,7 @@ explain (costs off, verbose on) execute p5; Output: "*SELECT* 1".a, (0) -> Sort Output: "*SELECT* 1".a, (0) - Sort Key: "*SELECT* 1".a + Sort Key: "*SELECT* 1".a NULLS FIRST -> HashSetOp Intersect Output: "*SELECT* 1".a, (0) -> Append @@ -869,12 +869,12 @@ explain (costs off, verbose on) execute s0 using @a; Merge Cond: (t1.id1 = t2.id1) -> Sort Output: t1.id1, t1.id2, t1.num - Sort Key: t1.id1 + Sort Key: t1.id1 NULLS FIRST -> Seq Scan on test_mysql_prepare.t1_xc_fqs t1 Output: t1.id1, t1.id2, t1.num -> Sort Output: t2.id1, t2.id2, t2.num - Sort Key: t2.id1 + Sort Key: t2.id1 NULLS FIRST -> Seq Scan on test_mysql_prepare.t2_xc_fqs t2 Output: t2.id1, t2.id2, t2.num Filter: (t2.id1 = $1) @@ -885,7 +885,7 @@ explain (costs off, verbose on) execute s1 using 1; ------------------------------------------------------------------------ Sort Output: t1.id1, t1.id2, t1.num, t2.id1, t2.id2, t2.num, t2.id1 - Sort Key: t2.id1 + Sort Key: t2.id1 NULLS FIRST -> Hash Left Join Output: t1.id1, t1.id2, t1.num, t2.id1, t2.id2, t2.num, t2.id1 Hash Cond: (t2.id1 = t1.id1) @@ -903,7 +903,7 @@ explain (costs off, verbose on) execute s2 using 1; -------------------------------------------------------------------------------- Sort Output: t1.id1, t1.id2, t1.num, t2.id1, t2.id2, t2.num, t1.id1, t2.id1 - Sort Key: t1.id1, t2.id1 + Sort Key: t1.id1 NULLS FIRST, t2.id1 NULLS FIRST -> Hash Full Join Output: t1.id1, t1.id2, t1.num, t2.id1, t2.id2, t2.num, t1.id1, t2.id1 Hash Cond: (t1.id1 = t2.id1) @@ -939,15 +939,15 @@ execute s1 using @a; execute s2 using 1; id1 | id2 | num | id1 | id2 | num -----+-----+-----+-----+-----+----- + | | | 2 | 3 | 22 + | | | 3 | 4 | 32 + | | | 4 | 5 | 42 + | | | 5 | 6 | 
52 1 | 1 | 11 | 1 | 2 | 12 2 | 2 | 21 | | | 3 | 3 | 31 | | | 4 | 4 | 41 | | | 5 | 5 | 51 | | | - | | | 2 | 3 | 22 - | | | 3 | 4 | 32 - | | | 4 | 5 | 42 - | | | 5 | 6 | 52 (9 rows) deallocate s0; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index da45aef9e..b8843501b 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -531,6 +531,8 @@ static inline void ChangeBpcharCastType(TypeName* typname); static inline List* NakeLikeOpList(); static inline List* MakeNotLikeOpList(); static inline Node* MakeSubLinkWithOp(SubLinkType subType, Node* testExpr, char* op, Node* subSelect, int location); +/* null is the minimum value in sortby */ +static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRule); %} %define api.pure @@ -29959,7 +29961,7 @@ sortby: a_expr USING qual_all_Op opt_nulls_order $$ = makeNode(SortBy); $$->node = $1; $$->sortby_dir = (SortByDir)$2; - $$->sortby_nulls = (SortByNulls)$3; + $$->sortby_nulls = GetNullOrderRule($$->sortby_dir, (SortByNulls)$3);; $$->useOp = NIL; $$->location = -1; /* no operator */ } @@ -41730,6 +41732,20 @@ static inline void ChangeBpcharCastType(TypeName* typname) } } +static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRule) +{ + if (!ENABLE_B_CMPT_MODE) { + return nullRule; + } + if (sortBy == SORTBY_DESC && nullRule == SORTBY_NULLS_DEFAULT) { + return SORTBY_NULLS_LAST; + } else if ((sortBy == SORTBY_ASC || sortBy == SORTBY_DEFAULT) && nullRule == SORTBY_NULLS_DEFAULT) { + return SORTBY_NULLS_FIRST; + } + return nullRule; +} + + /* * Must undefine this stuff before including scan.c, since it has different * definitions for these macros. 
diff --git a/contrib/dolphin/sql/test_mysql_operator.sql b/contrib/dolphin/sql/test_mysql_operator.sql index 9da30f718..554628038 100644 --- a/contrib/dolphin/sql/test_mysql_operator.sql +++ b/contrib/dolphin/sql/test_mysql_operator.sql @@ -244,7 +244,7 @@ drop view testforboolean_v; --- test for function select count(tem) from (select a&&b tem from testforboolean); -select a&&b,b from testforboolean order by b; +select a&&b,b from testforboolean order by b nulls last; select char_length('asbjhc')&&char_length('askjdhkj'); select left('1023jasdzlxc',5)&&left('1023jasdnzxc',5); @@ -252,7 +252,7 @@ select substring('as1dz34lcas',3)&&substring('zxcbkj1shd',5); select replace('123456789','234','asd')&&replace('123456789','234','asd'); select count(tem) from (select a||b tem from testforboolean); -select a||b,b from testforboolean order by b; +select a||b,b from testforboolean order by b nulls last; select char_length('asbjhc')||char_length('askjdhkj'); select left('1023jasdzlxc',5)||left('1023jasdnzxc',5); -- Gitee From 42c088ba272035c09d176d2cfb410f78092dbbb7 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Fri, 22 Dec 2023 15:06:05 +0800 Subject: [PATCH 146/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BB=A3=E7=A0=81?= =?UTF-8?q?=E4=B8=AD=E5=8F=AF=E8=83=BD=E4=BC=9A=E5=87=BA=E7=8E=B0=E7=9A=84?= =?UTF-8?q?=E5=AE=89=E5=85=A8=E9=9A=90=E6=82=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/whale/include/plugin_postgres.h | 3 ++- contrib/whale/plugin_orafce/file.cpp | 14 ++++++-------- contrib/whale/plugin_postgres.cpp | 3 ++- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/contrib/whale/include/plugin_postgres.h b/contrib/whale/include/plugin_postgres.h index c6d25b046..87b9d8b71 100644 --- a/contrib/whale/include/plugin_postgres.h +++ b/contrib/whale/include/plugin_postgres.h @@ -86,7 +86,8 @@ typedef struct ASqlPluginContext { /* file.c */ OraFileSlot slots[MAX_SLOTS]; /* initilaized with 
zeros */ int32 slotid = 0; /* next slot id */ - SPIPlanPtr plan = NULL; + SPIPlanPtr safe_named_location_plan = NULL; + SPIPlanPtr check_secure_locality_plan = NULL; /* others.c */ char *lc_collate_cache = NULL; size_t multiplication = 1; diff --git a/contrib/whale/plugin_orafce/file.cpp b/contrib/whale/plugin_orafce/file.cpp index 4b88568e1..693d0daee 100644 --- a/contrib/whale/plugin_orafce/file.cpp +++ b/contrib/whale/plugin_orafce/file.cpp @@ -702,8 +702,6 @@ Datum utl_file_fclose_all(PG_FUNCTION_ARGS) */ static void check_secure_locality(const char *path) { - static SPIPlanPtr plan = NULL; - Oid argtypes[] = {TEXTOID}; Datum values[1]; char nulls[1] = {' '}; @@ -722,7 +720,7 @@ static void check_secure_locality(const char *path) if (SPI_connect() < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed"))); - if (!plan) { + if (!GetSessionContext()->check_secure_locality_plan) { /* Don't use LIKE not to escape '_' and '%' */ SPIPlanPtr p = SPI_prepare("SELECT 1 FROM utl_file.utl_file_dir" " WHERE CASE WHEN substring(dir from '.$') = '/' THEN" @@ -732,11 +730,11 @@ static void check_secure_locality(const char *path) " END", 1, argtypes); - if (p == NULL || (plan = SPI_saveplan(p)) == NULL) + if (p == NULL || (GetSessionContext()->check_secure_locality_plan = SPI_saveplan(p)) == NULL) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_prepare_failed"))); } - if (SPI_OK_SELECT != SPI_execute_plan(plan, values, nulls, false, 1)) + if (SPI_OK_SELECT != SPI_execute_plan(GetSessionContext()->check_secure_locality_plan, values, nulls, false, 1)) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("can't execute sql"))); if (SPI_processed == 0) @@ -761,15 +759,15 @@ static char *safe_named_location(text *location) if (SPI_connect() < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed"))); - if (!GetSessionContext()->plan) { + if (!GetSessionContext()->safe_named_location_plan) { /* Don't use LIKE not to 
escape '_' and '%' */ SPIPlanPtr p = SPI_prepare("SELECT dir FROM utl_file.utl_file_dir WHERE dirname = $1", 1, argtypes); - if (p == NULL || (GetSessionContext()->plan = SPI_saveplan(p)) == NULL) + if (p == NULL || (GetSessionContext()->safe_named_location_plan = SPI_saveplan(p)) == NULL) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_prepare_failed"))); } - if (SPI_OK_SELECT != SPI_execute_plan(GetSessionContext()->plan, values, nulls, false, 1)) + if (SPI_OK_SELECT != SPI_execute_plan(GetSessionContext()->safe_named_location_plan, values, nulls, false, 1)) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("can't execute sql"))); if (SPI_processed > 0) { diff --git a/contrib/whale/plugin_postgres.cpp b/contrib/whale/plugin_postgres.cpp index d559a4c9e..91a3ed9f9 100644 --- a/contrib/whale/plugin_postgres.cpp +++ b/contrib/whale/plugin_postgres.cpp @@ -232,7 +232,8 @@ static void init_orafce_vars(ASqlPluginContext* orafce_psc) orafce_psc->mru_weekdays = NULL; /* file.c */ orafce_psc->slotid = 0; - orafce_psc->plan = NULL; + orafce_psc->check_secure_locality_plan = NULL; + orafce_psc->safe_named_location_plan = NULL; init_slots(); /* others.c */ orafce_psc->lc_collate_cache = NULL; -- Gitee From 97f08865e73fe0f44f73bfca420a832d9aa6ea26 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Fri, 22 Dec 2023 15:58:08 +0800 Subject: [PATCH 147/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dsql=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/og-timescaledb1.7.4.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/timescaledb/og-timescaledb1.7.4.sql b/contrib/timescaledb/og-timescaledb1.7.4.sql index 28b5b1086..2437ad643 100644 --- a/contrib/timescaledb/og-timescaledb1.7.4.sql +++ b/contrib/timescaledb/og-timescaledb1.7.4.sql @@ -812,6 +812,7 @@ DECLARE dimension_cnt INTEGER; dimension_row record; ret TEXT; + v_count INTEGER; BEGIN SELECT COUNT(*) INTO v_count -- 
Gitee From 3e0b3b8d12371125685e55fdb187381d69d64aa1 Mon Sep 17 00:00:00 2001 From: liruixiang <461834084@qq.com> Date: Wed, 20 Dec 2023 19:25:20 +0800 Subject: [PATCH 148/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddiv=E6=93=8D=E4=BD=9C?= =?UTF-8?q?=E7=AC=A6=E5=AF=B9binary=E7=B1=BB=E5=9E=8B=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E7=9A=84=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/bxconst_test.out | 439 +++++++++++++++++- contrib/dolphin/expected/db_b_parser3.out | 101 ++-- contrib/dolphin/parallel_schedule_dolphin | 2 +- .../rollback_script/dolphin--3.0--2.0.sql | 6 +- contrib/dolphin/sql/bxconst_test.sql | 52 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 33 +- 6 files changed, 563 insertions(+), 70 deletions(-) diff --git a/contrib/dolphin/expected/bxconst_test.out b/contrib/dolphin/expected/bxconst_test.out index 8467042ad..e11954d5d 100644 --- a/contrib/dolphin/expected/bxconst_test.out +++ b/contrib/dolphin/expected/bxconst_test.out @@ -45,10 +45,10 @@ select * from t_bit; (2 rows) select * from t_bin; - a ----------------- - \x313433393200 - \x373600000000 + a +------------ + 14392\0 + 76\0\0\0\0 (2 rows) @@ -206,6 +206,157 @@ select 0x01 | 0x02; (1 row) +select 5::binary div 2; + div +----- + 2 +(1 row) + +select 5::binary div (2::int1); + div +----- + 2 +(1 row) + +select 5::binary div (2::int2); + div +----- + 2 +(1 row) + +select 5::binary div (2::int4); + div +----- + 2 +(1 row) + +select 5::binary div (2::int8); + div +----- + 2 +(1 row) + +select 5::binary div (2::uint1); + div +----- + 2 +(1 row) + +select 5::binary div (2::uint2); + div +----- + 2 +(1 row) + +select 5::binary div (2::uint4); + div +----- + 2 +(1 row) + +select 5::binary div (2::uint8); + div +----- + 2 +(1 row) + +select 5::binary div (2::float4); + div +----- + 2 +(1 row) + +select 5::binary div (2::float8); + div +----- + 2 +(1 row) + +select 5::binary div (2::numeric); + div +----- + 2 +(1 row) + +select 
5 div 2::binary; + div +----- + 2 +(1 row) + +select (5::int1) div 2::binary; + div +----- + 2 +(1 row) + +select (5::int2) div 2::binary; + div +----- + 2 +(1 row) + +select (5::int4) div 2::binary; + div +----- + 2 +(1 row) + +select (5::int8) div 2::binary; + div +----- + 2 +(1 row) + +select (5::uint1) div 2::binary; + div +----- + 2 +(1 row) + +select (5::uint2) div 2::binary; + div +----- + 2 +(1 row) + +select (5::uint4) div 2::binary; + div +----- + 2 +(1 row) + +select (5::uint8) div 2::binary; + div +----- + 2 +(1 row) + +select (5::float4) div 2::binary; + div +----- + 2 +(1 row) + +select (5::float8) div 2::binary; + div +----- + 2 +(1 row) + +select (5::numeric) div 2::binary; + div +----- + 2 +(1 row) + +select 5::binary div 2::binary; + div +----- + 2 +(1 row) + + -- å°†bxconst当作binaryå¤„ç† set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary; @@ -215,7 +366,7 @@ create table t_bin(a binary(6)); select b'11100000111000'; ?column? ---------- - \x3838 + 88 (1 row) select pg_typeof(b'11100000111000'); @@ -227,7 +378,7 @@ select pg_typeof(b'11100000111000'); select x'4c'; ?column? 
---------- - \x4c + L (1 row) select pg_typeof(x'4c'); @@ -248,10 +399,10 @@ select * from t_bit; (2 rows) select * from t_bin; - a ----------------- - \x383800000000 - \x4c0000000000 + a +------------- + 88\0\0\0\0 + L\0\0\0\0\0 (2 rows) @@ -461,6 +612,274 @@ CONTEXT: SQL function "binary_or_binary" statement 1 (1 row) +select 0x01 div 2; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::int1); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::int2); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::int4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::int8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::uint1); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::uint2); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement 
block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::uint4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::uint8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::float4); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::float8); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 0x01 div (2::numeric); +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div + div +----- + 0 +(1 row) + +select 2 div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::int1) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: 
div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::int2) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::int4) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::int8) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::uint1) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::uint2) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL 
function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::uint4) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::uint8) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::float4) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select (2::float8) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + 
div +----- + +(1 row) + +select (2::numeric) div 0x01; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"(double precision,"binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + +select 0x01 div 0x02; +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: invalid input syntax for type numeric: "" +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + + reset dolphin.sql_mode; drop schema bxconst_test cascade; diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index 0ec8a4f7c..3478154b2 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -452,9 +452,8 @@ LINE 1: select div 1; ^ select '-12.3abc' div null; WARNING: invalid input syntax for type numeric: "-12.3abc" -LINE 1: select '-12.3abc' div null; - ^ -CONTEXT: referenced column: div +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div div ----- @@ -472,9 +471,8 @@ CONTEXT: referenced column: div select '-12.3abc' div 0; WARNING: invalid input syntax for type numeric: "-12.3abc" -LINE 1: select '-12.3abc' div 0; - ^ -CONTEXT: referenced column: div +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable 
initialization +referenced column: div div ----- @@ -482,9 +480,8 @@ CONTEXT: referenced column: div select '-12.3abc' div 5; WARNING: invalid input syntax for type numeric: "-12.3abc" -LINE 1: select '-12.3abc' div 5; - ^ -CONTEXT: referenced column: div +CONTEXT: PL/pgSQL function "div"("binary",double precision) line 4 during statement block local variable initialization +referenced column: div div ----- -2 @@ -532,13 +529,11 @@ CONTEXT: referenced column: div select '-12.3abc' div 'null'; WARNING: invalid input syntax for type numeric: "-12.3abc" -LINE 1: select '-12.3abc' div 'null'; - ^ -CONTEXT: referenced column: div +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div WARNING: invalid input syntax for type numeric: "null" -LINE 1: select '-12.3abc' div 'null'; - ^ -CONTEXT: referenced column: div +CONTEXT: PL/pgSQL function "div"("binary","binary") line 4 during statement block local variable initialization +referenced column: div div ----- @@ -834,11 +829,11 @@ select c1 div c12 from div_test; (1 row) select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: referenced column: div + div +----- + +(1 row) + select c1 div c14 from div_test; div ----- @@ -902,11 +897,6 @@ insert into tmp_res select c1 div c10 from div_test; insert into tmp_res select c1 div c11 from div_test; insert into tmp_res select c1 div c12 from div_test; insert into tmp_res select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: insert into tmp_res select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-CONTEXT: referenced column: div insert into tmp_res select c1 div c14 from div_test; insert into tmp_res select c1 div c15 from div_test; ERROR: invalid input syntax for type double precision: "{"0":"0"}" @@ -1171,11 +1161,11 @@ select c1 div c12 from div_test; (1 row) select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: referenced column: div + div +----- + +(1 row) + select c1 div c14 from div_test; div ----- @@ -1244,11 +1234,6 @@ insert into tmp_res select c1 div c10 from div_test; insert into tmp_res select c1 div c11 from div_test; insert into tmp_res select c1 div c12 from div_test; insert into tmp_res select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: insert into tmp_res select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: referenced column: div insert into tmp_res select c1 div c14 from div_test; insert into tmp_res select c1 div c15 from div_test; WARNING: invalid input syntax for type double precision: "{"0":"0"}" @@ -1573,11 +1558,14 @@ CONTEXT: referenced column: div (1 row) select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-CONTEXT: referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function `div`(double precision,`binary`) line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + select c1 div c14 from div_test; WARNING: division by zero CONTEXT: referenced column: div @@ -1720,11 +1708,9 @@ insert into tmp_res select c1 div c12 from div_test; ERROR: division by zero CONTEXT: referenced column: div insert into tmp_res select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: insert into tmp_res select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: referenced column: div +ERROR: division by zero +CONTEXT: PL/pgSQL function `div`(double precision,`binary`) line 4 during statement block local variable initialization +referenced column: div insert into tmp_res select c1 div c14 from div_test; ERROR: division by zero CONTEXT: referenced column: a @@ -2059,11 +2045,14 @@ CONTEXT: referenced column: div (1 row) select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-CONTEXT: referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function `div`(double precision,`binary`) line 4 during statement block local variable initialization +referenced column: div + div +----- + +(1 row) + select c1 div c14 from div_test; WARNING: division by zero CONTEXT: referenced column: div @@ -2219,11 +2208,9 @@ insert into tmp_res select c1 div c12 from div_test; WARNING: division by zero CONTEXT: referenced column: div insert into tmp_res select c1 div c13 from div_test; -ERROR: function pg_catalog.div(tinyint, `binary`) does not exist -LINE 1: insert into tmp_res select c1 div c13 from div_test; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: referenced column: div +WARNING: division by zero +CONTEXT: PL/pgSQL function `div`(double precision,`binary`) line 4 during statement block local variable initialization +referenced column: div insert into tmp_res select c1 div c14 from div_test; WARNING: division by zero CONTEXT: referenced column: a @@ -2237,13 +2224,13 @@ referenced column: div select count(*) from tmp_res; count ------- - 126 + 129 (1 row) select count(*) from tmp_res where a is null; count ------- - 126 + 129 (1 row) drop table tmp_res; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 43d77c206..97f75a84e 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -30,7 +30,7 @@ test: test_shows_4 test_shows_5 test: nvarchar regexp upsert zerofill test_set_charset test_optimize test_charset_collate charset_utf8mb4_b_db charset_gbk_b_db -test: test_binary test_blob test_datatype test_fixed test_mysql_enum +test: test_binary test_blob test_datatype test_fixed test_mysql_enum bxconst_test test: test_enum_collation diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 
4a8b5d1a4..5399cfffe 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -272,4 +272,8 @@ DROP FUNCTION IF EXISTS pg_catalog.double_or_binary(double precision, binary) CA DROP FUNCTION IF EXISTS pg_catalog.binary_or_double(binary, double precision) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.uint8_or_binary(uint8, binary) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.binary_or_uint8(binary, uint8) CASCADE; -DROP FUNCTION IF EXISTS pg_catalog.binary_or_binary(binary, binary) CASCADE; \ No newline at end of file +DROP FUNCTION IF EXISTS pg_catalog.binary_or_binary(binary, binary) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.div(binary, double precision); +DROP FUNCTION IF EXISTS pg_catalog.div(double precision, binary); +DROP FUNCTION IF EXISTS pg_catalog.div(binary, binary); \ No newline at end of file diff --git a/contrib/dolphin/sql/bxconst_test.sql b/contrib/dolphin/sql/bxconst_test.sql index 6aeb790e5..6c622bc8c 100644 --- a/contrib/dolphin/sql/bxconst_test.sql +++ b/contrib/dolphin/sql/bxconst_test.sql @@ -49,6 +49,32 @@ select (2::float8) | 0x01; select (2::numeric) | 0x01; select 0x01 | 0x02; +select 5::binary div 2; +select 5::binary div (2::int1); +select 5::binary div (2::int2); +select 5::binary div (2::int4); +select 5::binary div (2::int8); +select 5::binary div (2::uint1); +select 5::binary div (2::uint2); +select 5::binary div (2::uint4); +select 5::binary div (2::uint8); +select 5::binary div (2::float4); +select 5::binary div (2::float8); +select 5::binary div (2::numeric); +select 5 div 2::binary; +select (5::int1) div 2::binary; +select (5::int2) div 2::binary; +select (5::int4) div 2::binary; +select (5::int8) div 2::binary; +select (5::uint1) div 2::binary; +select (5::uint2) div 2::binary; +select (5::uint4) div 2::binary; +select (5::uint8) div 2::binary; +select (5::float4) div 2::binary; +select (5::float8) div 2::binary; +select (5::numeric) div 2::binary; +select 
5::binary div 2::binary; + -- å°†bxconst当作binaryå¤„ç† set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary; @@ -95,6 +121,32 @@ select (2::float8) | 0x01; select (2::numeric) | 0x01; select 0x01 | 0x02; +select 0x01 div 2; +select 0x01 div (2::int1); +select 0x01 div (2::int2); +select 0x01 div (2::int4); +select 0x01 div (2::int8); +select 0x01 div (2::uint1); +select 0x01 div (2::uint2); +select 0x01 div (2::uint4); +select 0x01 div (2::uint8); +select 0x01 div (2::float4); +select 0x01 div (2::float8); +select 0x01 div (2::numeric); +select 2 div 0x01; +select (2::int1) div 0x01; +select (2::int2) div 0x01; +select (2::int4) div 0x01; +select (2::int8) div 0x01; +select (2::uint1) div 0x01; +select (2::uint2) div 0x01; +select (2::uint4) div 0x01; +select (2::uint8) div 0x01; +select (2::float4) div 0x01; +select (2::float8) div 0x01; +select (2::numeric) div 0x01; +select 0x01 div 0x02; + reset dolphin.sql_mode; drop schema bxconst_test cascade; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 9937ddc4c..110bbd515 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -454,4 +454,35 @@ CREATE OR REPLACE FUNCTION pg_catalog.binary_or_uint8(binary, uint8) RETURNS uin CREATE OPERATOR pg_catalog.|(leftarg = binary, rightarg = uint8, procedure = pg_catalog.binary_or_uint8); DROP FUNCTION IF EXISTS pg_catalog.binary_or_binary(binary, binary) CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.binary_or_binary(binary, binary) RETURNS bigint LANGUAGE SQL IMMUTABLE STRICT as $$ SELECT $1::numeric | $2::numeric $$; -CREATE OPERATOR pg_catalog.|(leftarg = binary, rightarg = binary, procedure = pg_catalog.binary_or_binary); \ No newline at end of file +CREATE OPERATOR 
pg_catalog.|(leftarg = binary, rightarg = binary, procedure = pg_catalog.binary_or_binary); + +DROP FUNCTION IF EXISTS pg_catalog.div(binary, double precision); +CREATE OR REPLACE FUNCTION pg_catalog.div(binary, double precision) +returns numeric language plpgsql as +$$ +declare + quotient double precision := $1::numeric / $2; +begin + return sign(quotient) * floor(abs(quotient)); +end; +$$; +DROP FUNCTION IF EXISTS pg_catalog.div(double precision, binary); +CREATE OR REPLACE FUNCTION pg_catalog.div(double precision, binary) +returns numeric language plpgsql as +$$ +declare + quotient double precision := $1 / $2::numeric; +begin + return sign(quotient) * floor(abs(quotient)); +end; +$$; +DROP FUNCTION IF EXISTS pg_catalog.div(binary, binary); +CREATE OR REPLACE FUNCTION pg_catalog.div(binary, binary) +returns numeric language plpgsql as +$$ +declare + quotient double precision := $1::numeric / $2::numeric; +begin + return sign(quotient) * floor(abs(quotient)); +end; +$$; \ No newline at end of file -- Gitee From 9c1f60e0af5a73d8448bbefd70a7f902299112d4 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Mon, 25 Dec 2023 10:34:17 +0800 Subject: [PATCH 149/434] =?UTF-8?q?=E6=8F=90=E5=8F=96date.cpp=E4=B8=AD?= =?UTF-8?q?=E7=9A=84=E9=87=8D=E5=A4=8D=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_utils/adt/date.cpp | 82 ++++++++--------------- 1 file changed, 27 insertions(+), 55 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 27b7cc404..46a7f3ae3 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -67,6 +67,7 @@ extern const char* extract_numericstr(const char* str); extern "C" DLL_PUBLIC Datum uint8out(PG_FUNCTION_ARGS); static char* adjust_b_format_time(char *str, int *timeSign, int *D, bool *hasD); int DatetimeDate(char *str, pg_tm *tm, bool 
is_date_sconst = false); +static float8 getPartFromTm(pg_tm* tm, fsec_t fsec, int part); PG_FUNCTION_INFO_V1_PUBLIC(int8_b_format_time); extern "C" DLL_PUBLIC Datum int8_b_format_time(PG_FUNCTION_ARGS); @@ -5871,6 +5872,28 @@ Datum adddate_time_interval(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } +static float8 getPartFromTm(pg_tm* tm, fsec_t fsec, int part) +{ + float8 result = 0; + switch (part) { + case HOUR: + result = tm->tm_hour; + break; + case MINUTE: + result = tm->tm_min; + break; + case SECOND: + result = tm->tm_sec; + break; + case MICROSECOND: + result = fsec; + break; + default: + break; + } + return result; +} + static inline Datum GetSpecificPartOfTime(PG_FUNCTION_ARGS, int part) { char *tString = text_to_cstring(PG_GETARG_TEXT_PP(0)); @@ -5888,7 +5911,6 @@ static inline Datum GetSpecificPartOfTime(PG_FUNCTION_ARGS, int part) bool warnings; int tm_type; bool null_func_result = false; - float8 result = 0; if (!cstring_to_time(tString, tm, fsec, timeSign, tm_type, warnings, &null_func_result) || null_func_result) { PG_RETURN_NULL(); } @@ -5897,23 +5919,7 @@ static inline Datum GetSpecificPartOfTime(PG_FUNCTION_ARGS, int part) ereport(errlevel, (errcode(DTERR_BAD_FORMAT), errmsg("Truncated incorrect time value: \"%s\"", tString))); } - switch (part) { - case HOUR: - result = tm->tm_hour; - break; - case MINUTE: - result = tm->tm_min; - break; - case SECOND: - result = tm->tm_sec; - break; - case MICROSECOND: - result = fsec; - break; - default: - break; - } - PG_RETURN_FLOAT8(result); + PG_RETURN_FLOAT8(getPartFromTm(tm, fsec, part)); } Datum GetHour(PG_FUNCTION_ARGS) @@ -5942,29 +5948,13 @@ static Datum GetSpecificPartOfTimeInDate(PG_FUNCTION_ARGS, int part) fsec_t fsec; pg_tm tt; pg_tm* tm = &tt; - float8 result = 0; if (timestamp2tm(date2timestamp(dateVal), NULL, tm, &fsec, NULL, NULL) == 0) { - switch (part) { - case HOUR: - result = tm->tm_hour; - break; - case MINUTE: - result = tm->tm_min; - break; - case SECOND: - result = tm->tm_sec; - break; - case 
MICROSECOND: - result = fsec; - break; - default: - break; - } + PG_RETURN_FLOAT8(getPartFromTm(tm, fsec, part)); } else { ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); } - PG_RETURN_FLOAT8(result); + PG_RETURN_FLOAT8(0); } Datum GetHourFromDate(PG_FUNCTION_ARGS) @@ -5995,25 +5985,7 @@ static Datum GetSpecificPartOfTimeInTimeTz(PG_FUNCTION_ARGS, int part) fsec_t fsec; int tz; timetz2tm(time, tm, &fsec, &tz); - float8 result = 0; - - switch (part) { - case HOUR: - result = tm->tm_hour; - break; - case MINUTE: - result = tm->tm_min; - break; - case SECOND: - result = tm->tm_sec; - break; - case MICROSECOND: - result = fsec; - break; - default: - break; - } - PG_RETURN_FLOAT8(result); + PG_RETURN_FLOAT8(getPartFromTm(tm, fsec, part)); } Datum GetHourFromTimeTz(PG_FUNCTION_ARGS) -- Gitee From 371e1131a565507c84e535263419c2e7db3b09fb Mon Sep 17 00:00:00 2001 From: yuchao Date: Thu, 21 Dec 2023 14:11:26 +0800 Subject: [PATCH 150/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dcompress=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E8=B6=85=E8=8C=83=E5=9B=B4=E5=90=8E=E6=9C=AA=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../string_func_test/db_b_compress_test.out | 26 +++---------------- .../string_func_test/db_b_nameconst_test.out | 16 ++---------- contrib/dolphin/plugin_parser/scan.l | 24 +++++++++++++++++ 3 files changed, 29 insertions(+), 37 deletions(-) diff --git a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out index 94b906521..5e6569544 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out @@ -240,15 +240,9 @@ SELECT HEX(COMPRESS(1E+1000)); (1 row) SELECT HEX(COMPRESS(1E+1001)); --ERROR -WARNING: invalid input syntax for type numeric: "1E+1001" +ERROR: invalid 
input syntax for type numeric: "1E+1001" LINE 1: SELECT HEX(COMPRESS(1E+1001)); ^ -CONTEXT: referenced column: hex - hex --------------------------------------------- - ea030000789c33341805a360140c7f000028b4bbe2 -(1 row) - SELECT HEX(COMPRESS(RPAD('a', 1024, 'a'))); hex -------------------------------------------- @@ -573,17 +567,9 @@ CONTEXT: referenced column: uncompress (1 row) SELECT UNCOMPRESS(1E+1001); --ERROR -WARNING: invalid input syntax for type numeric: "1E+1001" +ERROR: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT UNCOMPRESS(1E+1001); ^ -CONTEXT: referenced column: uncompress -WARNING: ZLIB: Input data corrupted -CONTEXT: referenced column: uncompress - uncompress ------------- - -(1 row) - SELECT UNCOMPRESS(COMPRESS(RPAD('a', 1024, 'a'))); uncompress ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -899,15 +885,9 @@ SELECT UNCOMPRESSED_LENGTH(COMPRESS(RPAD('a', 1000000, 'a'))); (1 row) SELECT UNCOMPRESSED_LENGTH(1E+1001); --ERROR -WARNING: invalid input syntax for type 
numeric: "1E+1001" +ERROR: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT UNCOMPRESSED_LENGTH(1E+1001); ^ -CONTEXT: referenced column: uncompressed_length - uncompressed_length ---------------------- - 808464433 -(1 row) - SELECT UNCOMPRESSED_LENGTH(COMPRESS(RPAD('a', 1024, 'a'))); uncompressed_length --------------------- diff --git a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out index 4039b4c9b..15aab5342 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out @@ -356,15 +356,9 @@ SELECT NAME_CONST(1E+1000, 'test'); (1 row) SELECT NAME_CONST(1E+1001, 'test'); --ERROR -WARNING: invalid input syntax for type numeric: "1E+1001" +ERROR: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT NAME_CONST(1E+1001, 'test'); ^ -CONTEXT: referenced column: name_const - 100000000000000000000000000000000000000000000000000000000000000 ------------------------------------------------------------------ - test -(1 row) - SELECT NAME_CONST('test', 1E+1000); test 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -372,15 +366,9 @@ SELECT NAME_CONST('test', 1E+1000); (1 row) SELECT NAME_CONST('test', 1E+1001); --ERROR -WARNING: invalid input syntax for type numeric: "1E+1001" +ERROR: invalid input syntax for type numeric: "1E+1001" LINE 1: SELECT NAME_CONST('test', 1E+1001); ^ -CONTEXT: referenced column: name_const - test 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - SELECT NAME_CONST('test', '9999-99-99 99:99:99'::time); --ERROR WARNING: date/time field value out of range: "9999-99-99 99:99:99" LINE 1: SELECT NAME_CONST('test', '9999-99-99 99:99:99'::time); diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index f4244148a..c434b53dd 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -134,6 +134,7 @@ static void dealSahrpComment(core_yyscan_t yyscanner); extern int core_yyget_column(yyscan_t yyscanner); extern void core_yyset_column(int column_no, yyscan_t yyscanner); #ifdef DOLPHIN +static void interceptReal(char* str, core_yyscan_t yyscanner); static char* CopyIdentifier(const char* ident, int len, bool warn); #endif %} @@ -1246,6 +1247,9 @@ other . 
{real} { SET_YYLLOC(); yylval->str = pstrdup(yytext); +#ifdef DOLPHIN + interceptReal(yylval->str, yyscanner); +#endif yyextra->is_hint_str = false; return FCONST; } @@ -2306,6 +2310,26 @@ dealSahrpComment(core_yyscan_t yyscanner) } } +static void interceptReal(char* str, core_yyscan_t yyscanner) +{ + char* tmp; + long exponent; + char* endptr = NULL; + + tmp = str; + while (*tmp != 'e' && *tmp != 'E') { + tmp++; + } + + tmp++; + exponent = strtol(tmp, &endptr, 10); + if (exponent > NUMERIC_MAX_PRECISION || exponent < -NUMERIC_MAX_PRECISION) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type numeric: \"%s\"", str), + lexer_errposition())); +} + static char* CopyIdentifier(const char* ident, int len, bool warn) { char* result = NULL; -- Gitee From 2b16d6897dc4af5d6b3431c588997d55943dedb0 Mon Sep 17 00:00:00 2001 From: huangjiajun <1148612505@qq.com> Date: Mon, 25 Dec 2023 15:37:41 +0800 Subject: [PATCH 151/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dyear=E6=97=A0?= =?UTF-8?q?=E6=B3=95=E8=BD=AC=E6=8D=A2=E6=88=90boolean=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98,=E5=B9=B6=E4=BF=AE=E6=94=B9=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=94=A8=E4=BE=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_operator_test.out | 68 +++++++------------ .../rollback_script/dolphin--3.0--2.0.sql | 5 +- .../json_operator_test.sql | 16 ++--- .../upgrade_script/dolphin--2.0--3.0.sql | 9 ++- 4 files changed, 46 insertions(+), 52 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index c0b26d56d..3c8c26855 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -427,13 +427,13 @@ CREATE TABLE test_json_type AS SELECT `year` << `json` AS `year<>json | 2023 
year<>year | 0 json< Date: Mon, 25 Dec 2023 18:46:30 +0800 Subject: [PATCH 152/434] =?UTF-8?q?=E5=88=A0=E9=99=A4=E4=B8=8D=E5=BF=85?= =?UTF-8?q?=E8=A6=81=E7=9A=84=E7=BA=A6=E6=9D=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 38 ++++++++++++----------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index ab4fd1e4a..e10409567 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -28,10 +28,8 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› -- TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› - # **2.** TimescaleDB安装方法 @@ -115,29 +113,19 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, # **3.** TimescaleDBå¯ç”¨æŽ¥å£ -| åºå· | 接å£åç§° | 说明 | -| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| 1 | chunk_relation_size | 获å–超表å—çš„å…³ç³»å¤§å° | -| 2 | chunk_relation_size_pretty | 获å–超表å—çš„å…³ç³»å¤§å° | -| 3 | drop_chunks | 删除时间范围完全在指定时间之å‰ï¼ˆæˆ–之åŽï¼‰çš„æ•°æ®åŒºå—,跨所有超级表或针对特定超级表è¿è¡Œã€‚ | -| 4 | hypertable_relation_size | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | -| 5 | hypertable_relation_size_pretty | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | -| 6 | indexes_relation_size | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | -| 7 | indexes_relation_size_pretty | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | -| 8 | set_number_partitions | 设置超表上空间维度的分区(片)数 | -| 9 | show_chunks | 获å–与超表关è”的区å—列表 | -| 10 | add_dimension()空间分区 | å‘超表添加é¢å¤–的分区维度。选择作为维度的列å¯ä»¥ä½¿ç”¨é—´éš”分区或哈希分区。 | -| 11 | attach_tablespace()将表空间附加到超表 | 将表空间附加到超表并使用它æ¥å­˜å‚¨å— | -| 12 | create_hypertable()创建超表 | 创建超表 | -| 13 | 
detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | -| 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | -| 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | -| 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | -| 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | -| 19 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | -| 20 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | - +| åºå· | 接å£åç§° | 说明 | +|:----:|:------------------------------------------------------------:|:---------------------------------------------------------------------------------:| +| 1 | add_dimension()空间分区 | å‘超表添加é¢å¤–的分区维度。选择作为维度的列å¯ä»¥ä½¿ç”¨é—´éš”分区或哈希分区。 | +| 2 | attach_tablespace()将表空间附加到超表 | 将表空间附加到超表并使用它æ¥å­˜å‚¨å— | +| 3 | create_hypertable()创建超表 | 创建超表 | +| 4 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | +| 5 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | +| 6 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | +| 7 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | +| 8 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | +| 9 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | +| 10 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | +| 11 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | # **4.** TimescaleDBä¸å¯ç”¨æŽ¥å£ -- Gitee From e18e14fd9ac12eae83f03b9170b03e9729feece3 Mon Sep 17 00:00:00 2001 From: 08ming <754041231@qq.com> Date: Mon, 25 Dec 2023 18:48:54 +0800 Subject: [PATCH 153/434] 
=?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=89=A7=E8=A1=8Csql?= =?UTF-8?q?=E8=AF=AD=E5=8F=A5=E7=9A=84=E8=BF=87=E7=A8=8B=E4=B8=AD=E4=BC=9A?= =?UTF-8?q?=E5=87=BA=E7=8E=B0=E5=8D=A1=E4=BD=8F=E7=9A=84=E6=83=85=E5=86=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/timescaledb/README.md | 38 ++++++++-------------- contrib/timescaledb/src/base64_compat.cpp | 1 - contrib/timescaledb/src/cache.h | 23 +++++++++++++ contrib/timescaledb/src/chunk_adaptive.cpp | 1 - contrib/timescaledb/src/compat.h | 17 ++++++++++ contrib/timescaledb/src/event_trigger.cpp | 2 -- contrib/timescaledb/src/extension.cpp | 1 - contrib/timescaledb/src/func_cache.cpp | 1 - contrib/timescaledb/src/license_guc.cpp | 6 ---- contrib/timescaledb/src/tsdb.cpp | 18 ++++++++++ contrib/timescaledb/src/tsdb_dsm.cpp | 5 +-- 11 files changed, 72 insertions(+), 41 deletions(-) diff --git a/contrib/timescaledb/README.md b/contrib/timescaledb/README.md index ab4fd1e4a..e10409567 100644 --- a/contrib/timescaledb/README.md +++ b/contrib/timescaledb/README.md @@ -28,10 +28,8 @@ TimescaleDB能够以æ’件化的形å¼ï¼Œå¾ˆæ–¹ä¾¿çš„å¤„ç†æ—¶åºæ•°æ®ï¼Œéšç€ - 䏿”¯æŒéžç¼–译安装版本; - ç›®å‰TimescaleDB安装之åŽï¼Œä¸æ”¯æŒåˆ é™¤TimescaleDBæ’ä»¶ï¼› - TimescaleDBæ’ä»¶ä¾èµ–于public schemaï¼Œå› æ­¤ä¸æ”¯æŒä½¿ç”¨drop schema的方å¼åˆ é™¤public schemaï¼› -- TimescaleDB创建的超表需è¦ä½¿ç”¨drop table CASCADE;进行删除,ä¼šåŒæ—¶åˆ é™¤å…¶é™„加表; - 在ä¸åŒæ•°æ®åº“创建æ’件需è¦é‡å¯æ•°æ®åº“ï¼› - # **2.** TimescaleDB安装方法 @@ -115,29 +113,19 @@ SELECT time_bucket('15 minutes', time) AS fifteen_min, # **3.** TimescaleDBå¯ç”¨æŽ¥å£ -| åºå· | 接å£åç§° | 说明 | -| ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| 1 | chunk_relation_size | 获å–超表å—çš„å…³ç³»å¤§å° | -| 2 | chunk_relation_size_pretty | 获å–超表å—çš„å…³ç³»å¤§å° | -| 3 | drop_chunks | 删除时间范围完全在指定时间之å‰ï¼ˆæˆ–之åŽï¼‰çš„æ•°æ®åŒºå—,跨所有超级表或针对特定超级表è¿è¡Œã€‚ | -| 4 | hypertable_relation_size | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | -| 
5 | hypertable_relation_size_pretty | 获å–è¶…çº§è¡¨çš„å…³ç³»å¤§å° | -| 6 | indexes_relation_size | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | -| 7 | indexes_relation_size_pretty | 获å–è¶…è¡¨ä¸Šçš„ç´¢å¼•å¤§å° | -| 8 | set_number_partitions | 设置超表上空间维度的分区(片)数 | -| 9 | show_chunks | 获å–与超表关è”的区å—列表 | -| 10 | add_dimension()空间分区 | å‘超表添加é¢å¤–的分区维度。选择作为维度的列å¯ä»¥ä½¿ç”¨é—´éš”分区或哈希分区。 | -| 11 | attach_tablespace()将表空间附加到超表 | 将表空间附加到超表并使用它æ¥å­˜å‚¨å— | -| 12 | create_hypertable()创建超表 | 创建超表 | -| 13 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | -| 14 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | -| 15 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | -| 16 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | -| 17 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | -| 18 | timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | -| 19 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | -| 20 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | - +| åºå· | 接å£åç§° | 说明 | +|:----:|:------------------------------------------------------------:|:---------------------------------------------------------------------------------:| +| 1 | add_dimension()空间分区 | å‘超表添加é¢å¤–的分区维度。选择作为维度的列å¯ä»¥ä½¿ç”¨é—´éš”分区或哈希分区。 | +| 2 | attach_tablespace()将表空间附加到超表 | 将表空间附加到超表并使用它æ¥å­˜å‚¨å— | +| 3 | create_hypertable()创建超表 | 创建超表 | +| 4 | detach_tablespace()从一个或多个超级表中分离表空间。 | 从一个或多个超级表中分离表空间 | +| 5 | detach_tablespaces()从超表中分离所有表空间。 | 从超表中分离所有表空间 | +| 6 | set_chunk_time_interval()设置超表上的chunk_time_interval。 | è®¾ç½®è¶…è¡¨ä¸Šçš„åŒºå—æ—¶é—´é—´éš”,默认å•ä½ä¸ºå¤© | +| 7 | set_integer_now_funcï¼ˆï¼‰è®¾ç½®æ•´æ•°è¶…è¡¨å½“å‰æ—¶é—´å‡½æ•° | åªé€‚用于整数类超表,它设置一个函数,该函数以时间列的å•ä½è¿”回nowï¼ˆï¼‰å€¼ï¼ˆå½“å‰æ—¶é—´ï¼‰ | +| 8 | time_bucket()函数 | time_bucket用于分æžä»»æ„æ—¶é—´é—´éš”çš„æ•°æ® | +| 9 | 
timescaledb_information.hypertable获å–è¶…è¡¨ä¿¡æ¯ | 获å–è¶…è¡¨çš„ç›¸å…³ä¿¡æ¯æˆ–者查看一个表是å¦ä¸ºè¶…表 | +| 10 | timescaledb_information.license获å–许å¯ä¿¡æ¯ | èŽ·å–æœ‰å…³å½“å‰è®¸å¯è¯çš„ä¿¡æ¯ | +| 11 | show_tablespaces()将显示附加到超表的表空间。 | 将显示附加到超表的表空间。 | # **4.** TimescaleDBä¸å¯ç”¨æŽ¥å£ diff --git a/contrib/timescaledb/src/base64_compat.cpp b/contrib/timescaledb/src/base64_compat.cpp index 725b4caff..1568dd245 100644 --- a/contrib/timescaledb/src/base64_compat.cpp +++ b/contrib/timescaledb/src/base64_compat.cpp @@ -24,7 +24,6 @@ * BASE64 */ -static const char _base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static const int8 b64lookup[128] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, diff --git a/contrib/timescaledb/src/cache.h b/contrib/timescaledb/src/cache.h index 4906b60a1..1aa419387 100644 --- a/contrib/timescaledb/src/cache.h +++ b/contrib/timescaledb/src/cache.h @@ -85,6 +85,29 @@ typedef struct tsdb_session_context { char *tsdb_ts_guc_license_key; bool tsdb_loaded; bool tsdb_loader_present; + + char tsdb_base64[64]; + + int64 tsdb_fixed_memory_cache_size; + FmgrInfo tsdb_ddl_commands_fmgrinfo; + FmgrInfo tsdb_dropped_objects_fmgrinfo; + + Oid tsdb_extension_proxy_oid; + HTAB *tsdb_func_hash; + bool tsdb_downgrade_to_apache_enabled; + void *tsdb_tsl_handle; + PGFunction tsdb_tsl_validate_license_fn; + PGFunction tsdb_tsl_startup_fn; + bool tsdb_can_load; + GucSource tsdb_load_source; + + bool tsdb_dsm_init_done; + + dsm_handle tsdb_dsm_control_handle; + + Size tsdb_dsm_control_mapped_size; + void *tsdb_dsm_control_impl_private; + } tsdb_session_context; extern void ts_cache_init(Cache *cache); diff --git a/contrib/timescaledb/src/chunk_adaptive.cpp b/contrib/timescaledb/src/chunk_adaptive.cpp index 57354e3bb..508031896 100644 --- a/contrib/timescaledb/src/chunk_adaptive.cpp +++ b/contrib/timescaledb/src/chunk_adaptive.cpp @@ -32,7 +32,6 @@ /* This can be set to a positive number (and non-zero) 
value from tests to * simulate memory cache size. This makes it possible to run tests * deterministically. */ -static int64 fixed_memory_cache_size = -1; /* * Takes a PostgreSQL text representation of data (e.g., 40MB) and converts it diff --git a/contrib/timescaledb/src/compat.h b/contrib/timescaledb/src/compat.h index 88da9d1ce..f2d861b52 100644 --- a/contrib/timescaledb/src/compat.h +++ b/contrib/timescaledb/src/compat.h @@ -66,6 +66,23 @@ #define on_level (get_session_context()->tsdb_on_level) #define telemetry_level_options (get_session_context()->tsdb_telemetry_level_options) #define tsdb_first_start (get_session_context(true)->tsdb_first_start) +#define _base64 (get_session_context()->tsdb_base64) +#define func_hash (get_session_context()->tsdb_func_hash) +#define fixed_memory_cache_size (get_session_context()->tsdb_fixed_memory_cache_size) +#define ddl_commands_fmgrinfo (get_session_context()->tsdb_ddl_commands_fmgrinfo) +#define dropped_objects_fmgrinfo (get_session_context()->tsdb_dropped_objects_fmgrinfo) +#define extension_proxy_oid (get_session_context()->tsdb_extension_proxy_oid) +#define downgrade_to_apache_enabled (get_session_context()->tsdb_downgrade_to_apache_enabled) +#define tsl_handle (get_session_context()->tsdb_tsl_handle) +#define tsl_validate_license_fn (get_session_context()->tsdb_tsl_validate_license_fn) +#define tsl_startup_fn (get_session_context()->tsdb_tsl_startup_fn) +#define can_load (get_session_context()->tsdb_can_load) +#define load_source (get_session_context()->tsdb_load_source) + +#define dsm_init_done (get_session_context()->tsdb_dsm_init_done) +#define dsm_control_handle (get_session_context()->tsdb_dsm_control_handle) +#define dsm_control_mapped_size (get_session_context()->tsdb_dsm_control_mapped_size) +#define dsm_control_impl_private (get_session_context()->tsdb_dsm_control_impl_private) #define loaded (get_session_context()->tsdb_loaded) #define loader_present (get_session_context()->tsdb_loader_present) diff --git 
a/contrib/timescaledb/src/event_trigger.cpp b/contrib/timescaledb/src/event_trigger.cpp index 984b02cfa..8eec71a70 100644 --- a/contrib/timescaledb/src/event_trigger.cpp +++ b/contrib/timescaledb/src/event_trigger.cpp @@ -22,8 +22,6 @@ /* Function manager info for the event "pg_event_trigger_ddl_commands", which is * used to retrieve information on executed DDL commands in an event * trigger. The function manager info is initialized on extension load. */ -static FmgrInfo ddl_commands_fmgrinfo; -static FmgrInfo dropped_objects_fmgrinfo; /* * Get a list of executed DDL commands in an event trigger. diff --git a/contrib/timescaledb/src/extension.cpp b/contrib/timescaledb/src/extension.cpp index 976d46a74..0fca7f2fe 100644 --- a/contrib/timescaledb/src/extension.cpp +++ b/contrib/timescaledb/src/extension.cpp @@ -32,7 +32,6 @@ #define TS_UPDATE_SCRIPT_CONFIG_VAR "timescaledb.update_script_stage" #define POST_UPDATE "post" -static Oid extension_proxy_oid = InvalidOid; /* * ExtensionState tracks the state of extension metadata in the backend. 
diff --git a/contrib/timescaledb/src/func_cache.cpp b/contrib/timescaledb/src/func_cache.cpp index cd3554793..f7a0f7f5c 100644 --- a/contrib/timescaledb/src/func_cache.cpp +++ b/contrib/timescaledb/src/func_cache.cpp @@ -296,7 +296,6 @@ static FuncInfo funcinfo[] = { #define _MAX_CACHE_FUNCTIONS (sizeof(funcinfo) / sizeof(funcinfo[0])) -static HTAB *func_hash = NULL; static Oid proc_get_oid(HeapTuple tuple) diff --git a/contrib/timescaledb/src/license_guc.cpp b/contrib/timescaledb/src/license_guc.cpp index 449c076c9..53931dd32 100644 --- a/contrib/timescaledb/src/license_guc.cpp +++ b/contrib/timescaledb/src/license_guc.cpp @@ -23,12 +23,6 @@ TS_FUNCTION_INFO_V1(ts_license_expiration_time); TS_FUNCTION_INFO_V1(ts_license_edition); TS_FUNCTION_INFO_V1(ts_allow_downgrade_to_apache); -static bool downgrade_to_apache_enabled = false; -static void *tsl_handle = NULL; -static PGFunction tsl_validate_license_fn = NULL; -static PGFunction tsl_startup_fn = NULL; -static bool can_load = false; -static GucSource load_source = PGC_S_DEFAULT; #define TS_LICENSE_APACHE_ONLY_PRINT_TEXT "apache" #define TS_LICENSE_COMMUNITY_PRINT_TEXT "community" diff --git a/contrib/timescaledb/src/tsdb.cpp b/contrib/timescaledb/src/tsdb.cpp index 10fbc6181..5f6e8e755 100644 --- a/contrib/timescaledb/src/tsdb.cpp +++ b/contrib/timescaledb/src/tsdb.cpp @@ -3564,6 +3564,24 @@ void init_session_vars(void) psc->tsdb_loaded = false; psc->tsdb_loader_present = true; + + strcpy(psc->tsdb_base64, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); + + psc->tsdb_fixed_memory_cache_size=-1; + psc->tsdb_extension_proxy_oid = InvalidOid; + psc->tsdb_func_hash = NULL; + + psc->tsdb_downgrade_to_apache_enabled = false; + psc->tsdb_tsl_handle = NULL; + psc->tsdb_tsl_validate_license_fn = NULL; + psc->tsdb_tsl_startup_fn = NULL; + psc->tsdb_can_load = false; + psc->tsdb_load_source = PGC_S_DEFAULT; + + psc->tsdb_dsm_init_done = false; + + psc->tsdb_dsm_control_mapped_size = 0; + 
psc->tsdb_dsm_control_impl_private = NULL; _constraint_aware_append_init(); diff --git a/contrib/timescaledb/src/tsdb_dsm.cpp b/contrib/timescaledb/src/tsdb_dsm.cpp index bc8a501d0..26aefe29b 100644 --- a/contrib/timescaledb/src/tsdb_dsm.cpp +++ b/contrib/timescaledb/src/tsdb_dsm.cpp @@ -29,6 +29,7 @@ #include #include #include +#include "compat.h" #ifndef WIN32 #include #endif @@ -101,7 +102,6 @@ static bool dsm_control_segment_sane(dsm_control_header *control, static uint64 dsm_control_bytes_needed(uint32 nitems); /* Has this backend initialized the dynamic shared memory system yet? */ -static bool dsm_init_done = false; /* * List of dynamic shared memory segments used by this backend. @@ -129,10 +129,7 @@ static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list); * reference counted; instead, it lasts for the postmaster's entire * life cycle. For simplicity, it doesn't have a dsm_segment object either. */ -static dsm_handle dsm_control_handle; static dsm_control_header *dsm_control; -static Size dsm_control_mapped_size = 0; -static void *dsm_control_impl_private = NULL; /* * Start up the dynamic shared memory system. 
-- Gitee From 610d8d205f8a4c8159f36e50922c93bafaabdbd0 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Wed, 27 Dec 2023 12:55:31 +0800 Subject: [PATCH 154/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5plpgsql=5Fexec=5Ffunc?= =?UTF-8?q?tion=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/case_sensitive_test/schema_test.out | 12 ++++++++---- contrib/dolphin/expected/test_shows_3.out | 3 ++- contrib/dolphin/include/plugin_utils/plpgsql.h | 3 ++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/contrib/dolphin/expected/case_sensitive_test/schema_test.out b/contrib/dolphin/expected/case_sensitive_test/schema_test.out index e8483e0af..335d0f734 100644 --- a/contrib/dolphin/expected/case_sensitive_test/schema_test.out +++ b/contrib/dolphin/expected/case_sensitive_test/schema_test.out @@ -28,6 +28,7 @@ WARNING: TEST is not a valid encoding name. default value set Test_Schema_User | Test_Schema_User Test_Schema_User_bak | Test_Schema_User_bak --? blockchain | .* +--? coverage | .* --? cstore | .* --? db4ai | .* --? dbe_perf | .* @@ -45,7 +46,7 @@ WARNING: TEST is not a valid encoding name. default value set --? test_2 | .* --? test_3 | .* --? test_temp | .* -(24 rows) +(25 rows) ALTER SCHEMA test_1 WITHOUT BLOCKCHAIN; ALTER SCHEMA Test_1 WITHOUT BLOCKCHAIN; @@ -72,6 +73,7 @@ ALTER SCHEMA Test_1_BAK OWNER TO Test_Schema_User; Test_Schema_User | Test_Schema_User Test_Schema_User_bak | Test_Schema_User_bak --? blockchain | .* +--? coverage | .* --? cstore | .* --? db4ai | .* --? dbe_perf | .* @@ -89,7 +91,7 @@ ALTER SCHEMA Test_1_BAK OWNER TO Test_Schema_User; --? test_2 | .* --? test_3 | .* --? test_temp | .* -(24 rows) +(25 rows) SET SEARCH_PATH TO test_1, Test1; SHOW SEARCH_PATH; @@ -124,6 +126,7 @@ DROP SCHEMA test_1_bak, Test_1_BAK; Test_Schema_User | Test_Schema_User Test_Schema_User_bak | Test_Schema_User_bak --? blockchain | .* +--? coverage | .* --? cstore | .* --? db4ai | .* --? 
dbe_perf | .* @@ -140,7 +143,7 @@ DROP SCHEMA test_1_bak, Test_1_BAK; --? test_2 | .* --? test_3 | .* --? test_temp | .* -(22 rows) +(23 rows) -- dolphin_any_name DROP SCHEMA IF EXISTS Test_Schema_User CASCADE; @@ -155,6 +158,7 @@ CREATE SCHEMA AUTHORIZATION Test_Schema_User; Test_Schema_User | Test_Schema_User Test_Schema_User_bak | Test_Schema_User_bak --? blockchain | .* +--? coverage | .* --? cstore | .* --? db4ai | .* --? dbe_perf | .* @@ -171,7 +175,7 @@ CREATE SCHEMA AUTHORIZATION Test_Schema_User; --? test_2 | .* --? test_3 | .* --? test_temp | .* -(22 rows) +(23 rows) -- dolphin_qualified_name CREATE TABLE test_1_table (a int); diff --git a/contrib/dolphin/expected/test_shows_3.out b/contrib/dolphin/expected/test_shows_3.out index cca078b05..959f3ec57 100644 --- a/contrib/dolphin/expected/test_shows_3.out +++ b/contrib/dolphin/expected/test_shows_3.out @@ -5,6 +5,7 @@ show databases; -------------------- information_schema blockchain + coverage cstore db4ai db_show_3 @@ -21,7 +22,7 @@ show databases; sc snapshot sqladvisor -(18 rows) +(19 rows) create schema aa1; create schema aa2; diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index 583bc8090..2740008c1 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -1854,7 +1854,8 @@ extern THR_LOCAL PLpgSQL_execstate* plpgsql_estate; */ #define BULK_COLLECT_MAX ((Size)0x3FFFFFF) /* maximum number of rows can be bulk collected (by 3FFFFFFF/16) */ -extern Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, bool dynexec_anonymous_block); +extern Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, + bool dynexec_anonymous_block, int* coverage = NULL); extern Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, char* source_text); extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata); 
extern void plpgsql_xact_cb(XactEvent event, void* arg); -- Gitee From afd41c9dd845385d755cca50a434d3c1ab5cfa4c Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 27 Dec 2023 18:00:25 +0800 Subject: [PATCH 155/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbinary=E3=80=81blob?= =?UTF-8?q?=E7=AD=89=E4=BC=A0=E8=BE=93=E5=88=B0MySQL=E7=9A=84JDBC=E4=B8=8A?= =?UTF-8?q?=E4=BF=A1=E6=81=AF=E5=A4=B1=E7=9C=9F=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/bxconst_test.out | 20 +- contrib/dolphin/expected/charset_gbk_b_db.out | 412 ++++++------- .../dolphin/expected/charset_utf8mb4_b_db.out | 582 +++++++++--------- contrib/dolphin/expected/conv_cast_test.out | 4 +- contrib/dolphin/expected/db_b_parser2.out | 12 +- .../ignore_not_null_constraints.out | 14 +- contrib/dolphin/expected/kwlist.out | 6 +- .../json_cmp_operator_test.out | 24 +- .../string_func_test/db_b_compress_test.out | 6 +- contrib/dolphin/expected/test_binary.out | 186 +++--- contrib/dolphin/expected/test_blob.out | 12 +- contrib/dolphin/expected/test_condition.out | 96 +-- .../include/plugin_protocol/bytestream.h | 5 +- .../dolphin/output/binary_export_test.source | 21 +- contrib/dolphin/plugin_protocol/printtup.cpp | 15 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 10 +- 16 files changed, 713 insertions(+), 712 deletions(-) diff --git a/contrib/dolphin/expected/bxconst_test.out b/contrib/dolphin/expected/bxconst_test.out index e11954d5d..84f5dddaa 100644 --- a/contrib/dolphin/expected/bxconst_test.out +++ b/contrib/dolphin/expected/bxconst_test.out @@ -45,10 +45,10 @@ select * from t_bit; (2 rows) select * from t_bin; - a ------------- - 14392\0 - 76\0\0\0\0 + a +---------------- + \x313433393200 + \x373600000000 (2 rows) @@ -366,7 +366,7 @@ create table t_bin(a binary(6)); select b'11100000111000'; ?column? 
---------- - 88 + \x3838 (1 row) select pg_typeof(b'11100000111000'); @@ -378,7 +378,7 @@ select pg_typeof(b'11100000111000'); select x'4c'; ?column? ---------- - L + \x4c (1 row) select pg_typeof(x'4c'); @@ -399,10 +399,10 @@ select * from t_bit; (2 rows) select * from t_bin; - a -------------- - 88\0\0\0\0 - L\0\0\0\0\0 + a +---------------- + \x383800000000 + \x4c0000000000 (2 rows) diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index d8bfe9ad0..de3a73464 100644 --- a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -110,9 +110,9 @@ LINE 1: SELECT CAST('高斯' AS binary) COLLATE "utf8mb4_unicode_ci"... ^ CONTEXT: referenced column: binary SELECT CAST('高斯' AS binary) COLLATE "binary"; - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST('E9AB98E696AF' AS blob) COLLATE "utf8mb4_unicode_ci"; -- ERROR @@ -146,33 +146,33 @@ LINE 1: SELECT '高斯' COLLATE "binary"; ^ -- 中文 const charset SELECT CAST('高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_binary'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary --------- - ¸ß˹ + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT _binary'高斯'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4'高斯'; @@ -663,9 +663,9 @@ LINE 1: ... 
^ -- -- -- diff charset & implicit collation SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -687,9 +687,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -701,21 +701,21 @@ ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chin LINE 1: ...30'高斯DB' , '高斯DB') result, pg_collation_for(result); ^ SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) -- -- -- explicit & 
implicit @@ -786,9 +786,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - ¸ß˹DB¸ß˹DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------------------+-------------------- + ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -810,9 +810,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+------------------ - ¸ß˹DB¸ß˹DB | utf8mb4_bin + result | pg_collation_for +--------------------------+------------------ + ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_bin (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -864,28 +864,28 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co (1 row) SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - ¸ß˹DB¸ß˹DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------------------+-------------------- + \xe9ab98e696af4442¸ß˹DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------+------------------ - 高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +----------------------------+------------------ + \xe9ab98e696af4442高斯DB | gbk_chinese_ci (1 row) -- -- -- concat 3 args SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for 
---------------------------+------------------ - 高斯DB高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 高斯DB高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -901,15 +901,15 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 高斯DB高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------+-------------------- - ¸ß˹DB高斯DB高斯DB | gb18030_chinese_ci + result | pg_collation_for +----------------------------------+-------------------- + ¸ß˹DB高斯DB\xe9ab98e696af4442 | gb18030_chinese_ci (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR @@ -927,9 +927,9 @@ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _ut (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 高斯DB高斯DB高斯DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + 
高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) -- -- -- const compare CONCAT @@ -1316,9 +1316,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+---------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------------------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 (1 row) SELECT char_length(futf8_bin), @@ -1512,9 +1512,9 @@ LINE 1: SELECT fbin COLLATE gbk_chinese_ci FROM t_diff_charset_colum... ^ CONTEXT: referenced column: fbin SELECT fbin COLLATE 'binary' FROM t_diff_charset_columns; - fbin ----------- - 高斯DB + fbin +-------------------- + \xe9ab98e696af4442 (1 row) SELECT fbin COLLATE "zh_CN.utf8" FROM t_diff_charset_columns; -- ERROR @@ -2812,39 +2812,39 @@ SELECT @id_res,@a_res; SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; -- ERROR ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 + id | a | b +----+--------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE 
a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -2855,13 +2855,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+---------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (5 rows) DELETE FROM t_charset_utf8mb4 WHERE a='高斯'; @@ -2974,37 +2974,37 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 + id | a | b +----+--------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | 
\xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; @@ -3022,13 +3022,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+---------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (5 rows) DELETE FROM t_charset_gbk WHERE a='高斯'; @@ -3069,33 +3069,33 @@ SHOW collation_connection; -- 中文 SELECT CAST('高斯' AS binary); - binary --------- - 楂樻柉 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_binary'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary --------- - 楂樻柉 + binary +---------------- + \xe9ab98e696af (1 row) SELECT _binary'高斯'; - ?column? ----------- - 高斯 + ?column? 
+---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4'高斯'; @@ -3111,9 +3111,9 @@ SELECT _gbk'高斯'; (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -3241,43 +3241,43 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+-------- - 2 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 2 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+------ - 1 | 高斯 | 高斯 + id | a | b +----+------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------- - 3 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+--------+-------- - 3 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 4 | 楂樻柉DB | 高斯DB - 5 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 5 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------- - 4 | 楂樻柉DB | 高斯DB - 5 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 5 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -3287,14 +3287,14 @@ UPDATE t_charset_utf8mb4 SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE 
t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 5 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 5 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (6 rows) DELETE FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯'; @@ -3408,51 +3408,51 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+------ - 1 | 高斯 | 高斯 + id | a | b +----+------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------- - 2 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 
+ 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 5 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) SELECT * FROM t_charset_gbk WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+-------- - 5 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) UPDATE t_charset_gbk SET a='高斯DB', b='高斯DB' @@ -3462,14 +3462,14 @@ UPDATE t_charset_gbk SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 5 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 5 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (6 rows) DELETE FROM t_charset_gbk WHERE a=_utf8mb4'高斯'; @@ -3504,9 +3504,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------------------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 (1 row) SELECT char_length(futf8_bin), diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index 338b9993c..b089bebcb 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ 
b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -62,13 +62,13 @@ SELECT _gbk'ABCD' = _GbK'ABCD'; SELECT _binary E'', _binary E'' IS NULL; ?column? | ?column? ----------+---------- - | f + \x | f (1 row) SELECT _binary E'\\xe9ab98e696af'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4 E'\\xe9ab98e696af'; @@ -90,13 +90,13 @@ LINE 1: SELECT _utf8mb4 X'\\xe9ab98e696af'; SELECT _binary X'', _binary X'' IS NULL; ?column? | ?column? ----------+---------- - | f + \x | f (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -114,19 +114,19 @@ SELECT _gbk X'E9AB98E696AF'; SELECT _binary B'', _binary B'' IS NULL; ?column? | ?column? ----------+---------- - | f + \x | f (1 row) SELECT _binary B'0'; ?column? ---------- - \0 + \x00 (1 row) SELECT _binary B'111010011010101110011000111001101001011010101111'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4 B'111010011010101110011000111001101001011010101111'; @@ -250,9 +250,9 @@ ERROR: COLLATION "gbk_chinese_ci" is not valid for binary type LINE 1: SELECT _binary'高斯' COLLATE gbk_chinese_ci; ^ SELECT _binary'高斯' COLLATE 'binary'; - ?column? ----------- - 高斯 + ?column? 
+---------------- + \xe9ab98e696af (1 row) SELECT _binary'高斯' COLLATE "zh_CN.utf8"; -- ERROR @@ -340,9 +340,9 @@ LINE 1: SELECT CAST('高斯' AS binary) COLLATE "utf8mb4_unicode_ci"; ^ CONTEXT: referenced column: binary SELECT CAST('高斯' AS binary) COLLATE "binary"; - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST('E9AB98E696AF' AS blob) COLLATE "utf8mb4_unicode_ci"; -- ERROR @@ -437,33 +437,33 @@ WARNING: invalid input syntax for type bigint: "b" DROP TABLE t_collate_expr; -- 中文 const charset SELECT CAST('高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_binary'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary --------- - 楂樻柉 + binary +---------------- + \xe9ab98e696af (1 row) SELECT _binary'高斯'; - ?column? ----------- - 高斯 + ?column? 
+---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4'高斯'; @@ -1054,9 +1054,9 @@ LINE 1: ...æ–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -- -- -- diff charset & implicit collation SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -1078,9 +1078,9 @@ SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -1094,21 +1094,21 @@ SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); (1 row) SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for 
+--------------------------+-------------------- + \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) -- -- -- explicit & implicit @@ -1179,9 +1179,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -1203,9 +1203,9 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+------------------ - 高斯DB高斯DB | utf8mb4_bin + result | pg_collation_for +--------------------------+------------------ + 高斯DB\xe9ab98e696af4442 | utf8mb4_bin (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -1257,28 +1257,28 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co (1 row) SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------------------+-------------------- + \xe9ab98e696af4442高斯DB | utf8mb4_unicode_ci (1 row) SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + \xe9ab98e696af4442高斯DB | utf8mb4_general_ci (1 row) -- -- -- concat 3 args SELECT CONCAT(_binary'高斯DB', 
_gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------+-------------------- - 高斯DB高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------------+-------------------- + \xe9ab98e696af4442高斯DB高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 楂樻柉DB楂樻柉DB楂樻柉DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + \xe9ab98e696af4442楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -1294,15 +1294,15 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------+-------------------- - 高斯DB高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------------+-------------------- + 高斯DB高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------+-------------------- - 高斯DB楂樻柉DB楂樻柉DB | gb18030_chinese_ci + result | pg_collation_for +----------------------------------+-------------------- + 高斯DB楂樻柉DB\xe9ab98e696af4442 | gb18030_chinese_ci (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR @@ -1320,9 +1320,9 @@ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _ut (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 
楂樻柉DB楂樻柉DB楂樻柉DB | gbk_chinese_ci + result | pg_collation_for +------------------------------------+------------------ + 楂樻柉DB楂樻柉DB\xe9ab98e696af4442 | gbk_chinese_ci (1 row) -- -- -- const compare CONCAT @@ -1709,9 +1709,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+-------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------------------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 (1 row) SELECT char_length(futf8_bin), @@ -1905,9 +1905,9 @@ LINE 1: SELECT fbin COLLATE gbk_chinese_ci FROM t_diff_charset_colum... ^ CONTEXT: referenced column: fbin SELECT fbin COLLATE 'binary' FROM t_diff_charset_columns; - fbin --------- - 高斯DB + fbin +-------------------- + \xe9ab98e696af4442 (1 row) SELECT fbin COLLATE "zh_CN.utf8" FROM t_diff_charset_columns; -- ERROR @@ -2197,9 +2197,9 @@ LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- with binary & implicit collation SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2209,9 +2209,9 @@ SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_cha (1 row) SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------+-------------------- - 高斯db高斯DB | 
utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯db\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2221,9 +2221,9 @@ SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_cha (1 row) SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------+-------------------- - ˹DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +-----------------------+-------------------- + ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2233,9 +2233,9 @@ SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_char (1 row) SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------+-------------------- - ˹DB高斯DB | utf8mb4_general_ci + result | pg_collation_for +-----------------------+-------------------- + ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci (1 row) SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2245,9 +2245,9 @@ SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_ (1 row) SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------+-------------------- - 高斯DBE9AB98E696AF | utf8mb4_general_ci + result | pg_collation_for +--------------------------------+-------------------- + \xe9ab98e696af4442E9AB98E696AF | utf8mb4_general_ci (1 row) -- -- concat column and @uservar @@ -3164,9 +3164,9 @@ ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... 
^ SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯db | utf8mb4_general_ci + result | pg_collation_for +--------------------------+-------------------- + \xe9ab98e696af4442高斯db | utf8mb4_general_ci (1 row) -- -- test explicit collate on blob result @@ -3735,9 +3735,9 @@ ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "bi... ^ SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); - result | pg_collation_for ---------------+-------------------- - 高斯DB高斯DB | utf8mb4_unicode_ci + result | pg_collation_for +--------------------------+-------------------- + 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci (1 row) SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR @@ -3881,43 +3881,43 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+-------- - 2 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 2 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+------ - 1 | 高斯 | 高斯 + id | a | b +----+------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------- - 3 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+--------+-------- - 3 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE 
a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 4 | 楂樻柉DB | 高斯DB - 5 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 5 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------- - 4 | 楂樻柉DB | 高斯DB - 5 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 5 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -3927,14 +3927,14 @@ UPDATE t_charset_utf8mb4 SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 5 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 5 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (6 rows) DELETE FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯'; @@ -3948,27 +3948,27 @@ SELECT * FROM t_charset_utf8mb4 ORDER BY id; INSERT INTO t_charset_utf8mb4(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_utf8mb4 SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯DB | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 7 | 高斯DB | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) INSERT INTO t_charset_utf8mb4(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE 
a=CONCAT('高斯','db'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯db | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) UPDATE t_charset_utf8mb4 SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯db | 高斯DB - 8 | DB高斯 | DB高斯 + id | a | b +----+--------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | DB高斯 | \x4442e9ab98e696af (2 rows) DROP TABLE IF EXISTS t_charset_utf8mb4; @@ -4075,51 +4075,51 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; - id | a | b -----+------+------ - 1 | 高斯 | 高斯 + id | a | b +----+------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+-------- - 2 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯DB' ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b +----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+-------- - 3 | 楂樻柉DB | 高斯DB - 4 | 楂樻柉DB | 高斯DB - 6 | 楂樻柉DB | 高斯DB + id | a | b 
+----+----------+-------------------- + 3 | 楂樻柉DB | \xe9ab98e696af4442 + 4 | 楂樻柉DB | \xe9ab98e696af4442 + 6 | 楂樻柉DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 5 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) SELECT * FROM t_charset_gbk WHERE a='楂樻柉' ORDER BY id; - id | a | b -----+--------+-------- - 5 | 楂樻柉 | 楂樻柉 + id | a | b +----+--------+---------------------- + 5 | 楂樻柉 | \xe6a582e6a8bbe69f89 (1 row) UPDATE t_charset_gbk SET a='高斯DB', b='高斯DB' @@ -4129,14 +4129,14 @@ UPDATE t_charset_gbk SET a=_gbk'高斯DB', b=(_gbk'高斯DB')::varbinary(16) UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_gbk'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 5 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 5 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (6 rows) DELETE FROM t_charset_gbk WHERE a=_utf8mb4'高斯'; @@ -4151,27 +4151,27 @@ SELECT * FROM t_charset_gbk ORDER BY id; INSERT INTO t_charset_gbk(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_gbk SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯DB | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+--------+-------------------- + 7 | 高斯DB | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) INSERT INTO t_charset_gbk(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯db | 高斯DB - 8 | 高斯DB | 
高斯DB + id | a | b +----+--------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) UPDATE t_charset_gbk SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+--------+-------- - 7 | 高斯db | 高斯DB - 8 | DB高斯 | DB高斯 + id | a | b +----+--------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | DB高斯 | \x4442e9ab98e696af (2 rows) DROP TABLE IF EXISTS t_charset_gbk; @@ -4633,33 +4633,33 @@ SHOW collation_connection; -- 中文 SELECT CAST('高斯' AS binary); - binary --------- - ¸ß˹ + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_binary'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_utf8mb4'高斯' AS binary); - binary --------- - ¸ß˹ + binary +---------------- + \xe9ab98e696af (1 row) SELECT CAST(_gbk'高斯' AS binary); - binary --------- - 高斯 + binary +---------------- + \xe9ab98e696af (1 row) SELECT _binary'高斯'; - ?column? ----------- - 高斯 + ?column? +---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4'高斯'; @@ -4675,9 +4675,9 @@ SELECT _gbk'高斯'; (1 row) SELECT _binary X'E9AB98E696AF'; - ?column? ----------- - 高斯 + ?column? 
+---------------- + \xe9ab98e696af (1 row) SELECT _utf8mb4 X'E9AB98E696AF'; @@ -4805,39 +4805,39 @@ SELECT @id_res,@a_res; SELECT * FROM t_charset_utf8mb4 WHERE a='楂樻柉' ORDER BY id; -- ERROR ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 + id | a | b +----+--------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a | b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_utf8mb4 WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_utf8mb4 WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) UPDATE t_charset_utf8mb4 SET a='高斯DB', b='高斯DB' @@ -4848,13 +4848,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_utf8mb4 SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+---------- 
- 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (5 rows) DELETE FROM t_charset_utf8mb4 WHERE a='高斯'; @@ -4868,27 +4868,27 @@ SELECT * FROM t_charset_utf8mb4 ORDER BY id; INSERT INTO t_charset_utf8mb4(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_utf8mb4 SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+---------- - 7 | 高斯DB | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 7 | 高斯DB | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) INSERT INTO t_charset_utf8mb4(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+---------- - 7 | 高斯db | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) UPDATE t_charset_utf8mb4 SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_utf8mb4 ORDER BY id; - id | a | b -----+----------+---------- - 7 | 高斯db | 高斯DB - 8 | DB高斯 | DB高斯 + id | a | b +----+----------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | DB高斯 | \x4442e9ab98e696af (2 rows) DROP TABLE IF EXISTS t_charset_utf8mb4; @@ -4994,37 +4994,37 @@ SELECT @id_res,@a_res; -- -- -- -- test condition SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯' ORDER BY id; - id | a | b -----+--------+-------- - 1 | 高斯 | 高斯 + id | a | b +----+--------+---------------- + 1 | 高斯 | \xe9ab98e696af (1 row) SELECT * FROM t_charset_gbk WHERE a=_gbk'高斯db' COLLATE gbk_chinese_ci ORDER BY id; - id | a 
| b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_binary'高斯DB' ORDER BY id; -- ERROR - id | a | b -----+----------+---------- - 2 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 2 | 高斯DB | \xe9ab98e696af4442 (1 row) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯DB' ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯db' COLLATE utf8mb4_unicode_ci ORDER BY id; - id | a | b -----+--------+---------- - 3 | ¸ß˹DB | 高斯DB - 4 | ¸ß˹DB | 高斯DB - 6 | ¸ß˹DB | 高斯DB + id | a | b +----+--------+-------------------- + 3 | ¸ß˹DB | \xe9ab98e696af4442 + 4 | ¸ß˹DB | \xe9ab98e696af4442 + 6 | ¸ß˹DB | \xe9ab98e696af4442 (3 rows) SELECT * FROM t_charset_gbk WHERE a=_utf8mb4'高斯' ORDER BY id; @@ -5042,13 +5042,13 @@ ERROR: invalid byte sequence for encoding "GBK": 0x89 0x27 UPDATE t_charset_gbk SET a=_utf8mb4'高斯DB', b=(_utf8mb4'高斯DB')::varbinary(16) WHERE a=_utf8mb4'高斯DB'; SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+---------- - 1 | 高斯 | 高斯 - 2 | 高斯DB | 高斯DB - 3 | 高斯DB | 高斯DB - 4 | 高斯DB | 高斯DB - 6 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 1 | 高斯 | \xe9ab98e696af + 2 | 高斯DB | \xe9ab98e696af4442 + 3 | 高斯DB | \xe9ab98e696af4442 + 4 | 高斯DB | \xe9ab98e696af4442 + 6 | 高斯DB | \xe9ab98e696af4442 (5 rows) DELETE FROM t_charset_gbk WHERE a='高斯'; @@ -5062,27 +5062,27 @@ SELECT * FROM t_charset_gbk ORDER BY id; INSERT INTO t_charset_gbk(a,b) VALUES(CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)); INSERT INTO t_charset_gbk SELECT 0, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b 
-----+----------+---------- - 7 | 高斯DB | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 7 | 高斯DB | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) INSERT INTO t_charset_gbk(id,a,b) VALUES(7, CONCAT('高斯','DB'), (CONCAT('高斯','DB'))::varbinary(16)) ON DUPLICATE KEY UPDATE a=CONCAT('高斯','db'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+---------- - 7 | 高斯db | 高斯DB - 8 | 高斯DB | 高斯DB + id | a | b +----+----------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | 高斯DB | \xe9ab98e696af4442 (2 rows) UPDATE t_charset_gbk SET a=CONCAT('DB','高斯'), b=(CONCAT('DB','高斯'))::varbinary(16) WHERE a=CONCAT('高斯','DB'); SELECT * FROM t_charset_gbk ORDER BY id; - id | a | b -----+----------+---------- - 7 | 高斯db | 高斯DB - 8 | DB高斯 | DB高斯 + id | a | b +----+----------+-------------------- + 7 | 高斯db | \xe9ab98e696af4442 + 8 | DB高斯 | \x4442e9ab98e696af (2 rows) DROP TABLE IF EXISTS t_charset_gbk; @@ -5109,9 +5109,9 @@ SELECT futf8_bin, fgb18030_bin, fgb18030_chi, fbin FROM t_diff_charset_columns; - futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin ------------+-----------+----------+----------+--------------+--------------+---------- - 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB + futf8_bin | futf8_uni | fgbk_bin | fgbk_chi | fgb18030_bin | fgb18030_chi | fbin +-----------+-----------+----------+----------+--------------+--------------+-------------------- + 高斯DB | 高斯db | 高斯DB | 高斯db | 高斯DB | 高斯db | \xe9ab98e696af4442 (1 row) SELECT char_length(futf8_bin), diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index c8e7c8754..90b62f7a6 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -929,7 +929,7 @@ set dolphin.sql_mode = treat_bxconst_as_binary; select b'11100000111000'; ?column? 
---------- - 88 + \x3838 (1 row) select conv(b'11100000111000', 10, 8); @@ -953,7 +953,7 @@ select conv(b'11100000111000'::int8, 20, 8); select x'4c'; ?column? ---------- - L + \x4c (1 row) select conv(x'4c', 10, 8); diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index eacc2e9a3..de0c19c29 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -746,12 +746,12 @@ insert into t_binary values(b'1',b'111',b'111'); insert into t_binary values(b'1',b'11111',b'111'); insert into t_binary values(b'1',b'111111111',b'111'); select *,length(a),length(b),length(c) from t_binary order by 1,2,3; - a | b | c | length | length | length -------+------+------------------------+--------+--------+-------- - \x01 | \x01 | \x07\0\0\0\0\0\0\0\0\0 | 1 | 2 | 10 - \x01 | \x07 | \x07\0\0\0\0\0\0\0\0\0 | 1 | 1 | 10 - \x01 | \x1F | \x07\0\0\0\0\0\0\0\0\0 | 1 | 1 | 10 - | | | | | + a | b | c | length | length | length +------+--------+------------------------+--------+--------+-------- + \x01 | \x01ff | \x07000000000000000000 | 1 | 2 | 10 + \x01 | \x07 | \x07000000000000000000 | 1 | 1 | 10 + \x01 | \x1f | \x07000000000000000000 | 1 | 1 | 10 + | | | | | (4 rows) drop table t_binary; diff --git a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out index d69423193..88656a7a8 100644 --- a/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out +++ b/contrib/dolphin/expected/keyword_ignore_test/ignore_not_null_constraints.out @@ -966,9 +966,9 @@ insert ignore into t_binaryn values (null); WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). 
select *,hex(c) from t_binaryn; - c | hex ---------------+-------------- - \0\0\0\0\0\0 | 000000000000 + c | hex +----------------+-------------- + \x000000000000 | 000000000000 (1 row) insert into t_binaryn values(b'01'); @@ -978,10 +978,10 @@ DETAIL: Failing row contains (null). WARNING: null value in column "c" violates not-null constraint DETAIL: Failing row contains (null). select *,hex(c) from t_binaryn; - c | hex ---------------+-------------- - \0\0\0\0\0\0 | 000000000000 - \0\0\0\0\0\0 | 000000000000 + c | hex +----------------+-------------- + \x000000000000 | 000000000000 + \x000000000000 | 000000000000 (2 rows) -- mixture diff --git a/contrib/dolphin/expected/kwlist.out b/contrib/dolphin/expected/kwlist.out index f87b545a5..7f9ec1402 100644 --- a/contrib/dolphin/expected/kwlist.out +++ b/contrib/dolphin/expected/kwlist.out @@ -270,9 +270,9 @@ LINE 1: select case when end then binary else (binary) end from end; ^ CONTEXT: referenced column: binary select case when end then binary end else binary end end from end; - end ------ - 1 + end +------ + \x31 (1 row) /* only */ diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out index 6e0827320..e3289233e 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out @@ -539,9 +539,9 @@ select `binary`, `json`, `binary` < `json` as `binary `json` as `binary<>json`, `binary` = `json` as `binary=json`, `binary` <=> `json` as `binary<=>json` from test_json_table; - binary | json | binary>json | binary>=json | binaryjson | binary=json | binary<=>json 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- - 1.23a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f + binary | json | binary>json | binary>=json | binaryjson | binary=json | binary<=>json +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- + \x312e3233610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f (1 row) select `binary`, `json`, @@ -549,9 +549,9 @@ select `binary`, `json`, `json` < `binary` as `json `binary` as `json<>binary`, `json` = `binary` as `json=binary`, `json` <=> `binary` as `json<=>binary` from test_json_table; - binary | json | json>binary | json>=binary | jsonbinary | json=binary | json<=>binary ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- - 
1.23a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + binary | json | json>binary | json>=binary | jsonbinary | json=binary | json<=>binary +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------+-------------+--------------+-------------+--------------+--------------+--------------+-------------+--------------- + \x312e3233610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `varbinary`, `json`, @@ -559,9 +559,9 @@ select `varbinary`, `json`, `varbinary` < `json` as `varbinary `json` as `varbinary<>json`, `varbinary` = `json` as `varbinary=json`, `varbinary` <=> `json` as `varbinary<=>json` from test_json_table; - varbinary | json | varbinary>json | varbinary>=json | varbinaryjson | varbinary=json | varbinary<=>json ------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ - 1.23a | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f + varbinary | json | varbinary>json | varbinary>=json | varbinaryjson | varbinary=json | varbinary<=>json +--------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ + \x312e323361 | {"a": 1, "b": 2} | f | f | t | t | t | t | f | f (1 row) select `varbinary`, `json`, @@ -569,9 +569,9 @@ select `varbinary`, `json`, `json` < `varbinary` as `json 
`varbinary` as `json<>varbinary`, `json` = `varbinary` as `json=varbinary`, `json` <=> `varbinary` as `json<=>varbinary` from test_json_table; - varbinary | json | json>varbinary | json>=varbinary | jsonvarbinary | json=varbinary | json<=>varbinary ------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ - 1.23a | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f + varbinary | json | json>varbinary | json>=varbinary | jsonvarbinary | json=varbinary | json<=>varbinary +--------------+------------------+----------------+-----------------+----------------+-----------------+-----------------+-----------------+----------------+------------------ + \x312e323361 | {"a": 1, "b": 2} | t | t | f | f | t | t | f | f (1 row) select `tinyblob`, `json`, diff --git a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out index 94b906521..4dbf3f114 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out @@ -12,9 +12,9 @@ select length(compress('string for test compress function aaaaaaaaaaaaaaaaaaaaaa create table t1 (a binary(255), b text(255), c char(255)); insert into t1 (a,b,c) values (compress('string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb'),'string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb', 'string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb'); SELECT a FROM t1; - a 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - y\0\0\0x+.)KWH/R(I-.QH-(J-.VH+K.SH:PH\x02\x02\0\x01-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 + a +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x79000000789c2b2e29cacc4b5748cb2f5228492d2e5148cecf2d284a2d2e56482bcd4b2ec9cccf5348a43a504802020001942dcb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 (1 row) SELECT HEX(COMPRESS(b)) FROM t1; 
diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 3a35acfcd..71b415f52 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -24,10 +24,10 @@ insert into binary_templates values ('aaaaaa', 'aaa', 'aaaaaa'); ERROR: The input length:6 exceeds the maximum length:5. CONTEXT: referenced column: c select * from binary_templates; - a | b | c -----------+---------+------- - \x616161 | aaa\0\0 | aaa - \x616161 | aaa\0\0 | aaaaa + a | b | c +----------+--------------+-------------- + \x616161 | \x6161610000 | \x616161 + \x616161 | \x6161610000 | \x6161616161 (2 rows) create table test_bytea (a bytea); @@ -110,36 +110,36 @@ EXECUTE insert_varbinary(1, 'aaaaaaaaaaa'::bytea); -- length 11 ERROR: The input length:11 exceeds the maximum length:10. CONTEXT: referenced column: field_name select * from t_binary_061; - id | field_name -----+------------------- - 1 | aaa\0\0\0\0\0\0\0 + id | field_name +----+------------------------ + 1 | \x61616100000000000000 (1 row) select * from t_varbinary_061; id | field_name ----+------------ - 1 | aaa + 1 | \x616161 (1 row) drop table if exists t_binary_061; drop table if exists t_varbinary_061; -- binary test enhance select binary '\t'; - binary ----------- - + binary +-------- + \x09 (1 row) select binary '\\'; binary -------- - \ + \x5c (1 row) select binary '\a'; binary -------- - a + \x61 (1 row) select binary '\b'; @@ -151,86 +151,85 @@ select binary '\b'; select binary '\n'; binary -------- - + - + \x0a (1 row) select binary '\r'; binary -------- - \r + \x0d (1 row) select binary '\v'; binary -------- - v + \x76 (1 row) select binary '\f'; binary -------- - f + \x66 (1 row) select binary '\"'; binary -------- - " + \x22 (1 row) select binary '\%'; binary -------- - \% + \x5c25 (1 row) select binary '\_'; binary -------- - \_ + \x5c5f (1 row) select binary '\0'; binary -------- - \0 + \x00 (1 row) select binary '\z'; binary -------- - z 
+ \x7a (1 row) select binary '\pqy'; - binary --------- - pqy + binary +---------- + \x707179 (1 row) select binary 'æ•°æ®åº“'; - binary --------- - æ•°æ®åº“ + binary +---------------------- + \xe695b0e68daee5ba93 (1 row) select binary E'\t'; - binary ----------- - + binary +-------- + \x09 (1 row) select binary E'\\'; binary -------- - \ + \x5c (1 row) select binary E'\a'; binary -------- - a + \x61 (1 row) select binary E'\b'; @@ -242,44 +241,43 @@ select binary E'\b'; select binary E'\n'; binary -------- - + - + \x0a (1 row) select binary E'\r'; binary -------- - \r + \x0d (1 row) select binary E'\v'; binary -------- - v + \x76 (1 row) select binary E'\f'; binary -------- - \x0C + \x0c (1 row) select binary E'\"'; binary -------- - " + \x22 (1 row) select binary E'\%'; binary -------- - % + \x25 (1 row) select binary E'\_'; binary -------- - _ + \x5f (1 row) select binary E'\0'; @@ -287,69 +285,69 @@ ERROR: invalid byte sequence for encoding "UTF8": 0x00 select binary E'\z'; binary -------- - z + \x7a (1 row) select binary E'\pqy'; - binary --------- - pqy + binary +---------- + \x707179 (1 row) select binary E'æ•°æ®åº“'; - binary --------- - æ•°æ®åº“ + binary +---------------------- + \xe695b0e68daee5ba93 (1 row) -- binary type cast test select 'abc'::binary; - binary --------- - abc + binary +---------- + \x616263 (1 row) select 'abcdefgh'::binary; - binary ----------- - abcdefgh + binary +-------------------- + \x6162636465666768 (1 row) select 'abc'::binary(20); - binary ---------------------------------------- - abc\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 + binary +-------------------------------------------- + \x6162630000000000000000000000000000000000 (1 row) select 'a啊填啊'::binary; - binary ---------- - a啊填啊 + binary +------------------------ + \x61e5958ae5a1abe5958a (1 row) -- other type select 123::binary; - binary --------- - 123 + binary +---------- + \x313233 (1 row) select 123.456::binary; - binary ---------- - 123.456 + binary +------------------ + 
\x3132332e343536 (1 row) select '2020-01-01'::date::binary; - binary ------------- - 2020-01-01 + binary +------------------------ + \x323032302d30312d3031 (1 row) select '12:13:13'::time::binary; - binary ----------- - 12:13:13 + binary +-------------------- + \x31323a31333a3133 (1 row) --errreport @@ -424,9 +422,9 @@ select 'abc%' like binary 'abc|_' escape '|'; -- test binary expr gram; select binary sin(1); - sin -------------------- - 0.841470984807897 + sin +-------------------------------------- + \x302e383431343730393834383037383937 (1 row) drop table if exists t1; @@ -434,15 +432,15 @@ NOTICE: table "t1" does not exist, skipping create table t1(a int, b text); insert into t1 values(1,'test'); select binary a from t1; - a ---- - 1 + a +------ + \x31 (1 row) select binary b from t1; - b ------- - test + b +------------ + \x74657374 (1 row) select binary a = binary '3' from t1; @@ -459,22 +457,22 @@ insert into t1 values('ad','name2'); ERROR: The input length:2 exceeds the maximum length:1. 
CONTEXT: referenced column: a select * from t1; - a | b ----+------- - a | name1 + a | b +------+------- + \x61 | name1 (1 row) -- enhase origin request SELECT BINARY 'Geeksforgeeks'; - binary ---------------- - Geeksforgeeks + binary +------------------------------ + \x4765656b73666f726765656b73 (1 row) select binary repeat('a', 3); - repeat --------- - aaa + repeat +---------- + \x616161 (1 row) create table test33 (c binary(3)); @@ -527,9 +525,9 @@ select cast('' as binary(0)); (1 row) select cast('ab' as binary(12)); - binary ------------------------- - ab\0\0\0\0\0\0\0\0\0\0 + binary +---------------------------- + \x616200000000000000000000 (1 row) create table test34 (a binary(0)); diff --git a/contrib/dolphin/expected/test_blob.out b/contrib/dolphin/expected/test_blob.out index a7067971b..0344c0b04 100644 --- a/contrib/dolphin/expected/test_blob.out +++ b/contrib/dolphin/expected/test_blob.out @@ -1238,16 +1238,16 @@ c5 binary(5), c23 varbinary(50))default charset=utf8; insert t_dezebium_0007_02 values('1010101','1010101','1010101','1010101','ab','ab'); select * from t_dezebium_0007_02; - c1 | c2 | c3 | c4 | c5 | c23 ----------+---------+---------+---------+----------+----- - 1010101 | 1010101 | 1010101 | 1010101 | ab\0\0\0 | ab + c1 | c2 | c3 | c4 | c5 | c23 +---------+---------+---------+---------+----------------+----- + 1010101 | 1010101 | 1010101 | 1010101 | ab\000\000\000 | ab (1 row) set bytea_output=escape; select * from t_dezebium_0007_02; - c1 | c2 | c3 | c4 | c5 | c23 ----------+---------+---------+---------+----------+----- - 1010101 | 1010101 | 1010101 | 1010101 | ab\0\0\0 | ab + c1 | c2 | c3 | c4 | c5 | c23 +---------+---------+---------+---------+----------------+----- + 1010101 | 1010101 | 1010101 | 1010101 | ab\000\000\000 | ab (1 row) update t_dezebium_0007_02 set c2='101',c5='FG'; diff --git a/contrib/dolphin/expected/test_condition.out b/contrib/dolphin/expected/test_condition.out index 784f2bcae..19c092272 100644 --- 
a/contrib/dolphin/expected/test_condition.out +++ b/contrib/dolphin/expected/test_condition.out @@ -6313,73 +6313,73 @@ select ifnull(blb, txt) from typeset; select ifnull(bin, smint) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, anint) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, bgint) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, dcmal) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, nmric) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, flt) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, bt) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, dt) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, tmstp) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, tm) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, ch) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, vch) from typeset; ifnull -------- - 1 + \x31 (1 row) select ifnull(bin, blb) from typeset; @@ -6395,75 +6395,75 @@ select ifnull(bin, txt) from typeset; (1 row) select ifnull(vbin, smint) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, anint) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, bgint) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, dcmal) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, nmric) from typeset; - ifnull 
---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, flt) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, bt) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, dt) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, tmstp) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, tm) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, ch) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, vch) from typeset; - ifnull ---------------------- - 2001-04-19 22:23:44 + ifnull +------------------------------------------ + \x323030312d30342d31392032323a32333a3434 (1 row) select ifnull(vbin, blb) from typeset; diff --git a/contrib/dolphin/include/plugin_protocol/bytestream.h b/contrib/dolphin/include/plugin_protocol/bytestream.h index 24ea88a6b..a001986d3 100644 --- a/contrib/dolphin/include/plugin_protocol/bytestream.h +++ b/contrib/dolphin/include/plugin_protocol/bytestream.h @@ -138,9 +138,12 @@ static inline void dq_append_string_len(StringInfo buf, const char *data, uint32 appendBinaryStringInfo(buf, data, len); } -static inline 
void dq_append_string_lenenc(StringInfo buf, const char *data) +static inline void dq_append_string_lenenc(StringInfo buf, const char *data, int dataLen = -1) { size_t len = data ? strlen(data) : 0; + if (dataLen != -1) { + len = dataLen; + } if (!data) { dq_append_int1(buf, NULL_STRING_MARK); } else if (len == 0) { diff --git a/contrib/dolphin/output/binary_export_test.source b/contrib/dolphin/output/binary_export_test.source index 804d13a8d..bc105c384 100644 --- a/contrib/dolphin/output/binary_export_test.source +++ b/contrib/dolphin/output/binary_export_test.source @@ -8,10 +8,9 @@ insert into t1 values (0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE, 0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE); select * from t1; - a | b 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - \0\x01\x02\x03\x04\x05\x06\x07\x08 +| \0\x01\x02\x03\x04\x05\x06\x07\x08 + - \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F + a | b +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe (1 row) -- test about copy @@ -19,10 +18,9 @@ copy t1 to '@abs_builddir@/data/binary.sql'; create table t2 (a binary(255), b varbinary(255)); copy t2 from '@abs_builddir@/data/binary.sql'; select * from t2; - a | b ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - \0\x01\x02\x03\x04\x05\x06\x07\x08 +| \0\x01\x02\x03\x04\x05\x06\x07\x08 + - \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F 
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F + a | b +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe (1 row) -- test about dump @@ -32,10 +30,9 @@ select * from t2; 0 \c b_target select * from t1; - a | b ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - \0\x01\x02\x03\x04\x05\x06\x07\x08 +| \0\x01\x02\x03\x04\x05\x06\x07\x08 + - \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F | \x0B\x0C\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7F + a | b 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe (1 row) \c contrib_regression diff --git a/contrib/dolphin/plugin_protocol/printtup.cpp b/contrib/dolphin/plugin_protocol/printtup.cpp index 430961f55..ab540fe5a 100644 --- a/contrib/dolphin/plugin_protocol/printtup.cpp +++ b/contrib/dolphin/plugin_protocol/printtup.cpp @@ -223,10 +223,19 @@ static void send_textproto(TupleTableSlot *slot, DR_printtup *myState, int natts */ attr = thisState->typisvarlena ? PointerGetDatum(PG_DETOAST_DATUM(origattr)) : origattr; - outputstr = OutputFunctionCall(&thisState->finfo, attr); - dq_append_string_lenenc(buf, outputstr); + Oid typeOid = slot->tts_tupleDescriptor->attrs[i].atttypid; + if (typeOid == BINARYOID || typeOid == VARBINARYOID || + typeOid == TINYBLOBOID || typeOid == MEDIUMBLOBOID || + typeOid == LONGBLOBOID || typeOid == BLOBOID || + typeOid == RAWOID || typeOid == BYTEAOID) { + bytea* barg = DatumGetByteaPP(attr); + dq_append_string_lenenc(buf, VARDATA_ANY(barg), VARSIZE_ANY_EXHDR(barg)); + } else { + outputstr = OutputFunctionCall(&thisState->finfo, attr); + dq_append_string_lenenc(buf, outputstr); - pfree(outputstr); + pfree(outputstr); + } /* Clean up detoasted copy, if any */ if (DatumGetPointer(attr) != DatumGetPointer(origattr)) { diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index c4c8b4a3e..8479957be 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -918,7 +918,7 @@ Datum 
rawin(PG_FUNCTION_ARGS) return result; } else { - return byteain(fcinfo); + return dolphin_binaryin(fcinfo); } } @@ -11005,12 +11005,6 @@ static Datum hex_dolphin_binaryout(PG_FUNCTION_ARGS) Datum dolphin_binaryout(PG_FUNCTION_ARGS) { - if (strcmp(u_sess->attr.attr_common.application_name, "gs_dump") == 0 || - strcmp(u_sess->attr.attr_common.application_name, "gs_dumpall") == 0 || - GetSessionContext()->isDoCopy) { - return hex_dolphin_binaryout(fcinfo); - } else { - return normal_dolphin_binaryout(fcinfo); - } + return byteaout(fcinfo); } #endif -- Gitee From 4a86569fb46ab6e158468ccf44fc6b597da74ff5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cross-=E7=BD=97?= <1165977584@qq.com> Date: Fri, 29 Dec 2023 01:32:18 +0000 Subject: [PATCH 156/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5=E5=86=85=E6=A0=B8?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E4=BF=AE=E5=A4=8Dwhale.so=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E5=8A=A0=E8=BD=BD=E9=94=99=E8=AF=AF=20=E5=90=8C=E6=AD=A5?= =?UTF-8?q?=E5=86=85=E6=A0=B8=E4=BB=A3=E7=A0=81=E4=BF=AE=E5=A4=8Dwhale.so?= =?UTF-8?q?=E6=96=87=E4=BB=B6=E5=8A=A0=E8=BD=BD=E9=94=99=E8=AF=AF=20?= =?UTF-8?q?=E5=8F=82=E8=80=83=E8=87=AAhttps://gitee.com/opengauss/Plugin/p?= =?UTF-8?q?ulls/1250?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cross-ç½— <1165977584@qq.com> --- contrib/whale/include/plugin_utils/plpgsql.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/whale/include/plugin_utils/plpgsql.h b/contrib/whale/include/plugin_utils/plpgsql.h index 063779470..c9d9bcc4a 100644 --- a/contrib/whale/include/plugin_utils/plpgsql.h +++ b/contrib/whale/include/plugin_utils/plpgsql.h @@ -1860,7 +1860,8 @@ extern THR_LOCAL PLpgSQL_execstate* plpgsql_estate; */ #define BULK_COLLECT_MAX ((Size)0x3FFFFFF) /* maximum number of rows can be bulk collected (by 3FFFFFFF/16) */ -extern Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, bool dynexec_anonymous_block); +extern Datum 
plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, + bool dynexec_anonymous_block, int* coverage = NULL); extern Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, char* source_text); extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata); extern void plpgsql_xact_cb(XactEvent event, void* arg); -- Gitee From 8ee8ad5332a1af322e85385da8d98baf37a592b7 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 2 Jan 2024 15:02:16 +0800 Subject: [PATCH 157/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=86=85=E5=AD=98?= =?UTF-8?q?=E6=B3=84=E6=BC=8F=E9=97=AE=E9=A2=98-ReplaceBCmptFuncName?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_parser/parse_expr.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 3314b2ec4..23b5677d9 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -2254,7 +2254,6 @@ void ReplaceBCmptFuncName(List* names, char* objname, char* defaultname, char* r } else { /* should not happen */ } - pfree(objname); } } #endif -- Gitee From 4f7fa81a25434f7f41a255e11f6bfadb1e356ebc Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 2 Jan 2024 19:48:02 +0800 Subject: [PATCH 158/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dnumeric=E8=B4=9F?= =?UTF-8?q?=E5=80=BC=E6=BA=A2=E5=87=BA=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/convert_truncated_warning.out | 18 ++++++++++++++++++ contrib/dolphin/plugin_utils/adt/numeric.cpp | 6 +++++- .../dolphin/sql/convert_truncated_warning.sql | 2 ++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/convert_truncated_warning.out b/contrib/dolphin/expected/convert_truncated_warning.out index aa557e1c1..e86f6fcc0 100644 --- 
a/contrib/dolphin/expected/convert_truncated_warning.out +++ b/contrib/dolphin/expected/convert_truncated_warning.out @@ -136,6 +136,24 @@ CONTEXT: referenced column: numeric 99999999.99 (1 row) +SELECT CONVERT('-1111111111111111111111111111111111111111111', decimal); +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 0 must round to an absolute value less than 10^10. +CONTEXT: referenced column: numeric + numeric +------------- + -9999999999 +(1 row) + +SELECT CONVERT('-1111111111111111111111111111111111111111111', decimal(10, 2)); +WARNING: numeric field overflow +DETAIL: A field with precision 10, scale 2 must round to an absolute value less than 10^8. +CONTEXT: referenced column: numeric + numeric +-------------- + -99999999.99 +(1 row) + SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal); WARNING: bigint unsigned out of range CONTEXT: referenced column: bittouint8 diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index a715ac577..cf8342f04 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -5657,8 +5657,12 @@ static void apply_typmod(NumericVar* var, int32 typmod) maxdigits ? 
maxdigits : 1))); #ifdef DOLPHIN errno_t rc; - size_t maxlen = precision + 2; + size_t maxlen = precision + 3; char str[maxlen] = {}; + if (var->sign == NUMERIC_NEG) { + rc = strcat_s(str, maxlen, "-"); + securec_check(rc, "\0", "\0"); + } while (maxdigits-- > 0) { rc = strcat_s(str, maxlen, "9"); securec_check(rc, "\0", "\0"); diff --git a/contrib/dolphin/sql/convert_truncated_warning.sql b/contrib/dolphin/sql/convert_truncated_warning.sql index 3cd743e68..f792bf4a3 100644 --- a/contrib/dolphin/sql/convert_truncated_warning.sql +++ b/contrib/dolphin/sql/convert_truncated_warning.sql @@ -17,6 +17,8 @@ SELECT CONVERT(1111111111111111111111111111111111111111111, unsigned); SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', unsigned); SELECT CONVERT('1111111111111111111111111111111111111111111', decimal); SELECT CONVERT('1111111111111111111111111111111111111111111', decimal(10, 2)); +SELECT CONVERT('-1111111111111111111111111111111111111111111', decimal); +SELECT CONVERT('-1111111111111111111111111111111111111111111', decimal(10, 2)); SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal); SELECT CONVERT(b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111', decimal(10, 2)); SELECT CONVERT('.1.', decimal); -- Gitee From f9a706fa30d4a45fc4fdf5ed1efd8149102635b2 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 3 Jan 2024 10:12:29 +0800 Subject: [PATCH 159/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Denum=E7=9A=84?= =?UTF-8?q?=E8=BE=93=E5=85=A5=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/test_mysql_enum.out | 241 +++++++++++++++++++ contrib/dolphin/plugin_utils/adt/enum.cpp | 92 +++++++ contrib/dolphin/sql/test_mysql_enum.sql | 67 ++++++ 3 files changed, 400 insertions(+) diff --git a/contrib/dolphin/expected/test_mysql_enum.out 
b/contrib/dolphin/expected/test_mysql_enum.out index 55a04b293..ebad8a8c3 100644 --- a/contrib/dolphin/expected/test_mysql_enum.out +++ b/contrib/dolphin/expected/test_mysql_enum.out @@ -539,6 +539,247 @@ create table t_drop_view(c1 int, c2 enum('a')); create view my_view as select * from t_drop_view; drop view my_view; drop table t_drop_view; +set dolphin.sql_mode to 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +drop table if exists test; +NOTICE: table "test" does not exist, skipping +create table test(color enum('red', 'green', 'blue', 'purple', 'yellow')); +insert into test values('red'); +insert into test values('green'); +insert into test values('orange'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('orange'); + ^ +CONTEXT: referenced column: color +insert into test values(0); +--?ERROR: invalid input value for enum.* +CONTEXT: referenced column: color +insert into test values('0'); +insert into test values(1); +insert into test values('1'); +insert into test values(1.2); +insert into test values('1.2'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('1.2'); + ^ +CONTEXT: referenced column: color +insert into test values(1.4); +insert into test values('1.4'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('1.4'); + ^ +CONTEXT: referenced column: color +insert into test values(1.5); +insert into test values('1.5'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('1.5'); + ^ +CONTEXT: referenced column: color +insert into test values(1.6); +insert into test values('1.6'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('1.6'); + ^ +CONTEXT: referenced column: color +insert into test values('a1'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('a1'); + ^ +CONTEXT: referenced column: color 
+insert into test values('1a'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('1a'); + ^ +CONTEXT: referenced column: color +insert into test values('a123'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('a123'); + ^ +CONTEXT: referenced column: color +insert into test values('123a'); +--?ERROR: invalid input value for enum.* +LINE 1: insert into test values('123a'); + ^ +CONTEXT: referenced column: color +select * from test; + color +------- + red + green + + red + red + red + red + red + red +(9 rows) + +delete from test; +insert ignore into test values('red'); +insert ignore into test values('green'); +insert ignore into test values('orange'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('orange'); + ^ +CONTEXT: referenced column: color +insert ignore into test values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. +CONTEXT: referenced column: color +insert ignore into test values('0'); +insert ignore into test values(1); +insert ignore into test values('1'); +insert ignore into test values(1.2); +insert ignore into test values('1.2'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('1.2'); + ^ +CONTEXT: referenced column: color +insert ignore into test values(1.4); +insert ignore into test values('1.4'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('1.4'); + ^ +CONTEXT: referenced column: color +insert ignore into test values(1.5); +insert ignore into test values('1.5'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('1.5'); + ^ +CONTEXT: referenced column: color +insert ignore into test values(1.6); +insert ignore into test values('1.6'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('1.6'); + ^ +CONTEXT: referenced column: color +insert ignore into test values('a1'); 
+--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('a1'); + ^ +CONTEXT: referenced column: color +insert ignore into test values('1a'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('1a'); + ^ +CONTEXT: referenced column: color +insert ignore into test values('a123'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('a123'); + ^ +CONTEXT: referenced column: color +insert ignore into test values('123a'); +--?WARNING: invalid input value for enum .* +LINE 1: insert ignore into test values('123a'); + ^ +CONTEXT: referenced column: color +select * from test; + color +------- + red + green + + + + red + red + red + + red + + red + + red + + + + + +(19 rows) + +delete from test; +set dolphin.sql_mode to 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +insert into test values('red'); +insert into test values('green'); +insert into test values('orange'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('orange'); + ^ +CONTEXT: referenced column: color +insert into test values(0); +WARNING: Invalid input value for enum. In strict sql_mode, do not allow the value 0. 
+CONTEXT: referenced column: color +insert into test values('0'); +insert into test values(1); +insert into test values('1'); +insert into test values(1.2); +insert into test values('1.2'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('1.2'); + ^ +CONTEXT: referenced column: color +insert into test values(1.4); +insert into test values('1.4'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('1.4'); + ^ +CONTEXT: referenced column: color +insert into test values(1.5); +insert into test values('1.5'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('1.5'); + ^ +CONTEXT: referenced column: color +insert into test values(1.6); +insert into test values('1.6'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('1.6'); + ^ +CONTEXT: referenced column: color +insert into test values('a1'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('a1'); + ^ +CONTEXT: referenced column: color +insert into test values('1a'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('1a'); + ^ +CONTEXT: referenced column: color +insert into test values('a123'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('a123'); + ^ +CONTEXT: referenced column: color +insert into test values('123a'); +--?WARNING: invalid input value for enum .* +LINE 1: insert into test values('123a'); + ^ +CONTEXT: referenced column: color +select * from test; + color +------- + red + green + + + + red + red + red + + red + + red + + red + + + + + +(19 rows) + +drop table test cascade; drop schema db_b_new_gram_test3 cascade; reset current_schema; -- diff --git a/contrib/dolphin/plugin_utils/adt/enum.cpp b/contrib/dolphin/plugin_utils/adt/enum.cpp index 89dfca404..9077a6183 100644 --- a/contrib/dolphin/plugin_utils/adt/enum.cpp +++ b/contrib/dolphin/plugin_utils/adt/enum.cpp @@ -43,6 +43,8 @@ static ArrayType* 
enum_range_internal(Oid enumtypoid, Oid lower, Oid upper); #ifdef DOLPHIN static Oid get_enumid_with_collation(Oid enumtypoid, Oid collation, char* enum_name); static int compare_values_of_enum_with_collation(Oid arg1, Oid arg2, Oid collation); +#define DEC_BASE 10 +static uint64 pg_strntoul(const char *nptr, size_t l, char **endptr, int *err); #endif /* Basic I/O support */ @@ -73,6 +75,24 @@ Datum enum_in(PG_FUNCTION_ARGS) if (!HeapTupleIsValid(tup)) { /* In non-strict mode, allow enum values to be empty strings */ #ifdef DOLPHIN + char* new_name = NULL; + char* end; + int err = 0; + int length = strlen(name); + uint64 order = pg_strntoul(name, length, &end, &err); + bool parse_success = (!err && end == name+length); + order = parse_success ? order : 0; + if (parse_success && order == 0) { + return (Datum)0; + } + if (parse_success || !SQL_MODE_STRICT()) { + if ((new_name = getEnumLableByOrder(enumtypoid, order)) != NULL) { + tup = SearchSysCache2(ENUMTYPOIDNAME, ObjectIdGetDatum(enumtypoid), CStringGetDatum(new_name)); + } + } + } + + if (!HeapTupleIsValid(tup)) { int elevel = (fcinfo->can_ignore || !SQL_MODE_STRICT()) ? 
WARNING : ERROR; ereport(elevel, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); @@ -1039,5 +1059,77 @@ Datum textenumle(PG_FUNCTION_ARGS) return DirectFunctionCall2Coll(text_le, PG_GET_COLLATION(), PG_GETARG_DATUM(0), PointerGetDatum(enumDatum)); } +static uint64 pg_strntoul(const char* nptr, size_t l, char** endptr, int* err) +{ + int negative; + uint32 cutoff; + uint cutlim; + uint32 i; + const char* s; + unsigned char c; + const char* save; + const char* e; + int overflow; + + *err = 0; /* Initialize error indicator */ + + s = nptr; + e = nptr + l; + + /* skip all space */ + for (; s < e && isspace(*s); s++) + ; + + if (s == e) { + goto noconv; + } + if (*s == '-') { + negative = 1; + ++s; + } else if (*s == '+') { + negative = 0; + ++s; + } else + negative = 0; + + save = s; + cutoff = ((uint32)~0L) / (uint32)DEC_BASE; + cutlim = (uint32)(((uint32)~0L) % (uint32)DEC_BASE); + overflow = 0; + i = 0; + + for (c = *s; s != e; c = *++s) { + if (c >= '0' && c <= '9') { + c -= '0'; + } else { + break; + } + if (i > cutoff || (i == cutoff && c > cutlim)) + overflow = 1; + else { + i *= DEC_BASE; + i += c; + } + } + + if (s == save) + goto noconv; + + if (endptr != NULL) + *endptr = (char *)s; + + if (overflow) { + err[0] = ERANGE; + return (~(uint32)0); + } + + return (negative ? 
-((int32)i) : (int32)i); + +noconv: + err[0] = EDOM; + if (endptr != NULL) + *endptr = (char *)nptr; + return 0L; +} #endif diff --git a/contrib/dolphin/sql/test_mysql_enum.sql b/contrib/dolphin/sql/test_mysql_enum.sql index 2a1e1656e..32dba0104 100644 --- a/contrib/dolphin/sql/test_mysql_enum.sql +++ b/contrib/dolphin/sql/test_mysql_enum.sql @@ -260,6 +260,73 @@ create table t_drop_view(c1 int, c2 enum('a')); create view my_view as select * from t_drop_view; drop view my_view; drop table t_drop_view; +set dolphin.sql_mode to 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +drop table if exists test; +create table test(color enum('red', 'green', 'blue', 'purple', 'yellow')); +insert into test values('red'); +insert into test values('green'); +insert into test values('orange'); +insert into test values(0); +insert into test values('0'); +insert into test values(1); +insert into test values('1'); +insert into test values(1.2); +insert into test values('1.2'); +insert into test values(1.4); +insert into test values('1.4'); +insert into test values(1.5); +insert into test values('1.5'); +insert into test values(1.6); +insert into test values('1.6'); +insert into test values('a1'); +insert into test values('1a'); +insert into test values('a123'); +insert into test values('123a'); +select * from test; +delete from test; +insert ignore into test values('red'); +insert ignore into test values('green'); +insert ignore into test values('orange'); +insert ignore into test values(0); +insert ignore into test values('0'); +insert ignore into test values(1); +insert ignore into test values('1'); +insert ignore into test values(1.2); +insert ignore into test values('1.2'); +insert ignore into test values(1.4); +insert ignore into test values('1.4'); +insert ignore into test values(1.5); +insert ignore into test values('1.5'); +insert ignore into test values(1.6); +insert 
ignore into test values('1.6'); +insert ignore into test values('a1'); +insert ignore into test values('1a'); +insert ignore into test values('a123'); +insert ignore into test values('123a'); +select * from test; +delete from test; +set dolphin.sql_mode to 'sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +insert into test values('red'); +insert into test values('green'); +insert into test values('orange'); +insert into test values(0); +insert into test values('0'); +insert into test values(1); +insert into test values('1'); +insert into test values(1.2); +insert into test values('1.2'); +insert into test values(1.4); +insert into test values('1.4'); +insert into test values(1.5); +insert into test values('1.5'); +insert into test values(1.6); +insert into test values('1.6'); +insert into test values('a1'); +insert into test values('1a'); +insert into test values('a123'); +insert into test values('123a'); +select * from test; +drop table test cascade; drop schema db_b_new_gram_test3 cascade; reset current_schema; -- Gitee From 9b966804a9cc4e9faaa34549872c152ebde7ce7a Mon Sep 17 00:00:00 2001 From: yuchao Date: Tue, 2 Jan 2024 16:25:48 +0800 Subject: [PATCH 160/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=85=BC=E5=AE=B9B?= =?UTF-8?q?=E5=BA=93=E5=88=9B=E5=BB=BA=E7=BB=84=E5=90=88=E5=94=AF=E4=B8=80?= =?UTF-8?q?=E7=B4=A2=E5=BC=95=E6=97=B6=E6=8C=87=E5=AE=9Atext=E5=88=97?= =?UTF-8?q?=E9=94=AE=E5=80=BC=E9=95=BF=E5=BA=A6=E6=8A=A5=E9=94=99=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/add_unique_index.out | 14 ++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_parser/gram.y | 13 +++++++++++++ contrib/dolphin/sql/add_unique_index.sql | 13 +++++++++++++ 4 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 
contrib/dolphin/expected/add_unique_index.out create mode 100644 contrib/dolphin/sql/add_unique_index.sql diff --git a/contrib/dolphin/expected/add_unique_index.out b/contrib/dolphin/expected/add_unique_index.out new file mode 100644 index 000000000..6ced456a2 --- /dev/null +++ b/contrib/dolphin/expected/add_unique_index.out @@ -0,0 +1,14 @@ +create schema add_unique_index; +set current_schema = add_unique_index; +create table t_tinyint0018 ( + c1 tinyint, + c2 tinyint(1) default null, + c3 tinyint(10) not null default '0', + c4 tinyint default '0', + c5 text +); +alter table t_tinyint0018 add unique index i_tinyint0018(c1, c2, c5(10)); +NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "i_tinyint0018" for table "t_tinyint0018" +drop table t_tinyint0018 cascade; +reset current_schema; +drop schema add_unique_index; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 97f75a84e..89ef39748 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -3,7 +3,7 @@ test: keyword_ignore_test/ignore_no_matched_partition keyword_ignore_test/ignore test: string_func_test/db_b_ascii_test string_func_test/db_b_left_right_test string_func_test/db_b_quote_test string_func_test/db_b_string_length_test string_func_test/db_b_substr_test string_func_test/db_b_trim_test string_func_test/db_b_compress_test string_func_test/db_b_weightstring_test string_func_test/db_b_insert_test string_func_test/db_b_nameconst_test -test: ast b_compatibility_time_type db_b_new_gram_test group_concat_test test_condition vec_engine test_uncommon_table_option +test: ast b_compatibility_time_type db_b_new_gram_test group_concat_test test_condition vec_engine test_uncommon_table_option add_unique_index test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond test_set_password_for_user test_timestamp_overflow diff --git a/contrib/dolphin/plugin_parser/gram.y 
b/contrib/dolphin/plugin_parser/gram.y index b8843501b..1af7cd55b 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -19229,6 +19229,19 @@ constraint_elem: ColId con_asc_desc $$->ordering = SORTBY_DEFAULT; $$->nulls_ordering = SORTBY_NULLS_DEFAULT; } + | ColId '(' Iconst ')' opt_asc_desc + { + PrefixKey* pkey = makeNode(PrefixKey); + pkey->arg = (Expr*)makeColumnRef(pstrdup($1), NIL, @1, yyscanner); + pkey->length = $3; + $$ = makeNode(IndexElem); + $$->name = NULL; + $$->expr = (Node*)pkey; + $$->indexcolname = NULL; + $$->collation = NULL; + $$->opclass = NULL; + $$->ordering = (SortByDir)$5; + } | '(' a_expr ')' opt_asc_desc { #ifdef ENABLE_MULTIPLE_NODES diff --git a/contrib/dolphin/sql/add_unique_index.sql b/contrib/dolphin/sql/add_unique_index.sql new file mode 100644 index 000000000..08b67db57 --- /dev/null +++ b/contrib/dolphin/sql/add_unique_index.sql @@ -0,0 +1,13 @@ +create schema add_unique_index; +set current_schema = add_unique_index; +create table t_tinyint0018 ( + c1 tinyint, + c2 tinyint(1) default null, + c3 tinyint(10) not null default '0', + c4 tinyint default '0', + c5 text +); +alter table t_tinyint0018 add unique index i_tinyint0018(c1, c2, c5(10)); +drop table t_tinyint0018 cascade; +reset current_schema; +drop schema add_unique_index; -- Gitee From bb3e5d75bda96a89706554e4cfab771eae924c56 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 3 Jan 2024 16:50:44 +0800 Subject: [PATCH 161/434] Fix user var bug. 
--- contrib/dolphin/expected/db_b_parser3.out | 50 +++++++------- .../dolphin/expected/multi_select_in_proc.out | 12 ++-- .../expected/test_guc_select_and_set.out | 68 +++++++++++++++++++ contrib/dolphin/plugin_parser/scan.l | 11 +++ contrib/dolphin/sql/db_b_parser3.sql | 50 +++++++------- contrib/dolphin/sql/multi_select_in_proc.sql | 12 ++-- .../dolphin/sql/test_guc_select_and_set.sql | 14 ++++ 7 files changed, 155 insertions(+), 62 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index 3478154b2..2ddae8819 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -202,31 +202,31 @@ WARNING: Truncated incorrect INTEGER value: '{"a": 1, "b": 2}' (1 row) select -random_bytes(`int1`)::binary, -random_bytes(`uint1`)::binary, -random_bytes(`int2`)::binary, -random_bytes(`uint2`)::binary, -random_bytes(`int4`)::binary, -random_bytes(`uint4`)::binary, -random_bytes(`int8`)::binary, -random_bytes(`uint8`)::binary, -random_bytes(`float4`)::binary, -random_bytes(`float8`)::binary, -random_bytes(`numeric`)::binary, -random_bytes(`bit1`)::binary, -random_bytes(`bit64`)::binary, -random_bytes(`boolean`)::binary, -random_bytes(`char`)::binary, -random_bytes(`varchar`)::binary, -random_bytes(`binary`)::binary, -random_bytes(`varbinary`)::binary, -random_bytes(`tinyblob`)::binary, -random_bytes(`blob`)::binary, -random_bytes(`mediumblob`)::binary, -random_bytes(`longblob`)::binary, -random_bytes(`text`)::binary, -random_bytes(`enum_t`)::binary, -random_bytes(`set_t`)::binary +random_bytes(`int1`), +random_bytes(`uint1`), +random_bytes(`int2`), +random_bytes(`uint2`), +random_bytes(`int4`), +random_bytes(`uint4`), +random_bytes(`int8`), +random_bytes(`uint8`), +random_bytes(`float4`), +random_bytes(`float8`), +random_bytes(`numeric`), +random_bytes(`bit1`), +random_bytes(`bit64`), +random_bytes(`boolean`), +random_bytes(`char`), +random_bytes(`varchar`), 
+random_bytes(`binary`), +random_bytes(`varbinary`), +random_bytes(`tinyblob`), +random_bytes(`blob`), +random_bytes(`mediumblob`), +random_bytes(`longblob`), +random_bytes(`text`), +random_bytes(`enum_t`), +random_bytes(`set_t`) from test_type_table; WARNING: invalid input syntax for type integer: "1.23a " CONTEXT: referenced column: random_bytes diff --git a/contrib/dolphin/expected/multi_select_in_proc.out b/contrib/dolphin/expected/multi_select_in_proc.out index 101b543ba..31fce43fe 100644 --- a/contrib/dolphin/expected/multi_select_in_proc.out +++ b/contrib/dolphin/expected/multi_select_in_proc.out @@ -383,18 +383,18 @@ begin set @x = 1; truncate t_tinyint0009; repeat - set @c1 = @x; - set @c2 = floor(0.1*(127-18+1))+18; - set @c3 = floor(0.1*(127-100+1))+100; - set @c4 = floor(0.1*(10000-127+1))+127; - set @c5 = concat('amy', @x); + set @c1=@x; + set @c2=floor(0.1*(127-18+1))+18; + set @c3=floor(0.1*(127-100+1))+100; + set @c4=floor(0.1*(10000-127+1))+127; + set @c5=concat('amy', @x); select @c1; select @c2; select @c3; select @c4; select @c5; insert into t_tinyint0009 values (@c1, @c2, @c3, @c4, @c5); - set @x = @x + 1; + set @x=@x+1; select @x; until @x > num end repeat; end; diff --git a/contrib/dolphin/expected/test_guc_select_and_set.out b/contrib/dolphin/expected/test_guc_select_and_set.out index f7f93df75..9d336d4c5 100644 --- a/contrib/dolphin/expected/test_guc_select_and_set.out +++ b/contrib/dolphin/expected/test_guc_select_and_set.out @@ -238,4 +238,72 @@ show lower_case_table_names; 0 (1 row) +set @ta=@@license; +set @tb=@ta; +select @ta; + @ta +-------------- + MulanPSL-2.0 +(1 row) + +select @ta=@@license; + ?column? +---------- + t +(1 row) + +select @ta!=@@license; + ?column? +---------- + f +(1 row) + +select @ta<>@@license; + ?column? +---------- + f +(1 row) + +select @ta>@@license; + ?column? +---------- + f +(1 row) + +select @ta<@@license; + ?column? +---------- + f +(1 row) + +select @ta=@tb; + ?column? 
+---------- + t +(1 row) + +select @ta!=@tb; + ?column? +---------- + f +(1 row) + +select @ta<>@tb; + ?column? +---------- + f +(1 row) + +select @ta>@tb; + ?column? +---------- + f +(1 row) + +select @ta<@tb; + ?column? +---------- + f +(1 row) + drop schema test_guc_select_and_set cascade; diff --git a/contrib/dolphin/plugin_parser/scan.l b/contrib/dolphin/plugin_parser/scan.l index c434b53dd..dd7e975ca 100644 --- a/contrib/dolphin/plugin_parser/scan.l +++ b/contrib/dolphin/plugin_parser/scan.l @@ -1036,6 +1036,17 @@ other . return SET_USER_IDENT; } } + + /* check whether the string end with '@' or '@@', if so, remove last one or two '@', treat them as user var */ + if (nchars >= 2 && yytext[nchars - 1] == '@') + { + /* remove two '@' at most */ + nchars--; + if (yytext[nchars - 1] == '@') + { + nchars--; + } + } } if (slashstar && dashdash) diff --git a/contrib/dolphin/sql/db_b_parser3.sql b/contrib/dolphin/sql/db_b_parser3.sql index 11e39f8d0..b8c2932c1 100644 --- a/contrib/dolphin/sql/db_b_parser3.sql +++ b/contrib/dolphin/sql/db_b_parser3.sql @@ -100,31 +100,31 @@ rand(`json`)=rand(cast(`json` as signed)) from test_type_table; select -random_bytes(`int1`)::binary, -random_bytes(`uint1`)::binary, -random_bytes(`int2`)::binary, -random_bytes(`uint2`)::binary, -random_bytes(`int4`)::binary, -random_bytes(`uint4`)::binary, -random_bytes(`int8`)::binary, -random_bytes(`uint8`)::binary, -random_bytes(`float4`)::binary, -random_bytes(`float8`)::binary, -random_bytes(`numeric`)::binary, -random_bytes(`bit1`)::binary, -random_bytes(`bit64`)::binary, -random_bytes(`boolean`)::binary, -random_bytes(`char`)::binary, -random_bytes(`varchar`)::binary, -random_bytes(`binary`)::binary, -random_bytes(`varbinary`)::binary, -random_bytes(`tinyblob`)::binary, -random_bytes(`blob`)::binary, -random_bytes(`mediumblob`)::binary, -random_bytes(`longblob`)::binary, -random_bytes(`text`)::binary, -random_bytes(`enum_t`)::binary, -random_bytes(`set_t`)::binary +random_bytes(`int1`), 
+random_bytes(`uint1`), +random_bytes(`int2`), +random_bytes(`uint2`), +random_bytes(`int4`), +random_bytes(`uint4`), +random_bytes(`int8`), +random_bytes(`uint8`), +random_bytes(`float4`), +random_bytes(`float8`), +random_bytes(`numeric`), +random_bytes(`bit1`), +random_bytes(`bit64`), +random_bytes(`boolean`), +random_bytes(`char`), +random_bytes(`varchar`), +random_bytes(`binary`), +random_bytes(`varbinary`), +random_bytes(`tinyblob`), +random_bytes(`blob`), +random_bytes(`mediumblob`), +random_bytes(`longblob`), +random_bytes(`text`), +random_bytes(`enum_t`), +random_bytes(`set_t`) from test_type_table; --error, cause value out of range diff --git a/contrib/dolphin/sql/multi_select_in_proc.sql b/contrib/dolphin/sql/multi_select_in_proc.sql index a2eb65e68..86579a3c1 100644 --- a/contrib/dolphin/sql/multi_select_in_proc.sql +++ b/contrib/dolphin/sql/multi_select_in_proc.sql @@ -256,18 +256,18 @@ begin set @x = 1; truncate t_tinyint0009; repeat - set @c1 = @x; - set @c2 = floor(0.1*(127-18+1))+18; - set @c3 = floor(0.1*(127-100+1))+100; - set @c4 = floor(0.1*(10000-127+1))+127; - set @c5 = concat('amy', @x); + set @c1=@x; + set @c2=floor(0.1*(127-18+1))+18; + set @c3=floor(0.1*(127-100+1))+100; + set @c4=floor(0.1*(10000-127+1))+127; + set @c5=concat('amy', @x); select @c1; select @c2; select @c3; select @c4; select @c5; insert into t_tinyint0009 values (@c1, @c2, @c3, @c4, @c5); - set @x = @x + 1; + set @x=@x+1; select @x; until @x > num end repeat; end; diff --git a/contrib/dolphin/sql/test_guc_select_and_set.sql b/contrib/dolphin/sql/test_guc_select_and_set.sql index 53ff6baf3..345f712a4 100644 --- a/contrib/dolphin/sql/test_guc_select_and_set.sql +++ b/contrib/dolphin/sql/test_guc_select_and_set.sql @@ -128,4 +128,18 @@ set lower_case_table_names = 0; show dolphin.lower_case_table_names; show lower_case_table_names; +set @ta=@@license; +set @tb=@ta; +select @ta; +select @ta=@@license; +select @ta!=@@license; +select @ta<>@@license; +select @ta>@@license; 
+select @ta<@@license; +select @ta=@tb; +select @ta!=@tb; +select @ta<>@tb; +select @ta>@tb; +select @ta<@tb; + drop schema test_guc_select_and_set cascade; -- Gitee From 4549ad1310a37c8617ce864458931d9ca33078d4 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 3 Jan 2024 21:23:20 +0800 Subject: [PATCH 162/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8R30W=E6=89=80=E7=A4=BA=E7=9A=84=E6=89=A7?= =?UTF-8?q?=E8=A1=8Csqlsmith=E4=BA=A7=E7=94=9Fcore=20dump=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E3=80=91:=20=E4=BF=AE=E5=A4=8DI8R30W=E6=89=80=E7=A4=BA?= =?UTF-8?q?=E7=9A=84=E6=89=A7=E8=A1=8Csqlsmith=E4=BA=A7=E7=94=9Fcore=20dum?= =?UTF-8?q?p=E9=97=AE=E9=A2=98=E3=80=82=20=E3=80=90=E6=A0=B9=E5=9B=A0?= =?UTF-8?q?=E5=88=86=E6=9E=90=E3=80=91:=20=E5=85=B6=E5=AE=9E=E4=B8=BB?= =?UTF-8?q?=E8=A6=81=E6=98=AFdolphin=5Ftypes=E8=BF=94=E5=9B=9E=E7=9A=84tex?= =?UTF-8?q?t[]=E5=86=85=E9=83=A8=E6=98=AFCtsring=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=EF=BC=8Cjson=5Fextract=5Fpath=5Fop=E7=94=A8text?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E5=8E=BB=E8=A7=A3=E9=87=8A=E5=AF=BC=E8=87=B4?= =?UTF-8?q?=E7=9A=84=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9?= =?UTF-8?q?=E6=A1=88=E3=80=91:=20json=5Fextract=5Fpath=5Fop=E4=B8=AD?= =?UTF-8?q?=EF=BC=8C=E5=A6=82=E6=9E=9C=E6=98=AFCString=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E8=AF=9D=EF=BC=8C=E5=B0=B1=E8=B5=B0CSting=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E8=A7=A3=E9=87=8A=EF=BC=8C=E5=90=A6=E5=88=99=E5=B0=B1?= =?UTF-8?q?=E8=B5=B0text=E7=B1=BB=E5=9E=8B=E7=9A=84=E8=A7=A3=E9=87=8A?= =?UTF-8?q?=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82?= =?UTF-8?q?=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengaussorg/das?= =?UTF-8?q?hboard=3Fissue=3DI8R30W?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/json_object.out | 6 +++++ .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 24 +++++++++++++++++++ 
contrib/dolphin/sql/json_object.sql | 1 + 3 files changed, 31 insertions(+) diff --git a/contrib/dolphin/expected/json_object.out b/contrib/dolphin/expected/json_object.out index 7c7e036cc..4542aa856 100644 --- a/contrib/dolphin/expected/json_object.out +++ b/contrib/dolphin/expected/json_object.out @@ -146,6 +146,12 @@ select json_extract_path_op(json_object('Name','Adam','Age',23,'Address','Chengd "Adam" (1 row) +select pg_catalog.json_extract_path_op(cast('[0, 0, 0, 0]' as json), cast(pg_catalog.dolphin_types() as _text)); + json_extract_path_op +---------------------- + +(1 row) + select json_extract_path_text(json_object('Name','Adam','Age',23,'Address','Chengdu'),'Name'); json_extract_path_text ------------------------ diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 65f715b42..81b42cd48 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -65,6 +65,9 @@ /* fake category for types that have a cast to json */ #define TYPCATEGORY_JSON_CAST 'c' +#define CSTRING_ELMLEN -2 +#define TEXT_ELMLEN -1 + TYPCATEGORY get_value_type(Oid val_type, Oid typoutput) { Oid castfunc = InvalidOid; @@ -1143,7 +1146,20 @@ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) errmsg("cannot call function with null path elements"))); } +#ifdef DOLPHIN + // set the json element info as text type by default + Oid elmtype = TEXTOID; + int elmlen = TEXT_ELMLEN; + char elmalign = 'i'; + if (path->elemtype == CSTRINGOID) { + elmtype = CSTRINGOID; + elmlen = CSTRING_ELMLEN; + elmalign = 'c'; + } + deconstruct_array(path, elmtype, elmlen, false, elmalign, &pathtext, &pathnulls, &npath); +#else deconstruct_array(path, TEXTOID, -1, false, 'i', &pathtext, &pathnulls, &npath); +#endif /* * If the array is empty, return NULL; this is dubious but it's what 9.3 * did. 
@@ -1155,7 +1171,15 @@ static inline Datum get_path_all(FunctionCallInfo fcinfo, bool as_text) ipath = (int *)palloc(npath * sizeof(int)); for (i = 0; i < npath; i++) { +#ifdef DOLPHIN + if (path->elemtype == CSTRINGOID) { + tpath[i] = DatumGetCString(pathtext[i]); + } else { + tpath[i] = TextDatumGetCString(pathtext[i]); + } +#else tpath[i] = TextDatumGetCString(pathtext[i]); +#endif if (*tpath[i] == '\0') { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), diff --git a/contrib/dolphin/sql/json_object.sql b/contrib/dolphin/sql/json_object.sql index 2518d5532..647391df9 100644 --- a/contrib/dolphin/sql/json_object.sql +++ b/contrib/dolphin/sql/json_object.sql @@ -38,6 +38,7 @@ select json_object_field_text(json_object('Name','Adam','Age',23,'Address','Chen select json_object_field_text(json_object('Name','Adam','Age',23,'Address','Chengdu'),'Name'); select json_extract_path(json_object('Name','Adam','Age',23,'Address','Chengdu'),'Name'); select json_extract_path_op(json_object('Name','Adam','Age',23,'Address','Chengdu'),'{Name}'); +select pg_catalog.json_extract_path_op(cast('[0, 0, 0, 0]' as json), cast(pg_catalog.dolphin_types() as _text)); select json_extract_path_text(json_object('Name','Adam','Age',23,'Address','Chengdu'),'Name'); select json_extract_path_text_op(json_object('Name','Adam','Age',23,'Address','Chengdu'),'{Name}'); select * from json_each(json_object('Name','Adam','Age',23,'Address','Chengdu')); -- Gitee From 1ad8271b5b25365e23fcf38c6dbfa40e600fa0a7 Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 5 Jan 2024 15:09:38 +0800 Subject: [PATCH 163/434] Sync server code. 
0009ff6df50e92c5994f3d8868d69be4e2037ccf --- contrib/dolphin/expected/partition_test4.out | 334 ++--- contrib/dolphin/expected/show_create.out | 230 +-- .../dolphin/expected/test_enum_collation.out | 4 +- .../dolphin/expected/uint_auto_increment.out | 74 +- contrib/dolphin/include/builtin_funcs.ini | 4 + .../dolphin/include/plugin_nodes/parsenodes.h | 2 + .../include/plugin_nodes/parsenodes_common.h | 1 + .../include/plugin_parser/parse_relation.h | 2 +- .../dolphin/include/plugin_parser/parsetree.h | 2 +- .../dolphin/include/plugin_utils/plpgsql.h | 10 + ...ble_partition_ddl_import_and_export.source | 1268 ++++++++--------- contrib/dolphin/plugin_executor/execQual.cpp | 1 + .../plugin_optimizer/commands/typecmds.cpp | 141 +- .../dolphin/plugin_optimizer/util/clauses.cpp | 7 + contrib/dolphin/plugin_parser/analyze.cpp | 86 ++ contrib/dolphin/plugin_parser/gram.y | 28 +- .../dolphin/plugin_parser/parse_relation.cpp | 8 +- .../dolphin/plugin_parser/parse_utilcmd.cpp | 5 +- contrib/dolphin/plugin_pl/plpgsql/src/gram.y | 11 +- .../plugin_pl/plpgsql/src/pl_handler.cpp | 426 +++++- .../dolphin/plugin_utils/adt/arrayfuncs.cpp | 52 +- .../dolphin/plugin_utils/adt/arrayutils.cpp | 5 - .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 11 +- .../dolphin/plugin_utils/adt/ruleutils.cpp | 10 +- contrib/dolphin/tablecmds.cpp | 301 +++- 25 files changed, 1959 insertions(+), 1064 deletions(-) diff --git a/contrib/dolphin/expected/partition_test4.out b/contrib/dolphin/expected/partition_test4.out index 131e721f0..90efa110c 100644 --- a/contrib/dolphin/expected/partition_test4.out +++ b/contrib/dolphin/expected/partition_test4.out @@ -76,22 +76,22 @@ select pg_get_tabledef('test_range_subpart'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (a) SUBPARTITION BY HASH (b) + ( + - PARTITION m1 VALUES LESS THAN (100) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (100) + ( + - SUBPARTITION m1_subpartdefault1 TABLESPACE pg_default + + SUBPARTITION m1_subpartdefault1 + 
), + - PARTITION m2 VALUES LESS THAN (500) TABLESPACE pg_default + + PARTITION m2 VALUES LESS THAN (500) + ( + - SUBPARTITION m21 TABLESPACE pg_default, + - SUBPARTITION m22 TABLESPACE pg_default + + SUBPARTITION m21, + + SUBPARTITION m22 + ), + - PARTITION p3 VALUES LESS THAN (800) TABLESPACE pg_default + + PARTITION p3 VALUES LESS THAN (800) + ( + - SUBPARTITION p3_subpartdefault1 TABLESPACE pg_default + + SUBPARTITION p3_subpartdefault1 + ), + - PARTITION p4 VALUES LESS THAN (1200) TABLESPACE pg_default + + PARTITION p4 VALUES LESS THAN (1200) + ( + - SUBPARTITION s41 TABLESPACE pg_default + + SUBPARTITION s41 + ) + ) + ENABLE ROW MOVEMENT; + @@ -148,10 +148,10 @@ select pg_get_tabledef('test_range_part'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (a) + ( + - PARTITION m1 VALUES LESS THAN (100) TABLESPACE pg_default, + - PARTITION m2 VALUES LESS THAN (500) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (800) TABLESPACE pg_default, + - PARTITION p4 VALUES LESS THAN (1200) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (100), + + PARTITION m2 VALUES LESS THAN (500), + + PARTITION p3 VALUES LESS THAN (800), + + PARTITION p4 VALUES LESS THAN (1200) + ) + ENABLE ROW MOVEMENT; + ALTER TABLE test_range_part ADD CONSTRAINT test_range_part_pkey PRIMARY KEY USING btree (a); @@ -207,9 +207,9 @@ select pg_get_tabledef('test_list_part'); WITH (orientation=row, compression=no) + PARTITION BY LIST (a) + ( + - PARTITION m2 VALUES (1,4,7) TABLESPACE pg_default, + - PARTITION m1 VALUES (2,3,5,13) TABLESPACE pg_default, + - PARTITION p3 VALUES (9,10,11,12) TABLESPACE pg_default + + PARTITION m2 VALUES (1,4,7), + + PARTITION m1 VALUES (2,3,5,13), + + PARTITION p3 VALUES (9,10,11,12) + ) + ENABLE ROW MOVEMENT; + ALTER TABLE test_list_part ADD CONSTRAINT test_list_part_pkey PRIMARY KEY USING btree (a); @@ -264,18 +264,18 @@ select pg_get_tabledef('test_list_subpart'); WITH (orientation=row, compression=no) + PARTITION BY LIST (a) SUBPARTITION BY 
HASH (b) + ( + - PARTITION m2 VALUES (1,4,7) TABLESPACE pg_default + + PARTITION m2 VALUES (1,4,7) + ( + - SUBPARTITION m2_subpartdefault1 TABLESPACE pg_default + + SUBPARTITION m2_subpartdefault1 + ), + - PARTITION m1 VALUES (2,3,5,13) TABLESPACE pg_default + + PARTITION m1 VALUES (2,3,5,13) + ( + - SUBPARTITION m11 TABLESPACE pg_default, + - SUBPARTITION m12 TABLESPACE pg_default + + SUBPARTITION m11, + + SUBPARTITION m12 + ), + - PARTITION p3 VALUES (9,10,11,12) TABLESPACE pg_default + + PARTITION p3 VALUES (9,10,11,12) + ( + - SUBPARTITION p13 TABLESPACE pg_default + + SUBPARTITION p13 + ) + ) + ENABLE ROW MOVEMENT; + @@ -355,10 +355,10 @@ select pg_get_tabledef('test_part_ustore'); WITH (orientation=row, storage_type=ustore, compression=no) + PARTITION BY RANGE (a) + ( + - PARTITION m1 VALUES LESS THAN (100) TABLESPACE pg_default, + - PARTITION m2 VALUES LESS THAN (500) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (800) TABLESPACE pg_default, + - PARTITION p4 VALUES LESS THAN (1200) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (100), + + PARTITION m2 VALUES LESS THAN (500), + + PARTITION p3 VALUES LESS THAN (800), + + PARTITION p4 VALUES LESS THAN (1200) + ) + ENABLE ROW MOVEMENT; + ALTER TABLE test_part_ustore ADD CONSTRAINT test_part_ustore_pkey PRIMARY KEY USING ubtree (a) WITH (storage_type=USTORE); @@ -426,10 +426,10 @@ select pg_get_tabledef('test_part_segment'); WITH (orientation=row, segment=on, compression=no) + PARTITION BY RANGE (a) + ( + - PARTITION m1 VALUES LESS THAN (100) TABLESPACE pg_default, + - PARTITION m2 VALUES LESS THAN (500) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (800) TABLESPACE pg_default, + - PARTITION p4 VALUES LESS THAN (1200) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (100), + + PARTITION m2 VALUES LESS THAN (500), + + PARTITION p3 VALUES LESS THAN (800), + + PARTITION p4 VALUES LESS THAN (1200) + ) + ENABLE ROW MOVEMENT; + ALTER TABLE test_part_segment ADD CONSTRAINT 
test_part_segment_pkey PRIMARY KEY USING btree (a); @@ -504,20 +504,20 @@ select pg_get_tabledef('b_range_hash_t01'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + ( + - PARTITION p1 VALUES LESS THAN (100) TABLESPACE pg_default + + PARTITION p1 VALUES LESS THAN (100) + ( + - SUBPARTITION p1_1 TABLESPACE pg_default, + - SUBPARTITION p1_2 TABLESPACE pg_default + + SUBPARTITION p1_1, + + SUBPARTITION p1_2 + ), + - PARTITION p2 VALUES LESS THAN (200) TABLESPACE pg_default + + PARTITION p2 VALUES LESS THAN (200) + ( + - SUBPARTITION p2_1 TABLESPACE pg_default, + - SUBPARTITION p2_2 TABLESPACE pg_default + + SUBPARTITION p2_1, + + SUBPARTITION p2_2 + ), + - PARTITION p3 VALUES LESS THAN (300) TABLESPACE pg_default + + PARTITION p3 VALUES LESS THAN (300) + ( + - SUBPARTITION p3_1 TABLESPACE pg_default, + - SUBPARTITION p3_2 TABLESPACE pg_default + + SUBPARTITION p3_1, + + SUBPARTITION p3_2 + ) + ) + ENABLE ROW MOVEMENT; + @@ -552,25 +552,25 @@ select pg_get_tabledef('b_range_hash_t01'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + ( + - PARTITION m1 VALUES LESS THAN (50) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (50) + ( + - SUBPARTITION m1_1 TABLESPACE pg_default, + - SUBPARTITION m1_2 TABLESPACE pg_default, + - SUBPARTITION m1_3 TABLESPACE pg_default + + SUBPARTITION m1_1, + + SUBPARTITION m1_2, + + SUBPARTITION m1_3 + ), + - PARTITION m2 VALUES LESS THAN (100) TABLESPACE pg_default + + PARTITION m2 VALUES LESS THAN (100) + ( + - SUBPARTITION m2_subpartdefault1 TABLESPACE pg_default + + SUBPARTITION m2_subpartdefault1 + ), + - PARTITION p2 VALUES LESS THAN (200) TABLESPACE pg_default + + PARTITION p2 VALUES LESS THAN (200) + ( + - SUBPARTITION p2_1 TABLESPACE pg_default, + - SUBPARTITION p2_2 TABLESPACE pg_default + + SUBPARTITION p2_1, + + SUBPARTITION p2_2 + ), + - PARTITION p3 VALUES LESS THAN (300) TABLESPACE pg_default + + PARTITION p3 VALUES LESS THAN (300) 
+ ( + - SUBPARTITION p3_1 TABLESPACE pg_default, + - SUBPARTITION p3_2 TABLESPACE pg_default + + SUBPARTITION p3_1, + + SUBPARTITION p3_2 + ) + ) + ENABLE ROW MOVEMENT; + @@ -615,96 +615,96 @@ create table part_key_range_t1(col1 date, col2 int) partition by range(year(col1 partition p2 values less than(3000) ); select pg_get_tabledef('part_key_range_t1'); - pg_get_tabledef ------------------------------------------------------------------ - SET search_path = partition_test4; + - CREATE TABLE part_key_range_t1 ( + - col1 date, + - col2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (year((col1)::timestamp without time zone)) + - ( + - PARTITION p1 VALUES LESS THAN (2000) TABLESPACE pg_default,+ - PARTITION p2 VALUES LESS THAN (3000) TABLESPACE pg_default + - ) + + pg_get_tabledef +---------------------------------------------------------------- + SET search_path = partition_test4; + + CREATE TABLE part_key_range_t1 ( + + col1 date, + + col2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (year((col1)::timestamp without time zone))+ + ( + + PARTITION p1 VALUES LESS THAN (2000), + + PARTITION p2 VALUES LESS THAN (3000) + + ) + ENABLE ROW MOVEMENT; (1 row) alter table part_key_range_t1 add partition p3 values less than(4000); select pg_get_tabledef('part_key_range_t1'); - pg_get_tabledef ------------------------------------------------------------------ - SET search_path = partition_test4; + - CREATE TABLE part_key_range_t1 ( + - col1 date, + - col2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (year((col1)::timestamp without time zone)) + - ( + - PARTITION p1 VALUES LESS THAN (2000) TABLESPACE pg_default,+ - PARTITION p2 VALUES LESS THAN (3000) TABLESPACE pg_default,+ - PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - ) + + pg_get_tabledef +---------------------------------------------------------------- + SET search_path = partition_test4; + + CREATE TABLE 
part_key_range_t1 ( + + col1 date, + + col2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (year((col1)::timestamp without time zone))+ + ( + + PARTITION p1 VALUES LESS THAN (2000), + + PARTITION p2 VALUES LESS THAN (3000), + + PARTITION p3 VALUES LESS THAN (4000) + + ) + ENABLE ROW MOVEMENT; (1 row) alter table part_key_range_t1 split partition p1 into (partition m1 values less than(1000), partition m2 values less than(2000)); select pg_get_tabledef('part_key_range_t1'); - pg_get_tabledef ------------------------------------------------------------------ - SET search_path = partition_test4; + - CREATE TABLE part_key_range_t1 ( + - col1 date, + - col2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (year((col1)::timestamp without time zone)) + - ( + - PARTITION m1 VALUES LESS THAN (1000) TABLESPACE pg_default,+ - PARTITION m2 VALUES LESS THAN (2000) TABLESPACE pg_default,+ - PARTITION p2 VALUES LESS THAN (3000) TABLESPACE pg_default,+ - PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - ) + + pg_get_tabledef +---------------------------------------------------------------- + SET search_path = partition_test4; + + CREATE TABLE part_key_range_t1 ( + + col1 date, + + col2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (year((col1)::timestamp without time zone))+ + ( + + PARTITION m1 VALUES LESS THAN (1000), + + PARTITION m2 VALUES LESS THAN (2000), + + PARTITION p2 VALUES LESS THAN (3000), + + PARTITION p3 VALUES LESS THAN (4000) + + ) + ENABLE ROW MOVEMENT; (1 row) alter table part_key_range_t1 merge partitions m1,m2 into partition p1; select pg_get_tabledef('part_key_range_t1'); - pg_get_tabledef ------------------------------------------------------------------ - SET search_path = partition_test4; + - CREATE TABLE part_key_range_t1 ( + - col1 date, + - col2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE 
(year((col1)::timestamp without time zone)) + - ( + - PARTITION p1 VALUES LESS THAN (2000) TABLESPACE pg_default,+ - PARTITION p2 VALUES LESS THAN (3000) TABLESPACE pg_default,+ - PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - ) + + pg_get_tabledef +---------------------------------------------------------------- + SET search_path = partition_test4; + + CREATE TABLE part_key_range_t1 ( + + col1 date, + + col2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (year((col1)::timestamp without time zone))+ + ( + + PARTITION p1 VALUES LESS THAN (2000), + + PARTITION p2 VALUES LESS THAN (3000), + + PARTITION p3 VALUES LESS THAN (4000) + + ) + ENABLE ROW MOVEMENT; (1 row) alter table part_key_range_t1 reorganize partition p1,p2 into (partition m1 values less than(1000),partition m2 values less than(3000)); select pg_get_tabledef('part_key_range_t1'); - pg_get_tabledef ------------------------------------------------------------------ - SET search_path = partition_test4; + - CREATE TABLE part_key_range_t1 ( + - col1 date, + - col2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (year((col1)::timestamp without time zone)) + - ( + - PARTITION m1 VALUES LESS THAN (1000) TABLESPACE pg_default,+ - PARTITION m2 VALUES LESS THAN (3000) TABLESPACE pg_default,+ - PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - ) + + pg_get_tabledef +---------------------------------------------------------------- + SET search_path = partition_test4; + + CREATE TABLE part_key_range_t1 ( + + col1 date, + + col2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (year((col1)::timestamp without time zone))+ + ( + + PARTITION m1 VALUES LESS THAN (1000), + + PARTITION m2 VALUES LESS THAN (3000), + + PARTITION p3 VALUES LESS THAN (4000) + + ) + ENABLE ROW MOVEMENT; (1 row) @@ -724,8 +724,8 @@ select pg_get_tabledef('part_key_range_t2'); WITH (orientation=row, compression=no) + 
PARTITION BY RANGE (to_days((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES LESS THAN (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES LESS THAN (80000) TABLESPACE pg_default + + PARTITION p1 VALUES LESS THAN (60000), + + PARTITION p2 VALUES LESS THAN (80000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -742,9 +742,9 @@ select pg_get_tabledef('part_key_range_t2'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (to_days((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES LESS THAN (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES LESS THAN (80000) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (90000) TABLESPACE pg_default + + PARTITION p1 VALUES LESS THAN (60000), + + PARTITION p2 VALUES LESS THAN (80000), + + PARTITION p3 VALUES LESS THAN (90000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -761,10 +761,10 @@ select pg_get_tabledef('part_key_range_t2'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (to_days((col1)::timestamp without time zone))+ ( + - PARTITION m1 VALUES LESS THAN (50000) TABLESPACE pg_default, + - PARTITION m2 VALUES LESS THAN (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES LESS THAN (80000) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (90000) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (50000), + + PARTITION m2 VALUES LESS THAN (60000), + + PARTITION p2 VALUES LESS THAN (80000), + + PARTITION p3 VALUES LESS THAN (90000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -781,9 +781,9 @@ select pg_get_tabledef('part_key_range_t2'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (to_days((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES LESS THAN (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES LESS THAN (80000) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (90000) TABLESPACE pg_default + + PARTITION p1 VALUES LESS THAN (60000), + + PARTITION p2 VALUES LESS THAN (80000), + + PARTITION p3 VALUES LESS THAN (90000) + ) + 
ENABLE ROW MOVEMENT; (1 row) @@ -800,9 +800,9 @@ select pg_get_tabledef('part_key_range_t2'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (to_days((col1)::timestamp without time zone))+ ( + - PARTITION m1 VALUES LESS THAN (70000) TABLESPACE pg_default, + - PARTITION m2 VALUES LESS THAN (80000) TABLESPACE pg_default, + - PARTITION p3 VALUES LESS THAN (90000) TABLESPACE pg_default + + PARTITION m1 VALUES LESS THAN (70000), + + PARTITION m2 VALUES LESS THAN (80000), + + PARTITION p3 VALUES LESS THAN (90000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -823,8 +823,8 @@ select pg_get_tabledef('part_key_list_t1'); WITH (orientation=row, compression=no) + PARTITION BY LIST (year((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES (2000) TABLESPACE pg_default, + - PARTITION p2 VALUES (3000) TABLESPACE pg_default + + PARTITION p1 VALUES (2000), + + PARTITION p2 VALUES (3000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -841,9 +841,9 @@ select pg_get_tabledef('part_key_list_t1'); WITH (orientation=row, compression=no) + PARTITION BY LIST (year((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES (2000) TABLESPACE pg_default, + - PARTITION p2 VALUES (3000) TABLESPACE pg_default, + - PARTITION p3 VALUES (4000) TABLESPACE pg_default + + PARTITION p1 VALUES (2000), + + PARTITION p2 VALUES (3000), + + PARTITION p3 VALUES (4000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -860,9 +860,9 @@ select pg_get_tabledef('part_key_list_t1'); WITH (orientation=row, compression=no) + PARTITION BY LIST (year((col1)::timestamp without time zone))+ ( + - PARTITION m1 VALUES (1000,3000) TABLESPACE pg_default, + - PARTITION m2 VALUES (2000,5000) TABLESPACE pg_default, + - PARTITION p3 VALUES (4000) TABLESPACE pg_default + + PARTITION m1 VALUES (1000,3000), + + PARTITION m2 VALUES (2000,5000), + + PARTITION p3 VALUES (4000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -883,8 +883,8 @@ select pg_get_tabledef('part_key_list_t2'); WITH (orientation=row, compression=no) + PARTITION BY 
LIST (to_days((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES (80000) TABLESPACE pg_default + + PARTITION p1 VALUES (60000), + + PARTITION p2 VALUES (80000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -901,9 +901,9 @@ select pg_get_tabledef('part_key_list_t2'); WITH (orientation=row, compression=no) + PARTITION BY LIST (to_days((col1)::timestamp without time zone))+ ( + - PARTITION p1 VALUES (60000) TABLESPACE pg_default, + - PARTITION p2 VALUES (80000) TABLESPACE pg_default, + - PARTITION p3 VALUES (90000) TABLESPACE pg_default + + PARTITION p1 VALUES (60000), + + PARTITION p2 VALUES (80000), + + PARTITION p3 VALUES (90000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -920,9 +920,9 @@ select pg_get_tabledef('part_key_list_t2'); WITH (orientation=row, compression=no) + PARTITION BY LIST (to_days((col1)::timestamp without time zone))+ ( + - PARTITION m1 VALUES (10000,80000) TABLESPACE pg_default, + - PARTITION m2 VALUES (60000,50000) TABLESPACE pg_default, + - PARTITION p3 VALUES (90000) TABLESPACE pg_default + + PARTITION m1 VALUES (10000,80000), + + PARTITION m2 VALUES (60000,50000), + + PARTITION p3 VALUES (90000) + ) + ENABLE ROW MOVEMENT; (1 row) @@ -950,20 +950,20 @@ partition p2 values less than(63838026065), partition p3 values less than(63838026067) ); select pg_get_tabledef('b_range_at1'); - pg_get_tabledef ------------------------------------------------------------------------- - SET search_path = partition_test4; + - CREATE TABLE b_range_at1 ( + - c1 date, + - c2 integer + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (to_seconds(c1)) + - ( + - PARTITION p1 VALUES LESS THAN (63838026063) TABLESPACE pg_default,+ - PARTITION p2 VALUES LESS THAN (63838026065) TABLESPACE pg_default,+ - PARTITION p3 VALUES LESS THAN (63838026067) TABLESPACE pg_default + - ) + + pg_get_tabledef +-------------------------------------------------- + SET search_path = partition_test4; + + CREATE 
TABLE b_range_at1 ( + + c1 date, + + c2 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (to_seconds(c1)) + + ( + + PARTITION p1 VALUES LESS THAN (63838026063),+ + PARTITION p2 VALUES LESS THAN (63838026065),+ + PARTITION p3 VALUES LESS THAN (63838026067) + + ) + ENABLE ROW MOVEMENT; (1 row) diff --git a/contrib/dolphin/expected/show_create.out b/contrib/dolphin/expected/show_create.out index 9fe891e03..44bd02381 100644 --- a/contrib/dolphin/expected/show_create.out +++ b/contrib/dolphin/expected/show_create.out @@ -68,22 +68,22 @@ partition by range (id) partition table_range1_p4 values less than(maxvalue) ); show create table table_range1; - Table | Create Table ---------------+--------------------------------------------------------------------------------- - table_range1 | SET search_path = test_get_def; + - | CREATE TABLE table_range1 ( + - | id integer, + - | a date, + - | b character varying + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (id) + - | ( + - | PARTITION table_range1_p1 VALUES LESS THAN (10) TABLESPACE pg_default, + - | PARTITION table_range1_p2 VALUES LESS THAN (50) TABLESPACE pg_default, + - | PARTITION table_range1_p3 VALUES LESS THAN (100) TABLESPACE pg_default, + - | PARTITION table_range1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + + Table | Create Table +--------------+----------------------------------------------------------- + table_range1 | SET search_path = test_get_def; + + | CREATE TABLE table_range1 ( + + | id integer, + + | a date, + + | b character varying + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (id) + + | ( + + | PARTITION table_range1_p1 VALUES LESS THAN (10), + + | PARTITION table_range1_p2 VALUES LESS THAN (50), + + | PARTITION table_range1_p3 VALUES LESS THAN (100), + + | PARTITION table_range1_p4 VALUES LESS THAN (MAXVALUE)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -97,21 +97,21 @@ partition by list (id) 
partition table_list1_p3 values (9, 10, 11, 12) ); show create table table_list1; - Table | Create Table --------------+------------------------------------------------------------------------ - table_list1 | SET search_path = test_get_def; + - | CREATE TABLE table_list1 ( + - | id integer, + - | a date, + - | b character varying + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY LIST (id) + - | ( + - | PARTITION table_list1_p1 VALUES (1,2,3,4) TABLESPACE pg_default, + - | PARTITION table_list1_p2 VALUES (5,6,7,8) TABLESPACE pg_default, + - | PARTITION table_list1_p3 VALUES (9,10,11,12) TABLESPACE pg_default+ - | ) + + Table | Create Table +-------------+-------------------------------------------------- + table_list1 | SET search_path = test_get_def; + + | CREATE TABLE table_list1 ( + + | id integer, + + | a date, + + | b character varying + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY LIST (id) + + | ( + + | PARTITION table_list1_p1 VALUES (1,2,3,4), + + | PARTITION table_list1_p2 VALUES (5,6,7,8), + + | PARTITION table_list1_p3 VALUES (9,10,11,12)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -125,21 +125,21 @@ partition by hash (id) partition table_hash1_p3 ); show create table table_hash1; - Table | Create Table --------------+----------------------------------------------------- - table_hash1 | SET search_path = test_get_def; + - | CREATE TABLE table_hash1 ( + - | id integer, + - | a date, + - | b character varying + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY HASH (id) + - | ( + - | PARTITION table_hash1_p1 TABLESPACE pg_default,+ - | PARTITION table_hash1_p2 TABLESPACE pg_default,+ - | PARTITION table_hash1_p3 TABLESPACE pg_default + - | ) + + Table | Create Table +-------------+---------------------------------------- + table_hash1 | SET search_path = test_get_def; + + | CREATE TABLE table_hash1 ( + + | id integer, + + | a date, + + | b character varying + + | ) + + | WITH 
(orientation=row, compression=no)+ + | PARTITION BY HASH (id) + + | ( + + | PARTITION table_hash1_p1, + + | PARTITION table_hash1_p2, + + | PARTITION table_hash1_p3 + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -202,46 +202,46 @@ show create table list_range_1; | WITH (orientation=row, compression=no) + | PARTITION BY LIST (col_1) SUBPARTITION BY RANGE (col_2) + | ( + - | PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) TABLESPACE pg_default + + | PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + | ( + - | SUBPARTITION p_range_1_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + - | SUBPARTITION p_range_1_2 VALUES LESS THAN (0) TABLESPACE pg_default, + - | SUBPARTITION p_range_1_3 VALUES LESS THAN (10) TABLESPACE pg_default, + - | SUBPARTITION p_range_1_4 VALUES LESS THAN (20) TABLESPACE pg_default, + - | SUBPARTITION p_range_1_5 VALUES LESS THAN (50) TABLESPACE pg_default + + | SUBPARTITION p_range_1_1 VALUES LESS THAN (-10), + + | SUBPARTITION p_range_1_2 VALUES LESS THAN (0), + + | SUBPARTITION p_range_1_3 VALUES LESS THAN (10), + + | SUBPARTITION p_range_1_4 VALUES LESS THAN (20), + + | SUBPARTITION p_range_1_5 VALUES LESS THAN (50) + | ), + - | PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) TABLESPACE pg_default + + | PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) + | ( + - | SUBPARTITION p_list_2_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | SUBPARTITION p_list_2_subpartdefault1 VALUES LESS THAN (MAXVALUE) + | ), + - | PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) TABLESPACE pg_default + + | PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + | ( + - | SUBPARTITION p_range_3_1 VALUES LESS THAN (15) TABLESPACE pg_default, + - | SUBPARTITION p_range_3_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | SUBPARTITION p_range_3_1 VALUES LESS THAN (15), + + | SUBPARTITION p_range_3_2 VALUES LESS THAN (MAXVALUE) + | ), + - | PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) TABLESPACE 
pg_default + + | PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + | ( + - | SUBPARTITION p_range_4_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + - | SUBPARTITION p_range_4_2 VALUES LESS THAN (0) TABLESPACE pg_default, + - | SUBPARTITION p_range_4_3 VALUES LESS THAN (10) TABLESPACE pg_default, + - | SUBPARTITION p_range_4_4 VALUES LESS THAN (20) TABLESPACE pg_default, + - | SUBPARTITION p_range_4_5 VALUES LESS THAN (50) TABLESPACE pg_default + + | SUBPARTITION p_range_4_1 VALUES LESS THAN (-10), + + | SUBPARTITION p_range_4_2 VALUES LESS THAN (0), + + | SUBPARTITION p_range_4_3 VALUES LESS THAN (10), + + | SUBPARTITION p_range_4_4 VALUES LESS THAN (20), + + | SUBPARTITION p_range_4_5 VALUES LESS THAN (50) + | ), + - | PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40) TABLESPACE pg_default + + | PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40) + | ( + - | SUBPARTITION p_list_5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | SUBPARTITION p_list_5_subpartdefault1 VALUES LESS THAN (MAXVALUE) + | ), + - | PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) TABLESPACE pg_default + + | PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + | ( + - | SUBPARTITION p_range_6_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + - | SUBPARTITION p_range_6_2 VALUES LESS THAN (0) TABLESPACE pg_default, + - | SUBPARTITION p_range_6_3 VALUES LESS THAN (10) TABLESPACE pg_default, + - | SUBPARTITION p_range_6_4 VALUES LESS THAN (20) TABLESPACE pg_default, + - | SUBPARTITION p_range_6_5 VALUES LESS THAN (50) TABLESPACE pg_default + + | SUBPARTITION p_range_6_1 VALUES LESS THAN (-10), + + | SUBPARTITION p_range_6_2 VALUES LESS THAN (0), + + | SUBPARTITION p_range_6_3 VALUES LESS THAN (10), + + | SUBPARTITION p_range_6_4 VALUES LESS THAN (20), + + | SUBPARTITION p_range_6_5 VALUES LESS THAN (50) + | ), + - | PARTITION p_list_7 VALUES (DEFAULT) TABLESPACE pg_default + + | PARTITION p_list_7 VALUES (DEFAULT) + | ( + - | 
SUBPARTITION p_list_7_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | SUBPARTITION p_list_7_subpartdefault1 VALUES LESS THAN (MAXVALUE) + | ) + | ) + | ENABLE ROW MOVEMENT; + @@ -308,44 +308,44 @@ show create table list_hash_2; | WITH (orientation=row, compression=no) + | PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) + | ( + - | PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) TABLESPACE pg_default + + | PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + | ( + - | SUBPARTITION p_hash_1_1 TABLESPACE pg_default, + - | SUBPARTITION p_hash_1_2 TABLESPACE pg_default, + - | SUBPARTITION p_hash_1_3 TABLESPACE pg_default + + | SUBPARTITION p_hash_1_1, + + | SUBPARTITION p_hash_1_2, + + | SUBPARTITION p_hash_1_3 + | ), + - | PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) TABLESPACE pg_default + + | PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) + | ( + - | SUBPARTITION p_list_2_subpartdefault1 TABLESPACE pg_default + + | SUBPARTITION p_list_2_subpartdefault1 + | ), + - | PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) TABLESPACE pg_default + + | PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + | ( + - | SUBPARTITION p_hash_3_1 TABLESPACE pg_default, + - | SUBPARTITION p_hash_3_2 TABLESPACE pg_default + + | SUBPARTITION p_hash_3_1, + + | SUBPARTITION p_hash_3_2 + | ), + - | PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) TABLESPACE pg_default + + | PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + | ( + - | SUBPARTITION p_hash_4_1 TABLESPACE pg_default, + - | SUBPARTITION p_hash_4_2 TABLESPACE pg_default, + - | SUBPARTITION p_hash_4_3 TABLESPACE pg_default, + - | SUBPARTITION p_hash_4_4 TABLESPACE pg_default, + - | SUBPARTITION p_hash_4_5 TABLESPACE pg_default + + | SUBPARTITION p_hash_4_1, + + | SUBPARTITION p_hash_4_2, + + | SUBPARTITION p_hash_4_3, + + | SUBPARTITION p_hash_4_4, + + | SUBPARTITION p_hash_4_5 + | ), + - | PARTITION p_list_5 VALUES 
(31,32,33,34,35,36,37,38,39,40) TABLESPACE pg_default + + | PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40) + | ( + - | SUBPARTITION p_list_5_subpartdefault1 TABLESPACE pg_default + + | SUBPARTITION p_list_5_subpartdefault1 + | ), + - | PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) TABLESPACE pg_default + + | PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + | ( + - | SUBPARTITION p_hash_6_1 TABLESPACE pg_default, + - | SUBPARTITION p_hash_6_2 TABLESPACE pg_default, + - | SUBPARTITION p_hash_6_3 TABLESPACE pg_default, + - | SUBPARTITION p_hash_6_4 TABLESPACE pg_default, + - | SUBPARTITION p_hash_6_5 TABLESPACE pg_default + + | SUBPARTITION p_hash_6_1, + + | SUBPARTITION p_hash_6_2, + + | SUBPARTITION p_hash_6_3, + + | SUBPARTITION p_hash_6_4, + + | SUBPARTITION p_hash_6_5 + | ), + - | PARTITION p_list_7 VALUES (DEFAULT) TABLESPACE pg_default + + | PARTITION p_list_7 VALUES (DEFAULT) + | ( + - | SUBPARTITION p_list_7_subpartdefault1 TABLESPACE pg_default + + | SUBPARTITION p_list_7_subpartdefault1 + | ) + | ) + | ENABLE ROW MOVEMENT; + @@ -434,23 +434,23 @@ interval ('1 day') partition table_interval1_p4 values less than(maxvalue) ); show create table table_interval1; - Table | Create Table ------------------+----------------------------------------------------------------------------------------- - table_interval1 | SET search_path = test_get_def; + - | CREATE TABLE table_interval1 ( + - | id integer, + - | a date, + - | b character varying + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (a) + - | INTERVAL ('1 day') + - | ( + - | PARTITION table_interval1_p1 VALUES LESS THAN ('2020-03-01') TABLESPACE pg_default,+ - | PARTITION table_interval1_p2 VALUES LESS THAN ('2020-05-01') TABLESPACE pg_default,+ - | PARTITION table_interval1_p3 VALUES LESS THAN ('2020-07-01') TABLESPACE pg_default,+ - | PARTITION table_interval1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ) + + Table | Create Table 
+-----------------+------------------------------------------------------------------- + table_interval1 | SET search_path = test_get_def; + + | CREATE TABLE table_interval1 ( + + | id integer, + + | a date, + + | b character varying + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (a) + + | INTERVAL ('1 day') + + | ( + + | PARTITION table_interval1_p1 VALUES LESS THAN ('2020-03-01'),+ + | PARTITION table_interval1_p2 VALUES LESS THAN ('2020-05-01'),+ + | PARTITION table_interval1_p3 VALUES LESS THAN ('2020-07-01'),+ + | PARTITION table_interval1_p4 VALUES LESS THAN (MAXVALUE) + + | ) + | ENABLE ROW MOVEMENT; (1 row) diff --git a/contrib/dolphin/expected/test_enum_collation.out b/contrib/dolphin/expected/test_enum_collation.out index 9b95ee2a0..e26825ea6 100644 --- a/contrib/dolphin/expected/test_enum_collation.out +++ b/contrib/dolphin/expected/test_enum_collation.out @@ -971,8 +971,8 @@ select pg_get_tabledef('test_part_tab1'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (c1) + ( + - PARTITION test_enum_tab_p1 VALUES LESS THAN (5) TABLESPACE pg_default, + - PARTITION test_enum_tab_p2 VALUES LESS THAN (10) TABLESPACE pg_default + + PARTITION test_enum_tab_p1 VALUES LESS THAN (5), + + PARTITION test_enum_tab_p2 VALUES LESS THAN (10) + ) + ENABLE ROW MOVEMENT; (1 row) diff --git a/contrib/dolphin/expected/uint_auto_increment.out b/contrib/dolphin/expected/uint_auto_increment.out index 351ca0653..1a17b8334 100644 --- a/contrib/dolphin/expected/uint_auto_increment.out +++ b/contrib/dolphin/expected/uint_auto_increment.out @@ -2157,34 +2157,34 @@ SELECT col1 FROM test_part_autoinc_pk ORDER BY 1; (18 rows) SELECT pg_catalog.pg_get_tabledef('test_part_autoinc_pk'); - pg_get_tabledef ------------------------------------------------------------------------------------ - SET search_path = uint_auto_increment; + - CREATE TABLE test_part_autoinc_pk ( + - col1 integer AUTO_INCREMENT NOT NULL, + - col2 integer NOT NULL, + - col3 
integer NOT NULL, + - CONSTRAINT test_part_autoinc_pk_pkey PRIMARY KEY (col1, col2) + - ) AUTO_INCREMENT = 3002 + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (col1) SUBPARTITION BY HASH (col2) + - ( + - PARTITION col1_less_1000 VALUES LESS THAN (1000) TABLESPACE pg_default + - ( + - SUBPARTITION p1_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p1_col2_hash2 TABLESPACE pg_default + - ), + - PARTITION col1_mid_1000 VALUES LESS THAN (2000) TABLESPACE pg_default + - ( + - SUBPARTITION p2_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p2_col2_hash2 TABLESPACE pg_default + - ), + - PARTITION col1_greater_2000 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - ( + - SUBPARTITION p3_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p3_col2_hash2 TABLESPACE pg_default + - ) + - ) + + pg_get_tabledef +------------------------------------------------------------------- + SET search_path = uint_auto_increment; + + CREATE TABLE test_part_autoinc_pk ( + + col1 integer AUTO_INCREMENT NOT NULL, + + col2 integer NOT NULL, + + col3 integer NOT NULL, + + CONSTRAINT test_part_autoinc_pk_pkey PRIMARY KEY (col1, col2)+ + ) AUTO_INCREMENT = 3002 + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (col1) SUBPARTITION BY HASH (col2) + + ( + + PARTITION col1_less_1000 VALUES LESS THAN (1000) + + ( + + SUBPARTITION p1_col2_hash1, + + SUBPARTITION p1_col2_hash2 + + ), + + PARTITION col1_mid_1000 VALUES LESS THAN (2000) + + ( + + SUBPARTITION p2_col2_hash1, + + SUBPARTITION p2_col2_hash2 + + ), + + PARTITION col1_greater_2000 VALUES LESS THAN (MAXVALUE) + + ( + + SUBPARTITION p3_col2_hash1, + + SUBPARTITION p3_col2_hash2 + + ) + + ) + ENABLE ROW MOVEMENT; (1 row) @@ -2484,20 +2484,20 @@ SELECT pg_catalog.pg_get_tabledef('test_part_autoinc_unique'); WITH (orientation=row, compression=no) + PARTITION BY RANGE (col2) SUBPARTITION BY HASH (col3) + ( + - PARTITION col1_less_1000 VALUES LESS THAN (1000) TABLESPACE pg_default + + PARTITION col1_less_1000 
VALUES LESS THAN (1000) + ( + - SUBPARTITION p1_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p1_col2_hash2 TABLESPACE pg_default + + SUBPARTITION p1_col2_hash1, + + SUBPARTITION p1_col2_hash2 + ), + - PARTITION col1_mid_1000 VALUES LESS THAN (2000) TABLESPACE pg_default + + PARTITION col1_mid_1000 VALUES LESS THAN (2000) + ( + - SUBPARTITION p2_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p2_col2_hash2 TABLESPACE pg_default + + SUBPARTITION p2_col2_hash1, + + SUBPARTITION p2_col2_hash2 + ), + - PARTITION col1_greater_2000 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + PARTITION col1_greater_2000 VALUES LESS THAN (MAXVALUE) + ( + - SUBPARTITION p3_col2_hash1 TABLESPACE pg_default, + - SUBPARTITION p3_col2_hash2 TABLESPACE pg_default + + SUBPARTITION p3_col2_hash1, + + SUBPARTITION p3_col2_hash2 + ) + ) + ENABLE ROW MOVEMENT; diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index 5affd20ce..ded5c7c4d 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -3116,6 +3116,10 @@ "format_type", 1, AddBuiltinFunc(_0(1081), _1("format_type"), _2(2), _3(false), _4(false), _5(format_type), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 26, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("format_type"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("format a type oid and atttypmod to canonical SQL"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "generate_procoverage_report", 1, + AddBuiltinFunc(_0(5734), _1("generate_procoverage_report"), _2(2), _3(true), _4(false), _5(generate_procoverage_report), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), 
_15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 20, 20), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("generate_procoverage_report"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "generate_series", 8, AddBuiltinFunc(_0(938), _1("generate_series"), _2(3), _3(true), _4(true), _5(generate_series_timestamp), _6(1114), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 1114, 1114, 1186), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("generate_series_timestamp"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("non-persistent series generator"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes.h b/contrib/dolphin/include/plugin_nodes/parsenodes.h index 68a5c8119..179eaffec 100755 --- a/contrib/dolphin/include/plugin_nodes/parsenodes.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes.h @@ -1499,6 +1499,7 @@ typedef struct UnlistenStmt { */ typedef struct CompositeTypeStmt { NodeTag type; + bool replace; RangeVar* typevar; /* the composite type to be created */ List* coldeflist; /* list of ColumnDef nodes */ } CompositeTypeStmt; @@ -1509,6 +1510,7 @@ typedef struct CompositeTypeStmt { */ typedef struct TableOfTypeStmt { NodeTag type; + bool replace; List* typname; /* the table of type to be quoted */ TypeName* reftypname; /* the name of the type being referenced */ } TableOfTypeStmt; diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 6e41d464c..25f193d18 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ 
-814,6 +814,7 @@ typedef struct AlterTableStmt { ObjectType relkind; /* type of object */ bool missing_ok; /* skip error if table missing */ bool fromCreate; /* from create stmt */ + bool fromReplace; /* from create or replace stmt */ bool need_rewrite_sql; /* after rewrite rule, need to rewrite query string */ } AlterTableStmt; diff --git a/contrib/dolphin/include/plugin_parser/parse_relation.h b/contrib/dolphin/include/plugin_parser/parse_relation.h index 92abe771e..c85a9e53e 100644 --- a/contrib/dolphin/include/plugin_parser/parse_relation.h +++ b/contrib/dolphin/include/plugin_parser/parse_relation.h @@ -58,7 +58,7 @@ extern Oid attnumTypeId(Relation rd, int attid); extern Oid attnumCollationId(Relation rd, int attid); extern bool GetPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); extern bool GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); -extern bool ValidateDependView(Oid view_oid, char objType); +extern bool ValidateDependView(Oid view_oid, char obj_type); #ifdef PGXC extern int specialAttNum(const char* attname); #endif diff --git a/contrib/dolphin/include/plugin_parser/parsetree.h b/contrib/dolphin/include/plugin_parser/parsetree.h index 05daf9515..6f54f4281 100644 --- a/contrib/dolphin/include/plugin_parser/parsetree.h +++ b/contrib/dolphin/include/plugin_parser/parsetree.h @@ -41,7 +41,7 @@ * Given an RTE and an attribute number, return the appropriate * variable name or alias for that attribute of that RTE. 
*/ -extern char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum); +extern char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum, bool allowDroppeed = false); /* * Given an RTE and an attribute number, return the appropriate diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index 2740008c1..5ff2940aa 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -34,6 +34,7 @@ **********************************************************************/ #define TABLEOFINDEXBUCKETNUM 128 +#define MAX_INT32_LEN 11 /* * Compile status mark @@ -2061,6 +2062,14 @@ typedef struct CursorRecordType { Oid type_oid; } CursorRecordType; +typedef enum { + PRO_NAME_COL, + DB_NAME_COL, + COVERAGE_ARR_COL, + PRO_QUERYS_COL, + COVERAGE_COL +} CoverageColumn; + /* Quick access array state */ #define IS_ARRAY_STATE(state_list, state) ((state_list && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) ? 
\ (linitial_int(state_list) == state) : false) @@ -2112,6 +2121,7 @@ extern void examine_parameter_list(List* parameters, Oid languageOid, const char extern void compute_return_type( TypeName* returnType, Oid languageOid, Oid* prorettype_p, bool* returnsSet_p, bool fenced, int startLineNumber, TypeDependExtend* type_depend_extend, bool is_refresh_head); +extern CodeLine* debug_show_code_worker(Oid funcid, uint32* num, int* headerlines); void plpgsql_free_override_stack(int depth); #endif /* PLPGSQL_H */ diff --git a/contrib/dolphin/output/test_table_partition_ddl_import_and_export.source b/contrib/dolphin/output/test_table_partition_ddl_import_and_export.source index 59a210fff..1be3e7baa 100644 --- a/contrib/dolphin/output/test_table_partition_ddl_import_and_export.source +++ b/contrib/dolphin/output/test_table_partition_ddl_import_and_export.source @@ -157,72 +157,72 @@ create table t_create_table_partition_022 like t_create_table_partition_006 excl create table t_create_table_partition_023 like t_create_table_partition_006 excluding all; create table t_create_table_partition_024 like t_create_table_partition_006; show create table t_create_table_partition_001; - Table | Create Table -------------------------------+-------------------------------------------------------------------- - t_create_table_partition_001 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_001 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) + - | ( + - | PARTITION p0 VALUES LESS THAN (18) TABLESPACE pg_default, + - | PARTITION p1 VALUES LESS THAN (35) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + + Table | Create Table +------------------------------+---------------------------------------------- + t_create_table_partition_001 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_001 ( + + | col1 integer, + + | col2 
integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) + + | ( + + | PARTITION p0 VALUES LESS THAN (18), + + | PARTITION p1 VALUES LESS THAN (35), + + | PARTITION p2 VALUES LESS THAN (MAXVALUE)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_002; - Table | Create Table -------------------------------+----------------------------------------------------------------------------- - t_create_table_partition_002 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_002 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p0 VALUES LESS THAN (18) TABLESPACE pg_default + - | ( + - | SUBPARTITION p0_0 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | SUBPARTITION p0_1 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | SUBPARTITION p0_2 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p1 VALUES LESS THAN (35) TABLESPACE pg_default + - | ( + - | SUBPARTITION p1_0 VALUES LESS THAN (5000) TABLESPACE pg_default, + - | SUBPARTITION p1_1 VALUES LESS THAN (6000) TABLESPACE pg_default, + - | SUBPARTITION p1_2 VALUES LESS THAN (8000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p2_0 VALUES LESS THAN (10000) TABLESPACE pg_default, + - | SUBPARTITION p2_1 VALUES LESS THAN (20000) TABLESPACE pg_default, + - | SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_002 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_002 ( + + | col1 integer, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( 
+ + | PARTITION p0 VALUES LESS THAN (18) + + | ( + + | SUBPARTITION p0_0 VALUES LESS THAN (1000), + + | SUBPARTITION p0_1 VALUES LESS THAN (2000), + + | SUBPARTITION p0_2 VALUES LESS THAN (4000) + + | ), + + | PARTITION p1 VALUES LESS THAN (35) + + | ( + + | SUBPARTITION p1_0 VALUES LESS THAN (5000), + + | SUBPARTITION p1_1 VALUES LESS THAN (6000), + + | SUBPARTITION p1_2 VALUES LESS THAN (8000) + + | ), + + | PARTITION p2 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p2_0 VALUES LESS THAN (10000), + + | SUBPARTITION p2_1 VALUES LESS THAN (20000), + + | SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_003; - Table | Create Table -------------------------------+------------------------------------------------------- - t_create_table_partition_003 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_003 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY LIST (col1) + - | ( + - | PARTITION p1 VALUES (1000) TABLESPACE pg_default,+ - | PARTITION p2 VALUES (2000) TABLESPACE pg_default,+ - | PARTITION p3 VALUES (4000) TABLESPACE pg_default,+ - | PARTITION p4 VALUES (8000) TABLESPACE pg_default + - | ) + + Table | Create Table +------------------------------+--------------------------------------------- + t_create_table_partition_003 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_003 (+ + | col1 integer, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY LIST (col1) + + | ( + + | PARTITION p1 VALUES (1000), + + | PARTITION p2 VALUES (2000), + + | PARTITION p3 VALUES (4000), + + | PARTITION p4 VALUES (8000) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -236,10 +236,10 @@ show create table t_create_table_partition_004; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) 
TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_004_c_idx ON t_create_table_partition_004 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -256,48 +256,48 @@ show create table t_create_table_partition_005; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_005_c_idx ON t_create_table_partition_005 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_006; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_006 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_006 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, 
compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_006 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_006 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) 
+ + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -311,10 +311,10 @@ show create table t_create_table_partition_007; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_007_c_idx ON t_create_table_partition_007 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -330,10 +330,10 @@ show create table t_create_table_partition_008; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_008_c_idx ON t_create_table_partition_008 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -349,10 +349,10 @@ show create table t_create_table_partition_009; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 
VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_009_c_idx ON t_create_table_partition_009 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -368,11 +368,11 @@ show create table t_create_table_partition_010; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_010_c_idx ON t_create_table_partition_010 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -388,11 +388,11 @@ show create table t_create_table_partition_011; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 
VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_011_c_idx ON t_create_table_partition_011 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -409,118 +409,118 @@ show create table t_create_table_partition_012; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_012_c_idx ON t_create_table_partition_012 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_013; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_013 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_013 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 
VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_013 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_013 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_014; - Table | Create Table 
-------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_014 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_014 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_014 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_014 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | 
PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_015; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_015 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_015 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_015 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_015 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH 
(orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -534,10 +534,10 @@ show create table t_create_table_partition_016; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_016_c_idx ON t_create_table_partition_016 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -553,10 +553,10 @@ show create table t_create_table_partition_017; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE 
pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_017_c_idx ON t_create_table_partition_017 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -572,31 +572,31 @@ show create table t_create_table_partition_018; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_018_c_idx ON t_create_table_partition_018 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_019; - Table | Create Table -------------------------------+-------------------------------------------------------------------- - t_create_table_partition_019 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_019 ( + - | c integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (c) + - | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + + 
Table | Create Table +------------------------------+---------------------------------------------- + t_create_table_partition_019 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_019 ( + + | c integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (c) + + | ( + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -610,11 +610,11 @@ show create table t_create_table_partition_020; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_020_c_idx ON t_create_table_partition_020 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -630,48 +630,48 @@ show create table t_create_table_partition_021; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN 
(MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_021_c_idx ON t_create_table_partition_021 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_022; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_022 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_022 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- 
+ t_create_table_partition_022 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_022 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -687,37 +687,37 @@ show create table t_create_table_partition_023; (1 row) show create table t_create_table_partition_024; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_024 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_024 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - 
| SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_024 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_024 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -1664,72 +1664,72 @@ drop table t_alter_table_partition_034; 0 \c restore_table_partition_db show create table t_create_table_partition_001; - Table | Create Table -------------------------------+-------------------------------------------------------------------- - t_create_table_partition_001 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_001 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) + - | ( + - | PARTITION p0 VALUES LESS THAN (18) TABLESPACE 
pg_default, + - | PARTITION p1 VALUES LESS THAN (35) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + + Table | Create Table +------------------------------+---------------------------------------------- + t_create_table_partition_001 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_001 ( + + | col1 integer, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) + + | ( + + | PARTITION p0 VALUES LESS THAN (18), + + | PARTITION p1 VALUES LESS THAN (35), + + | PARTITION p2 VALUES LESS THAN (MAXVALUE)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_002; - Table | Create Table -------------------------------+----------------------------------------------------------------------------- - t_create_table_partition_002 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_002 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p0 VALUES LESS THAN (18) TABLESPACE pg_default + - | ( + - | SUBPARTITION p0_0 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | SUBPARTITION p0_1 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | SUBPARTITION p0_2 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p1 VALUES LESS THAN (35) TABLESPACE pg_default + - | ( + - | SUBPARTITION p1_0 VALUES LESS THAN (5000) TABLESPACE pg_default, + - | SUBPARTITION p1_1 VALUES LESS THAN (6000) TABLESPACE pg_default, + - | SUBPARTITION p1_2 VALUES LESS THAN (8000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p2_0 VALUES LESS THAN (10000) TABLESPACE pg_default, + - | SUBPARTITION p2_1 VALUES LESS THAN (20000) TABLESPACE pg_default, + - | SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ 
- | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_002 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_002 ( + + | col1 integer, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p0 VALUES LESS THAN (18) + + | ( + + | SUBPARTITION p0_0 VALUES LESS THAN (1000), + + | SUBPARTITION p0_1 VALUES LESS THAN (2000), + + | SUBPARTITION p0_2 VALUES LESS THAN (4000) + + | ), + + | PARTITION p1 VALUES LESS THAN (35) + + | ( + + | SUBPARTITION p1_0 VALUES LESS THAN (5000), + + | SUBPARTITION p1_1 VALUES LESS THAN (6000), + + | SUBPARTITION p1_2 VALUES LESS THAN (8000) + + | ), + + | PARTITION p2 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p2_0 VALUES LESS THAN (10000), + + | SUBPARTITION p2_1 VALUES LESS THAN (20000), + + | SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_003; - Table | Create Table -------------------------------+------------------------------------------------------- - t_create_table_partition_003 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_003 ( + - | col1 integer, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY LIST (col1) + - | ( + - | PARTITION p1 VALUES (1000) TABLESPACE pg_default,+ - | PARTITION p2 VALUES (2000) TABLESPACE pg_default,+ - | PARTITION p3 VALUES (4000) TABLESPACE pg_default,+ - | PARTITION p4 VALUES (8000) TABLESPACE pg_default + - | ) + + Table | Create Table +------------------------------+--------------------------------------------- + t_create_table_partition_003 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_003 (+ + | col1 integer, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | 
PARTITION BY LIST (col1) + + | ( + + | PARTITION p1 VALUES (1000), + + | PARTITION p2 VALUES (2000), + + | PARTITION p3 VALUES (4000), + + | PARTITION p4 VALUES (8000) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -1743,10 +1743,10 @@ show create table t_create_table_partition_004; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_004_c_idx ON t_create_table_partition_004 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1763,48 +1763,48 @@ show create table t_create_table_partition_005; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_005_c_idx ON t_create_table_partition_005 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE 
pg_default; (1 row) show create table t_create_table_partition_006; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_006 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_006 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_006 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_006 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 
VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -1818,10 +1818,10 @@ show create table t_create_table_partition_007; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_007_c_idx ON t_create_table_partition_007 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1837,10 +1837,10 @@ show create table t_create_table_partition_008; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_008_c_idx ON t_create_table_partition_008 USING btree (c) 
LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1856,10 +1856,10 @@ show create table t_create_table_partition_009; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_009_c_idx ON t_create_table_partition_009 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1875,11 +1875,11 @@ show create table t_create_table_partition_010; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_010_c_idx ON t_create_table_partition_010 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1895,11 +1895,11 @@ show create table t_create_table_partition_011; | WITH 
(orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_011_c_idx ON t_create_table_partition_011 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -1916,118 +1916,118 @@ show create table t_create_table_partition_012; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_012_c_idx ON t_create_table_partition_012 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_013; - Table | Create Table 
-------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_013 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_013 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_013 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_013 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | 
PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_014; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_014 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_014 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_014 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_014 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH 
(orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) show create table t_create_table_partition_015; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_015 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_015 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES 
LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_015 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_015 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -2041,10 +2041,10 @@ show create table t_create_table_partition_016; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_016_c_idx ON t_create_table_partition_016 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) 
TABLESPACE pg_default; @@ -2060,10 +2060,10 @@ show create table t_create_table_partition_017; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_017_c_idx ON t_create_table_partition_017 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -2079,31 +2079,31 @@ show create table t_create_table_partition_018; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_018_c_idx ON t_create_table_partition_018 USING btree (c) LOCAL(PARTITION p1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_019; - Table | Create Table -------------------------------+-------------------------------------------------------------------- - t_create_table_partition_019 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_019 ( + - | c integer + - | ) 
+ - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (c) + - | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + + Table | Create Table +------------------------------+---------------------------------------------- + t_create_table_partition_019 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_019 ( + + | c integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (c) + + | ( + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE)+ + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -2117,11 +2117,11 @@ show create table t_create_table_partition_020; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_020_c_idx ON t_create_table_partition_020 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; @@ -2137,48 +2137,48 @@ 
show create table t_create_table_partition_021; | WITH (orientation=row, compression=no) + | PARTITION BY RANGE (c) + | ( + - | PARTITION p1_0 VALUES LESS THAN (0) TABLESPACE pg_default, + - | PARTITION p1_1 VALUES LESS THAN (1000) TABLESPACE pg_default, + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default, + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default, + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + | PARTITION p1_0 VALUES LESS THAN (0), + + | PARTITION p1_1 VALUES LESS THAN (1000), + + | PARTITION p2 VALUES LESS THAN (2000), + + | PARTITION p3 VALUES LESS THAN (4000), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + | ) + | ENABLE ROW MOVEMENT; + | CREATE INDEX t_create_table_partition_021_c_idx ON t_create_table_partition_021 USING btree (c) LOCAL(PARTITION p1_0_c_idx, PARTITION p1_1_c_idx, PARTITION p2_c_idx, PARTITION p3_c_idx, PARTITION p4_c_idx) TABLESPACE pg_default; (1 row) show create table t_create_table_partition_022; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_022 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_022 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - 
| SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_022 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_022 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) @@ -2194,37 +2194,37 @@ show create table t_create_table_partition_023; (1 row) show create table t_create_table_partition_024; - Table | Create Table -------------------------------+---------------------------------------------------------------------------- - t_create_table_partition_024 | SET search_path = public; + - | CREATE TABLE t_create_table_partition_024 ( + - | col1 integer NOT NULL, + - | col2 integer + - | ) + - | WITH (orientation=row, compression=no) + - | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2) + - | ( + - | PARTITION p1 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ( + - | SUBPARTITION 
p11 VALUES LESS THAN (500) TABLESPACE pg_default, + - | SUBPARTITION p12 VALUES LESS THAN (1000) TABLESPACE pg_default + - | ), + - | PARTITION p2 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p21 VALUES LESS THAN (1500) TABLESPACE pg_default, + - | SUBPARTITION p22 VALUES LESS THAN (2000) TABLESPACE pg_default + - | ), + - | PARTITION p3 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ( + - | SUBPARTITION p31 VALUES LESS THAN (3000) TABLESPACE pg_default, + - | SUBPARTITION p32 VALUES LESS THAN (4000) TABLESPACE pg_default + - | ), + - | PARTITION p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - | ( + - | SUBPARTITION p41 VALUES LESS THAN (8000) TABLESPACE pg_default, + - | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - | ) + - | ) + + Table | Create Table +------------------------------+-------------------------------------------------------- + t_create_table_partition_024 | SET search_path = public; + + | CREATE TABLE t_create_table_partition_024 ( + + | col1 integer NOT NULL, + + | col2 integer + + | ) + + | WITH (orientation=row, compression=no) + + | PARTITION BY RANGE (col1) SUBPARTITION BY RANGE (col2)+ + | ( + + | PARTITION p1 VALUES LESS THAN (1000) + + | ( + + | SUBPARTITION p11 VALUES LESS THAN (500), + + | SUBPARTITION p12 VALUES LESS THAN (1000) + + | ), + + | PARTITION p2 VALUES LESS THAN (2000) + + | ( + + | SUBPARTITION p21 VALUES LESS THAN (1500), + + | SUBPARTITION p22 VALUES LESS THAN (2000) + + | ), + + | PARTITION p3 VALUES LESS THAN (4000) + + | ( + + | SUBPARTITION p31 VALUES LESS THAN (3000), + + | SUBPARTITION p32 VALUES LESS THAN (4000) + + | ), + + | PARTITION p4 VALUES LESS THAN (MAXVALUE) + + | ( + + | SUBPARTITION p41 VALUES LESS THAN (8000), + + | SUBPARTITION p42 VALUES LESS THAN (MAXVALUE) + + | ) + + | ) + | ENABLE ROW MOVEMENT; (1 row) diff --git a/contrib/dolphin/plugin_executor/execQual.cpp b/contrib/dolphin/plugin_executor/execQual.cpp index 34a9d393d..6ae390e7c 
100644 --- a/contrib/dolphin/plugin_executor/execQual.cpp +++ b/contrib/dolphin/plugin_executor/execQual.cpp @@ -1103,6 +1103,7 @@ static Datum ExecEvalConst(ExprState* exprstate, ExprContext* econtext, bool* is if (found) { if (entry->isParse) { con = (Const *)uservar->value; + entry->isParse = false; } else { Node *node = coerce_type(NULL, (Node *)entry->value, entry->value->consttype, ((Const *)uservar->value)->consttype, -1, COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); diff --git a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp index b3371e810..e8c8c6fb5 100644 --- a/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp +++ b/contrib/dolphin/plugin_optimizer/commands/typecmds.cpp @@ -83,6 +83,7 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "utils/snapmgr.h" +#include "utils/typcache.h" #include "catalog/gs_dependencies_fn.h" #include "catalog/pg_object.h" @@ -2088,6 +2089,75 @@ Oid AssignTypeArrayOid(void) return type_array_oid; } +static ObjectAddress ReplaceTableOfType(Oid oldTypeOid, Oid refTypeOid) +{ + Relation pg_type_desc = NULL; + HeapTuple typtuple = NULL; + Form_pg_type typform = NULL; + Oid old_elemtype = InvalidOid; + ObjectAddress address; + + ObjectAddressSet(address, TypeRelationId, oldTypeOid); + /* if any table depend on this type, report ERROR */ + ReplaceTypeCheckRef(&address); + + /* change typelem in pg_type */ + pg_type_desc = heap_open(TypeRelationId, RowExclusiveLock); + typtuple = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(oldTypeOid)); + if (!HeapTupleIsValid(typtuple)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for type %u", oldTypeOid))); + } + typform = (Form_pg_type)GETSTRUCT(typtuple); + if (typform->typtype != TYPTYPE_TABLEOF) { + tableam_tops_free_tuple(typtuple); + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("type already exists but not a table of type"))); + } + old_elemtype = 
typform->typelem; + typform->typelem = refTypeOid; + simple_heap_update(pg_type_desc, &typtuple->t_self, typtuple); + /* update the system catalog indexes */ + CatalogUpdateIndexes(pg_type_desc, typtuple); + + tableam_tops_free_tuple(typtuple); + heap_close(pg_type_desc, RowExclusiveLock); + + /* find record between type and old_elemtype in pg_depend, and remove it */ + Relation depRel = NULL; + ScanKeyData key[2]; + int nkeys = 2; + SysScanDesc scan = NULL; + HeapTuple tup = NULL; + depRel = heap_open(DependRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(TypeRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oldTypeOid)); + scan = systable_beginscan(depRel, DependDependerIndexId, true, NULL, nkeys, key); + + while (HeapTupleIsValid(tup = systable_getnext(scan))) { + Form_pg_depend foundDep = (Form_pg_depend)GETSTRUCT(tup); + if (foundDep->refobjid == old_elemtype) { + simple_heap_delete(depRel, &tup->t_self); + } + } + systable_endscan(scan); + heap_close(depRel, RowExclusiveLock); + + /* record with new elemtype */ + ObjectAddress myself, referenced; + myself.classId = TypeRelationId; + myself.objectId = oldTypeOid; + myself.objectSubId = 0; + referenced.classId = TypeRelationId; + referenced.objectId = refTypeOid; + referenced.objectSubId = 0; + recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); + + CommandCounterIncrement(); + + return address; +} + /* * DefineRange * Registers a new table of type. 
@@ -2135,7 +2205,7 @@ ObjectAddress DefineTableOfType(const TableOfTypeStmt* stmt) */ typoid = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum(typname), ObjectIdGetDatum(typeNamespace)); if (OidIsValid(typoid)) { - if (!moveArrayTypeName(typoid, typname, typeNamespace)) + if (!moveArrayTypeName(typoid, typname, typeNamespace) && !stmt->replace) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("type \"%s\" already exists", typname))); } @@ -2180,38 +2250,43 @@ ObjectAddress DefineTableOfType(const TableOfTypeStmt* stmt) } ReleaseSysCache(type_tup); - /* Create the pg_type entry */ - return TypeCreate(InvalidOid, /* no predetermined type OID */ - typname, /* type name */ - typeNamespace, /* namespace */ - InvalidOid, /* relation oid (n/a here) */ - 0, /* relation kind (ditto) */ - typowner, /* owner's ID */ - -1, /* internal size (always varlena) */ - TYPTYPE_TABLEOF, /* type-type (table of type) */ - TYPCATEGORY_TABLEOF, /* type-category (table of type) */ - false, /* table of types are never preferred */ - DEFAULT_TYPDELIM, /* array element delimiter */ - F_ARRAY_IN, /* array input proc */ - F_ARRAY_OUT, /* array output proc */ - F_ARRAY_RECV, /* array recv (bin) proc */ - F_ARRAY_SEND, /* array send (bin) proc */ - InvalidOid, /* typmodin procedure - none */ - InvalidOid, /* typmodout procedure - none */ - F_ARRAY_TYPANALYZE, /* array analyze procedure */ - refTypeOid, /* element type ID - none */ - false, /* this is not an array type */ - InvalidOid, /* array type we are about to create */ - InvalidOid, /* base type ID (only for domains) */ - NULL, /* never a default type value */ - NULL, /* no binary form available either */ - false, /* never passed by value */ - 'd', /* alignment */ - 'x', /* TOAST strategy (always extended) */ - -1, /* typMod (Domains only) */ - 0, /* Array dimensions of typbasetype */ - false, /* Type NOT NULL */ - InvalidOid); /* type's collation (ranges never have one) */ + if (OidIsValid(typoid) && get_typisdefined(typoid)) { + 
return ReplaceTableOfType(typoid, refTypeOid); + } else { + /* Create the pg_type entry */ + return TypeCreate(InvalidOid, /* no predetermined type OID */ + typname, /* type name */ + typeNamespace, /* namespace */ + InvalidOid, /* relation oid (n/a here) */ + 0, /* relation kind (ditto) */ + typowner, /* owner's ID */ + -1, /* internal size (always varlena) */ + TYPTYPE_TABLEOF, /* type-type (table of type) */ + TYPCATEGORY_TABLEOF, /* type-category (table of type) */ + false, /* table of types are never preferred */ + DEFAULT_TYPDELIM, /* array element delimiter */ + F_ARRAY_IN, /* array input proc */ + F_ARRAY_OUT, /* array output proc */ + F_ARRAY_RECV, /* array recv (bin) proc */ + F_ARRAY_SEND, /* array send (bin) proc */ + InvalidOid, /* typmodin procedure - none */ + InvalidOid, /* typmodout procedure - none */ + F_ARRAY_TYPANALYZE, /* array analyze procedure */ + refTypeOid, /* element type ID - none */ + false, /* this is not an array type */ + InvalidOid, /* array type we are about to create */ + InvalidOid, /* base type ID (only for domains) */ + NULL, /* never a default type value */ + NULL, /* no binary form available either */ + false, /* never passed by value */ + 'd', /* alignment */ + 'x', /* TOAST strategy (always extended) */ + -1, /* typMod (Domains only) */ + 0, /* Array dimensions of typbasetype */ + false, /* Type NOT NULL */ + InvalidOid); /* type's collation (ranges never have one) */ + } + } /* ------------------------------------------------------------------- diff --git a/contrib/dolphin/plugin_optimizer/util/clauses.cpp b/contrib/dolphin/plugin_optimizer/util/clauses.cpp index 5cae44e73..cf16ad318 100644 --- a/contrib/dolphin/plugin_optimizer/util/clauses.cpp +++ b/contrib/dolphin/plugin_optimizer/util/clauses.cpp @@ -3409,6 +3409,13 @@ Node* eval_const_expressions_mutator(Node* node, eval_const_expressions_context* break; } + if (IS_SPQ_COORDINATOR) { + /* recurse into query structure if requested */ + if (IsA(node, Query) && 
context->recurse_queries) { + return (Node *)query_tree_mutator((Query *)node, (Node * (*)(Node *, void *)) eval_const_expressions_mutator, (void *)context, 0); + } + } + ELOG_FIELD_NAME_START(IsA(node, TargetEntry) ? ((TargetEntry*)node)->resname : NULL); /* diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index 0a7fc7e59..102e1c075 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -31,6 +31,7 @@ #include "catalog/pg_inherits.h" #include "catalog/pg_inherits_fn.h" #include "catalog/pg_proc.h" +#include "catalog/pg_type_fn.h" #include "catalog/pgxc_class.h" #include "catalog/indexing.h" #include "catalog/namespace.h" @@ -83,6 +84,7 @@ #include "utils/rel.h" #include "utils/rel_gs.h" #include "utils/acl.h" +#include "utils/typcache.h" #include "commands/explain.h" #include "commands/sec_rls_cmds.h" #include "commands/typecmds.h" @@ -494,6 +496,86 @@ Query* transformVariableAlterEventStmt(ParseState* pstate, AlterEventStmt* stmt) return result; } +static Query* TransformCompositeTypeStmt(ParseState* pstate, CompositeTypeStmt* stmt) +{ + Query* result = makeNode(Query); + result->commandType = CMD_UTILITY; + + Oid old_type_oid = InvalidOid; + Oid typeNamespace = InvalidOid; + RangeVar* typevar = stmt->typevar; + + typeNamespace = RangeVarGetAndCheckCreationNamespace(typevar, NoLock, NULL, RELKIND_COMPOSITE_TYPE); + RangeVarAdjustRelationPersistence(typevar, typeNamespace); + old_type_oid = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum(typevar->relname), ObjectIdGetDatum(typeNamespace)); + + if (OidIsValid(old_type_oid) && stmt->replace) { + HeapTuple typtuple = NULL; + Form_pg_type typform = NULL; + typtuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(old_type_oid)); + if (!HeapTupleIsValid(typtuple)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for type %u", old_type_oid))); + } + typform = 
(Form_pg_type)GETSTRUCT(typtuple); + + /* shell type or autogenerated array type */ + if (moveArrayTypeName(old_type_oid, NameStr(typform->typname), typeNamespace)) { + ReleaseSysCache(typtuple); + result->utilityStmt = (Node*)stmt; + return result; + } + + if (TYPTYPE_COMPOSITE != typform->typtype) { + ReleaseSysCache(typtuple); + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("type already exists but not a composite type"))); + } + + ObjectAddress address; + ObjectAddressSet(address, TypeRelationId, old_type_oid); + /* Check if any table/function depends on this type */ + ReplaceTypeCheckRef(&address); + + TupleDesc tupledesc = lookup_rowtype_tupdesc(old_type_oid, typform->typtypmod); + + AlterTableStmt* alterStmt = makeNode(AlterTableStmt); + alterStmt->fromReplace = true; + alterStmt->relation = stmt->typevar; + alterStmt->relkind = OBJECT_TYPE; + /* lappend DROP attribute stmt */ + for (int i = 0; i < tupledesc->natts; i++) { + Form_pg_attribute oldAttribute = &tupledesc->attrs[i]; + if (oldAttribute->attisdropped) { + continue; + } + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_DropColumn; + n->name = NameStr(oldAttribute->attname); + n->behavior = DROP_RESTRICT; + n->missing_ok = FALSE; + alterStmt->cmds = lappend(alterStmt->cmds, n); + } + /* lappend ADD attribute stmt */ + ListCell* lc; + foreach(lc, stmt->coldeflist) { + ColumnDef* newAttr = (ColumnDef*)lfirst(lc); + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_AddColumn; + n->def = (Node*)newAttr; + n->behavior = DROP_RESTRICT; + alterStmt->cmds = lappend(alterStmt->cmds, n); + } + ReleaseTupleDesc(tupledesc); + ReleaseSysCache(typtuple); + + result->utilityStmt = (Node*)alterStmt; + } else { + result->utilityStmt = (Node*)stmt; + } + + return result; +} /* * transformStmt - @@ -605,6 +687,10 @@ Query* transformStmt(ParseState* pstate, Node* parseTree, bool isFirstNode, bool case T_AlterEventStmt: result = transformVariableAlterEventStmt(pstate, 
(AlterEventStmt*) parseTree); break; + + case T_CompositeTypeStmt: + result = TransformCompositeTypeStmt(pstate, (CompositeTypeStmt*) parseTree); + break; #ifdef DOLPHIN case T_CreateEnumStmt: enumName = strVal(llast(((CreateEnumStmt*)parseTree)->typname)); diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 1af7cd55b..5bc1b8339 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -15235,16 +15235,36 @@ DefineStmt: /* can't use qualified_name, sigh */ n->typevar = makeRangeVarFromAnyName($3, @3, yyscanner); + n->replace = false; n->coldeflist = $6; $$ = (Node *)n; } + | CREATE OR REPLACE TYPE_P any_name as_is '(' OptTableFuncElementList ')' + { + CompositeTypeStmt *n = makeNode(CompositeTypeStmt); + + /* can't use qualified_name, sigh */ + n->typevar = makeRangeVarFromAnyName($5, @5, yyscanner); + n->replace = true; + n->coldeflist = $8; + $$ = (Node *)n; + } | CREATE TYPE_P any_name as_is TABLE OF func_type { TableOfTypeStmt *n = makeNode(TableOfTypeStmt); + n->replace = false; n->typname = $3; n->reftypname = $7; $$ = (Node *)n; } + | CREATE OR REPLACE TYPE_P any_name as_is TABLE OF func_type + { + TableOfTypeStmt *n = makeNode(TableOfTypeStmt); + n->replace = true; + n->typname = $5; + n->reftypname = $9; + $$ = (Node *)n; + } | CREATE TYPE_P any_name as_is ENUM_P '(' opt_enum_val_list ')' { CreateEnumStmt *n = makeNode(CreateEnumStmt); @@ -21136,7 +21156,8 @@ pkg_body_subprogram: { } else if (block_level == 0 && tok != ';') { in_procedure = false; } - if (tok == ';') + if (tok == ';' || + (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && pre_tok == ';' && tok == IDENT && in_procedure)) { block_level = block_level - 1; if (block_level == 0) @@ -21796,6 +21817,11 @@ subprogram_body: { && tok != WHILE_P && tok != REPEAT) { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && blocklevel == 1 && pre_tok == ';') + { + proc_e = yylloc; + break; + } tok = END_P; 
continue; } diff --git a/contrib/dolphin/plugin_parser/parse_relation.cpp b/contrib/dolphin/plugin_parser/parse_relation.cpp index c16bc876a..e41b45eb6 100644 --- a/contrib/dolphin/plugin_parser/parse_relation.cpp +++ b/contrib/dolphin/plugin_parser/parse_relation.cpp @@ -1201,6 +1201,7 @@ bool ValidateDependView(Oid view_oid, char objType) /* create or replace view */ if (objType == OBJECT_TYPE_VIEW) { ReplaceViewQueryFirstAfter(query_str); + CommandCounterIncrement(); } return isValid; } @@ -1360,6 +1361,7 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm errmsg("relation \"%s\" has data only in database \"postgres\"", relation->relname), errhint("please use database \"postgres\""))); } + if (RelationGetRelkind(rel) == RELKIND_VIEW && RelationGetRelid(rel) >= FirstNormalObjectId && !ValidateDependView(RelationGetRelid(rel), OBJECT_TYPE_VIEW)) { @@ -1369,7 +1371,7 @@ Relation parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockm RelationGetRelationName(rel)), errhint("Please re-add missing table fields."))); } - + if (!u_sess->attr.attr_common.XactReadOnly && rel->rd_id == UserStatusRelationId) { TryUnlockAllAccounts(); } @@ -2670,7 +2672,7 @@ List* expandRelAttrs(ParseState* pstate, RangeTblEntry* rte, int rtindex, int su * * Must free the pointer after usage!!! */ -char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum) +char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum, bool allowDropped) { if (attnum == InvalidAttrNumber) { return pstrdup("*"); @@ -2690,7 +2692,7 @@ char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum) * built (which can easily happen for rules). 
*/ if (rte->rtekind == RTE_RELATION) { - return get_relid_attribute_name(rte->relid, attnum); + return get_relid_attribute_name(rte->relid, attnum, allowDropped); } /* diff --git a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp index 25f96ee83..9aba0a40d 100644 --- a/contrib/dolphin/plugin_parser/parse_utilcmd.cpp +++ b/contrib/dolphin/plugin_parser/parse_utilcmd.cpp @@ -1157,10 +1157,11 @@ static void createSeqOwnedByTable(CreateStmtContext* cxt, ColumnDef* column, boo seqstmt->is_large = large; /* Assign UUID for create sequence */ + seqstmt->uuid = INVALIDSEQUUID; +#ifdef ENABLE_MUTIPLE_NODES if (!IS_SINGLE_NODE) seqstmt->uuid = gen_uuid(cxt->uuids); - else - seqstmt->uuid = INVALIDSEQUUID; +#endif /* * If this is ALTER ADD COLUMN, make sure the sequence will be owned diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y index 52d0c4a89..394cd8dd4 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/gram.y +++ b/contrib/dolphin/plugin_pl/plpgsql/src/gram.y @@ -9535,6 +9535,13 @@ get_proc_str(int tok) { blocklevel--; } + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && blocklevel == 1 && pre_tok == ';' && tok == T_WORD) + { + curloc = yylloc; + plpgsql_append_source_text(&ds, loc, curloc); + tok = yylex(); + break; + } } pre_tok = tok; @@ -11782,8 +11789,10 @@ read_into_array_table_scalar_list(char *initial_name, if (type_flag == PLPGSQL_TOK_TABLE_VAR) { isarrayelem = read_into_using_add_tableelem(fieldnames, varnos, &nfields, tmpdno, &tok); - } else { + } else if (type_flag == PLPGSQL_TOK_VARRAY_VAR) { isarrayelem = read_into_using_add_arrayelem(fieldnames, varnos, &nfields, tmpdno, &tok); + } else { + isarrayelem = false; } if (!isarrayelem) { diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp index bc680fbd5..1f6e01e37 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp 
+++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp @@ -59,8 +59,21 @@ PG_MODULE_MAGIC; #endif #define MAXSTRLEN ((1 << 11) - 1) +#define HTML_LT_LEN 4 +#define HTML_AMP_LEN 5 +#define HTML_QUOT_LEN 6 static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, int oldCompileStatus, PLpgSQL_compile_context *curr_compile, List *temp_tableof_index, MemoryContext oldcxt); +static void get_func_actual_rows(Oid funcid, uint32* rows); +static void get_proc_coverage(Oid func_oid, int* coverage); +static void generate_procoverage_html(int beginId, int endId, StringInfoData* result, bool isDefault); +static void generate_procoverage_table(char* value, StringInfoData* result, int index, + List** coverage_array, List** pro_querys); +static void generate_procoverage_rows(StringInfoData* result, List* coverage_array, List* pro_querys); +static void deconstruct_coverage_array(List** coverage_array, char* array_string); +static void deconstruct_querys_array(List** pro_querys, const char* querys_string); +static char* replace_html_entity(const char* input); + static void auditExecPLpgSQLFunction(PLpgSQL_function* func, AuditResult result) { char details[PGAUDIT_MAXLENGTH]; @@ -690,6 +703,75 @@ extern bool CheckElementParsetreeTag(Node* parsetree) return result; } +static PGconn* LoginDatabase(char* host, int port, char* user, char* password, + char* dbname, const char* progname, char* encoding) +{ + PGconn* conn = NULL; + char portValue[32]; +#define PARAMS_ARRAY_SIZE 10 + const char* keywords[PARAMS_ARRAY_SIZE]; + const char* values[PARAMS_ARRAY_SIZE]; + int count = 0; + int retryNum = 10; + int rc; + + rc = sprintf_s(portValue, sizeof(portValue), "%d", port); + securec_check_ss(rc, "\0", "\0"); + + keywords[0] = "host"; + values[0] = host; + keywords[1] = "port"; + values[1] = portValue; + keywords[2] = "user"; + values[2] = user; + keywords[3] = "password"; + values[3] = password; + keywords[4] = "dbname"; + values[4] = dbname; + keywords[5] = 
"fallback_application_name"; + values[5] = progname; + keywords[6] = "client_encoding"; + values[6] = encoding; + keywords[7] = "connect_timeout"; + values[7] = "5"; + keywords[8] = "options"; + /* this mode: remove timeout */ + values[8] = "-c xc_maintenance_mode=on"; + keywords[9] = NULL; + values[9] = NULL; + +retry: + /* try to connect to database */ + conn = PQconnectdbParams(keywords, values, true); + if (PQstatus(conn) != CONNECTION_OK) { + if (++count < retryNum) { + ereport(LOG, (errmsg("Could not connect to the %s, the connection info : %s", + dbname, PQerrorMessage(conn)))); + PQfinish(conn); + conn = NULL; + + /* sleep 0.1 s */ + pg_usleep(100000L); + goto retry; + } + + char connErrorMsg[1024] = {0}; + errno_t rc; + rc = snprintf_s(connErrorMsg, 1024, 1023, + "%s", PQerrorMessage(conn)); + securec_check_ss(rc, "\0", "\0"); + + PQfinish(conn); + conn = NULL; + ereport(ERROR, (errcode(ERRCODE_CONNECTION_TIMED_OUT), + (errmsg("Could not connect to the %s, " + "we have tried %d times, the connection info: %s", + dbname, count, connErrorMsg)))); + } + + return (conn); +} + Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) { bool nonatomic; @@ -710,6 +792,7 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) MemoryContext oldContext = CurrentMemoryContext; int pkgDatumsNumber = 0; bool savedisAllowCommitRollback = true; + bool enableProcCoverage = u_sess->attr.attr_common.enable_proc_coverage; /* * if the atomic stored in fcinfo is false means allow * commit/rollback within stored procedure. 
@@ -794,6 +877,15 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) */ firstLevelPkgOid = saveCallFromPkgOid(package_oid); bool saved_current_stp_with_exception = plpgsql_get_current_value_stp_with_exception(); + int *coverage = NULL; + if (enableProcCoverage) { + uint32 rows; + int ret; + get_func_actual_rows(func_oid, &rows); + coverage = (int*)palloc(sizeof(int) * rows); + ret = memset_s(coverage, sizeof(int) * rows, 0, sizeof(int) * rows); + securec_check(ret, "\0", "\0"); + } /* Find or compile the function */ if (func == NULL) { u_sess->plsql_cxt.compile_has_warning_info = false; @@ -891,7 +983,7 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) if (IsAutonomousTransaction(func->action->isAutonomous)) { retval = plpgsql_exec_autonm_function(func, fcinfo, NULL); } else { - retval = plpgsql_exec_function(func, fcinfo, false); + retval = plpgsql_exec_function(func, fcinfo, false, coverage); } /* Disconnecting and releasing resources */ DestoryAutonomousSession(false); @@ -963,6 +1055,11 @@ Datum b_plpgsql_call_handler(PG_FUNCTION_ARGS) clean_up_debug_server(func->debug, false, true); } #endif + if (enableProcCoverage && func->namespaceOid != PG_CATALOG_NAMESPACE) { + get_proc_coverage(func_oid, coverage); + pfree(coverage); + } + cursor_step = 0; /* cursor as an in parameter, its option shoule be return to the caller */ if (pkg != NULL) { @@ -1958,3 +2055,330 @@ static void init_do_stmt(PLpgSQL_package *pkg, bool isCreate, ListCell *cell, in } } } + +static void get_func_actual_rows(Oid funcid, uint32* rows) +{ + int headerlines = 0; + char* funcdef = pg_get_functiondef_worker(funcid, &headerlines); + if (funcdef == NULL) { + ereport(ERROR, + (errmodule(MOD_PLDEBUGGER), errcode(ERRCODE_TARGET_SERVER_NOT_ATTACHED), + errmsg("Unexpected NULL value for function definition"), + errdetail("N/A"), + errcause("Function definition is NULL"), + erraction("Re-create the function and retry"))); + } + int nLine = 0; + for (unsigned int i = 0; i < strlen(funcdef); i++) { + 
if (funcdef[i] == '\n') { + nLine++; + } + } + *rows = nLine; + pfree(funcdef); +} + +static void get_proc_coverage(Oid func_oid, int* coverage) +{ + PGconn* conn = NULL; + char* dbName = u_sess->proc_cxt.MyProcPort->database_name; + conn = LoginDatabase("localhost", g_instance.attr.attr_network.PostPortNumber, + NULL, NULL, "postgres", "gs_clean", "auto"); + + if (PQstatus(conn) == CONNECTION_OK) { + char proId[MAX_INT32_LEN]; + char* proNname = get_func_name(func_oid); + StringInfoData pro_querys; + StringInfoData pro_canbreak; + StringInfoData pro_coverage; + int rc = sprintf_s(proId, MAX_INT32_LEN, "%u", func_oid); + securec_check_ss(rc, "\0", "\0"); + const char* infoCodeValues[1]; + infoCodeValues[0] = proId; + uint32 rows; + int headerLines; + CodeLine* infoCode = debug_show_code_worker(func_oid, &rows, &headerLines); + + initStringInfo(&pro_querys); + initStringInfo(&pro_canbreak); + initStringInfo(&pro_coverage); + appendStringInfo(&pro_querys, "%s", infoCode[0].code); + appendStringInfo(&pro_canbreak, "{%s", infoCode[0].canBreak ? "true" : "false"); + appendStringInfo(&pro_coverage, "{%d", coverage[0]); + + for (int i = 1; i < rows; ++i) { + appendStringInfo(&pro_querys, "\n %s", infoCode[i].code); + appendStringInfo(&pro_canbreak, ",%s", infoCode[i].canBreak ? 
"true" : "false"); + appendStringInfo(&pro_coverage, ",%d", coverage[i]); + } + appendStringInfoString(&pro_canbreak, "}"); + appendStringInfoString(&pro_coverage, "}"); + + const char *insertQuery = "INSERT INTO coverage.proc_coverage" + "(pro_oid, pro_name, db_name, pro_querys, pro_canbreak, coverage) " + "VALUES ($1, $2, $3, $4, $5, $6)"; + const Oid paramTypes[6] = {OIDOID, TEXTOID, TEXTOID, TEXTOID, BOOLARRAYOID, INT4ARRAYOID}; + const int paramFormats[6] = {0}; + const char *paramValues[6]; + paramValues[0] = proId; + paramValues[1] = proNname; + paramValues[2] = dbName; + paramValues[3] = pro_querys.data; + paramValues[4] = pro_canbreak.data; + paramValues[5] = pro_coverage.data; + + PGresult *res = PQexecParams(conn, insertQuery, 6, paramTypes, paramValues, NULL, paramFormats, 0); + if (PQresultStatus(res) != PGRES_COMMAND_OK) { + ereport(WARNING, + (errcode(ERRCODE_INVALID_STATUS), errmsg("Insert failed: %s", PQerrorMessage(conn)))); + } + FreeStringInfo(&pro_querys); + FreeStringInfo(&pro_canbreak); + FreeStringInfo(&pro_coverage); + pfree(infoCode); + PQclear(res); + PQfinish(conn); + } else { + ereport(WARNING, + (errcode(ERRCODE_INVALID_STATUS), errmsg("Database connect failed: %s", PQerrorMessage(conn)))); + } +} + +Datum generate_procoverage_report(PG_FUNCTION_ARGS) +{ + int beginId = PG_GETARG_INT64(0); + int endId = PG_GETARG_INT64(1); + bool isDefault = false; + + if (!u_sess->attr.attr_common.enable_proc_coverage) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("GUC enable_proc_coverage not turned on"))); + } + if (beginId == -1 && endId == -1) { + isDefault = true; + } + if (!isDefault && (beginId < 1 || endId < 1 || beginId > endId)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Incorrect begin/end value"))); + } + + StringInfoData result; + const char* css = + "openGauss Procedure Coverage Report\n" + "\n" + "\n" + "\n"; + initStringInfo(&result); + appendStringInfo(&result, "%s

Procedure Coverage Report

\n", css); + generate_procoverage_html(beginId, endId, &result, isDefault); + appendStringInfoString(&result, ""); + PG_RETURN_TEXT_P(cstring_to_text(result.data)); +} + +static void generate_procoverage_html(int beginId, int endId, StringInfoData* result, bool isDefault) +{ + int rc = 0; + /* connect SPI to execute query */ + SPI_STACK_LOG("connect", NULL, NULL); + if ((rc = SPI_connect()) != SPI_OK_CONNECT) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("proc coverage SPI_connect failed: %s", SPI_result_code_string(rc)))); + } + + StringInfoData sql; + initStringInfo(&sql); + isDefault ? + appendStringInfoString( + &sql, "SELECT pro_name,db_name,coverage_arrays(pro_canbreak,pg_catalog.array_integer_sum(coverage)) " + "as coverage_array, " + "pro_querys, calculate_coverage(coverage_array) FROM coverage.proc_coverage " + "GROUP BY pro_oid, db_name, pro_name, pro_querys, pro_canbreak order by db_name") : + appendStringInfo( + &sql, "SELECT pro_name,db_name,coverage_arrays(pro_canbreak,pg_catalog.array_integer_sum(coverage)) " + "as coverage_array, " + "pro_querys, calculate_coverage(coverage_array) FROM coverage.proc_coverage " + "WHERE coverage_id BETWEEN %d AND %d GROUP BY pro_oid, db_name, pro_name, pro_querys, " + "pro_canbreak order by db_name", + beginId, endId); + + if (SPI_execute(sql.data, false, 0) != SPI_OK_SELECT) { + FreeStringInfo(&sql); + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("invalid query"))); + } + FreeStringInfo(&sql); + List* cstring_values = NIL; + if (SPI_tuptable != NULL) { + for (uint32 i = 0; i < SPI_processed; i++) { + List* row_string = NIL; + uint32 colNum = (uint32)SPI_tuptable->tupdesc->natts; + for (uint32 j = 1; j <= colNum; j++) { + char* value = SPI_getvalue(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, j); + row_string = lappend(row_string, value); + } + cstring_values = lappend(cstring_values, row_string); + } + } + + foreach_cell(outer_cell, cstring_values) { + List* row_list = 
(List*)lfirst(outer_cell); + List* coverage_array = NIL; + List* pro_querys = NIL; + int index = 0; + foreach_cell(inner_cell, row_list) { + char* value = (char*)lfirst(inner_cell); + generate_procoverage_table(value, result, index, &coverage_array, &pro_querys); + index++; + } + list_free_ext(coverage_array); + list_free_deep(pro_querys); + } + list_free_deep(cstring_values); + SPI_STACK_LOG("finish", NULL, NULL); + SPI_finish(); +} + +static void generate_procoverage_table(char* value, StringInfoData* result, + int index, List** coverage_array, List** pro_querys) +{ + switch (index) { + case PRO_NAME_COL: + appendStringInfo(result, "

%s()

\n", value); + break; + case DB_NAME_COL: + appendStringInfo(result, "

Database: %s

\n", value); + appendStringInfoString(result, "\n"); + break; + case COVERAGE_ARR_COL: + deconstruct_coverage_array(coverage_array, value); + break; + case PRO_QUERYS_COL: + deconstruct_querys_array(pro_querys, value); + break; + case COVERAGE_COL: { + char* endptr; + double coverageRate = strtod(value, &endptr); + generate_procoverage_rows(result, *coverage_array, *pro_querys); + appendStringInfo(result, "

Coverage rate: %.2f%%

\n", coverageRate * 100); + break; + } + default: + break; + } +} + +static void generate_procoverage_rows(StringInfoData* result, List* coverage_array, List* pro_querys) +{ + ListCell* cover_cell = list_head(coverage_array); + ListCell* query_cell = list_head(pro_querys); + int lineno = -3; + while (cover_cell != NULL && query_cell != NULL) { + int cover = lfirst_int(cover_cell); + char* query = (char*)lfirst(query_cell); + const char* trClass = (cover == -1 ? "unbreakable" : (cover == 0 ? "unexecuted" : "executed")); + appendStringInfo(result, "\n", trClass); + lineno < 1 ? appendStringInfoString(result, "\n") : + appendStringInfo(result, "\n", lineno); + cover == -1 ? appendStringInfoString(result, "\n") : appendStringInfo(result, "\n", cover); + appendStringInfo(result, "\n\n", query); + cover_cell = lnext(cover_cell); + query_cell = lnext(query_cell); + lineno++; + } + appendStringInfoString(result, "
%d%d%s
"); +} + +static void deconstruct_coverage_array(List** coverage_array, char* array_string) +{ + char *p = array_string; + int num; + if (*p++ != '{') { + return; + } + while (*p && *p != '}') { + while (*p == ',') { + p++; + } + if (sscanf_s(p, "%d", &num) == 1) { + *coverage_array = lappend_int(*coverage_array, num); + } + while (*p && *p != ',' && *p != '}') { + p++; + } + } +} + +static void deconstruct_querys_array(List** pro_querys, const char* querys_string) +{ + char* buffer = NULL; + char* token = NULL; + char* saveStr = NULL; + const char delim[2] = "\n"; + buffer = pstrdup(querys_string); + token = strtok_r(buffer, delim, &saveStr); + + while (token != NULL) { + char* copy = replace_html_entity(token); + *pro_querys = lappend(*pro_querys, copy); + token = strtok_r(NULL, delim, &saveStr); + } + pfree(buffer); +} + +static char* replace_html_entity(const char* input) +{ + size_t input_length = strlen(input); + size_t new_length = 0; + + for (size_t i = 0; i < input_length; i++) { + switch (input[i]) { + case '<': + case '>': + new_length += HTML_LT_LEN; + break; + case '&': + new_length += HTML_AMP_LEN; + break; + case '\"': + new_length += HTML_QUOT_LEN; + break; + default: + new_length += 1; + break; + } + } + char* result = (char*)palloc(new_length + 1); + + size_t j = 0; + for (size_t i = 0; i < input_length; i++) { + errno_t rc; + switch (input[i]) { + case '<': + rc = strcpy_s(&result[j], HTML_LT_LEN + 1, "<"); + securec_check_c(rc, "\0", "\0"); + j += HTML_LT_LEN; + break; + case '>': + rc = strcpy_s(&result[j], HTML_LT_LEN + 1, ">"); + securec_check_c(rc, "\0", "\0"); + j += HTML_LT_LEN; + break; + case '&': + rc = strcpy_s(&result[j], HTML_AMP_LEN + 1, "&"); + securec_check_c(rc, "\0", "\0"); + j += HTML_AMP_LEN; + break; + case '\"': + rc = strcpy_s(&result[j], HTML_QUOT_LEN + 1, """); + securec_check_c(rc, "\0", "\0"); + j += HTML_QUOT_LEN; + break; + default: + result[j] = input[i]; + j += 1; + break; + } + } + result[j] = '\0'; + return 
result; +} \ No newline at end of file diff --git a/contrib/dolphin/plugin_utils/adt/arrayfuncs.cpp b/contrib/dolphin/plugin_utils/adt/arrayfuncs.cpp index 6e6ceb3cd..d6bcdf3e3 100644 --- a/contrib/dolphin/plugin_utils/adt/arrayfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/arrayfuncs.cpp @@ -18,6 +18,7 @@ #include #include "catalog/pg_proc.h" +#include "common/int.h" #include "funcapi.h" #include "libpq/pqformat.h" #include "utils/array.h" @@ -3008,19 +3009,32 @@ ArrayType* array_set(ArrayType* array, int nSubscripts, const int* indx, Datum d addedbefore = addedafter = 0; /* - * Check subscripts + * Check subscripts. We assume the existing subscripts passed + * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without + * overflow. But we must beware of other overflows in our calculations of + * new dim[] values. */ if (ndim == 1) { if (indx[0] < lb[0]) { - addedbefore = lb[0] - indx[0]; - dim[0] += addedbefore; + if (pg_sub_s32_overflow(lb[0], indx[0], &addedbefore) || + pg_add_s32_overflow(dim[0], addedbefore, &dim[0])) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed (%d)", + (int) MaxArraySize))); + lb[0] = indx[0]; if (addedbefore > 1) newhasnulls = true; /* will insert nulls */ } if (indx[0] >= (dim[0] + lb[0])) { - addedafter = indx[0] - (dim[0] + lb[0]) + 1; - dim[0] += addedafter; + if (pg_sub_s32_overflow(indx[0], dim[0] + lb[0], &addedafter) || + pg_add_s32_overflow(addedafter, 1, &addedafter) || + pg_add_s32_overflow(dim[0], addedafter, &dim[0])) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed (%d)", + (int) MaxArraySize))); if (addedafter > 1) newhasnulls = true; /* will insert nulls */ } @@ -3247,7 +3261,10 @@ ArrayType* array_set_slice(ArrayType* array, int nSubscripts, int* upperIndx, in addedbefore = addedafter = 0; /* - * Check subscripts + * Check subscripts. 
We assume the existing subscripts passed + * ArrayCheckBounds, so that dim[i] + lb[i] can be computed without + * overflow. But we must beware of other overflows in our calculations of + * new dim[] values. */ if (ndim == 1) { Assert(nSubscripts == 1); @@ -3255,17 +3272,26 @@ ArrayType* array_set_slice(ArrayType* array, int nSubscripts, int* upperIndx, in ereport( ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("upper bound cannot be less than lower bound"))); if (lowerIndx[0] < lb[0]) { - if (upperIndx[0] < lb[0] - 1) - newhasnulls = true; /* will insert nulls */ - addedbefore = lb[0] - lowerIndx[0]; - dim[0] += addedbefore; + if (pg_sub_s32_overflow(lb[0], lowerIndx[0], &addedbefore) || + pg_add_s32_overflow(dim[0], addedbefore, &dim[0])) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed (%d)", + (int) MaxArraySize))); lb[0] = lowerIndx[0]; + if (addedbefore > 1) + newhasnulls = true; /*will insert nulls*/ } if (upperIndx[0] >= (dim[0] + lb[0])) { - if (lowerIndx[0] > (dim[0] + lb[0])) + if (pg_sub_s32_overflow(upperIndx[0], dim[0] + lb[0], &addedafter) || + pg_add_s32_overflow(addedafter, 1, &addedafter) || + pg_add_s32_overflow(dim[0], addedafter, &dim[0])) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed (%d)", + (int) MaxArraySize))); + if (addedafter > 1) newhasnulls = true; /* will insert nulls */ - addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1; - dim[0] += addedafter; } } else { /* diff --git a/contrib/dolphin/plugin_utils/adt/arrayutils.cpp b/contrib/dolphin/plugin_utils/adt/arrayutils.cpp index b4dcfef09..8076eb89f 100644 --- a/contrib/dolphin/plugin_utils/adt/arrayutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/arrayutils.cpp @@ -58,10 +58,6 @@ int ArrayGetOffset0(int n, const int* tup, const int* scale) * This must do overflow checking, since it is used to validate that a user * dimensionality request doesn't overflow what 
we can handle. * - * We limit array sizes to at most about a quarter billion elements, - * so that it's not necessary to check for overflow in quite so many - * places --- for instance when palloc'ing Datum arrays. - * * The multiplication overflow check only works on machines that have int64 * arithmetic, but that is nearly all platforms these days, and doing check * divides for those that don't seems way too expensive. @@ -71,7 +67,6 @@ int ArrayGetNItems(int ndim, const int* dims) int32 ret; int i; -#define MaxArraySize ((Size)(MaxAllocSize / sizeof(Datum))) if (ndim <= 0) return 0; diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index 1595958e9..4a268e915 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -15426,19 +15426,25 @@ Datum query_all_drc_info(PG_FUNCTION_ARGS) if (!ENABLE_DMS) { ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info without shared storage deployment!"))); } + if (!SS_PRIMARY_MODE) { + ereport(WARNING, (errmsg("[SS] query only in primary node. 
current node is standby!"))); + } FuncCallContext *funcctx = NULL; if (SRF_IS_FIRSTCALL()) { funcctx = SRF_FIRSTCALL_INIT(); MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + unsigned long long *rowid = (unsigned long long *)palloc0(sizeof(unsigned long long)); + *rowid = 0; + funcctx->user_fctx = (void*)rowid; funcctx->tuple_desc = create_query_all_drc_info_tupdesc(); MemoryContextSwitchTo(oldcontext); } funcctx = SRF_PERCALL_SETUP(); dv_drc_buf_info drc_info = {0}; - unsigned long long rowid = funcctx->call_cntr; - dms_get_buf_res(&rowid, &drc_info, type); + unsigned long long *rowid = (unsigned long long *)funcctx->user_fctx; + dms_get_buf_res(rowid, &drc_info, type); Datum values[18]; bool nulls[18] = {false}; if (drc_info.is_valid) { @@ -15446,6 +15452,7 @@ Datum query_all_drc_info(PG_FUNCTION_ARGS) HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); } + pfree_ext(rowid); SRF_RETURN_DONE(funcctx); } diff --git a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp index 4dc571181..703ef75d0 100644 --- a/contrib/dolphin/plugin_utils/adt/ruleutils.cpp +++ b/contrib/dolphin/plugin_utils/adt/ruleutils.cpp @@ -1636,9 +1636,8 @@ static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo { if (spcname != NULL) { appendStringInfo(buf, " TABLESPACE %s", quote_identifier(spcname)); - } else { - appendStringInfo(buf, " TABLESPACE pg_default"); } + /* If the tablespace is null, the table uses the default tablespace of the database or schema. 
*/ } /* @@ -8900,7 +8899,7 @@ static char* get_variable( if (attnum == InvalidAttrNumber) attname = NULL; else - attname = get_rte_attribute_name(rte, attnum); + attname = get_rte_attribute_name(rte, attnum, true); if (refname && (context->varprefix || attname == NULL)) { if (schemaname != NULL) @@ -10691,6 +10690,11 @@ static void get_rule_expr(Node* node, deparse_context* context, bool showimplici appendStringInfo(buf, "(%d)", pkey->length); } break; +#ifdef USE_SPQ + case T_DMLActionExpr: + appendStringInfo(buf, "DMLAction"); + break; +#endif default: if (context->qrw_phase) appendStringInfo(buf, "", (int)nodeTag(node)); diff --git a/contrib/dolphin/tablecmds.cpp b/contrib/dolphin/tablecmds.cpp index 22c09bc69..9d26513c9 100644 --- a/contrib/dolphin/tablecmds.cpp +++ b/contrib/dolphin/tablecmds.cpp @@ -283,6 +283,7 @@ typedef struct NewColumnValue { > 0 denote modify with first|after */ char *col_name; AttrNumber generate_attnum; + bool is_updated; } NewColumnValue; /* @@ -484,12 +485,12 @@ static void validateCheckConstraintForBucket(Relation rel, Partition part, HeapT static void validateForeignKeyConstraint(char* conname, Relation rel, Relation pkrel, Oid pkindOid, Oid constraintOid); static void createForeignKeyTriggers( Relation rel, Oid refRelOid, Constraint* fkconstraint, Oid constraintOid, Oid indexOid); -static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode); +static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode, bool fromReplace); static bool ATCheckLedgerTableCmd(Relation rel, AlterTableCmd* cmd); static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recurse, bool recursing, LOCKMODE lockmode, bool isDeltaTable = false); -static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode); -static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode); +static void 
ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode, bool fromReplace); +static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode, bool fromReplace); static void ATRewriteTables(AlterTableStmt *parsetree, List** wqueue, LOCKMODE lockmode); static void ATRewriteTable(AlteredTableInfo* tab, Relation oldrel, Relation newrel); static void ATCStoreRewriteTable(AlteredTableInfo* tab, Relation heapRel, LOCKMODE lockMode, Oid targetTblspc); @@ -529,6 +530,8 @@ static void ExecChangeTableSpaceForCStorePartition(AlteredTableInfo*, LOCKMODE); static int GetAfterColumnAttnum(Oid attrelid, const char *after_name); static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endattnum, bool is_increase); +static void UpdatePgStatisticFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); +static void UpdatePgStatisticExtFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static void UpdatePgDescriptionFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static void UpdatePgAttributeFirstAfter(Relation attr_rel, Oid attrelid, int startattnum, int endattnum, bool is_increase); @@ -545,7 +548,7 @@ static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endat static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase); static ViewInfoForAdd *GetViewInfoFirstAfter(const char *rel_name, Oid objid, bool keep_star = false); -static List *CheckPgRewriteFirstAfter(Relation rel); + static void UpdateDependRefobjsubidFirstAfter(Relation rel, Oid myrelid, int curattnum, int newattnum, bool *has_depend); static void UpdateDependRefobjsubidToNewattnum(Relation rel, Oid myrelid, int curattnum, int newattnum); @@ -589,7 +592,7 @@ static bool CheckLastColumn(Relation rel, AttrNumber attrnum); static 
void ATPrepDropColumn( List** wqueue, Relation rel, bool recurse, bool recursing, AlterTableCmd* cmd, LOCKMODE lockmode); static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* colName, DropBehavior behavior, bool recurse, - bool recursing, bool missing_ok, LOCKMODE lockmode); + bool recursing, bool missing_ok, LOCKMODE lockmode, bool fromReplace); static ObjectAddress ATExecAddIndex(AlteredTableInfo* tab, Relation rel, IndexStmt* stmt, bool is_rebuild, LOCKMODE lockmode); static ObjectAddress ATExecAddConstraint(List** wqueue, AlteredTableInfo* tab, Relation rel, Constraint* newConstraint, bool recurse, bool is_readd, LOCKMODE lockmode); @@ -2078,6 +2081,25 @@ void UpdatePartKeyExpr(Relation rel, PartitionState *partTableState, Oid partOid } } +/*Check whether tables or partition stored in segment are created +in limited tablespaces */ +void CheckSegmentIsInLimitTablespace(char* tablespacename, char* relname) +{ + Oid tablespaceId = InvalidOid; + if (tablespacename != NULL) { + tablespaceId = get_tablespace_oid(tablespacename, false); + } + Oid tbspcId = (tablespaceId == InvalidOid) ? u_sess->proc_cxt.MyDatabaseTableSpace : tablespaceId; + uint64 tablespaceMaxSize = 0; + bool isLimit = TableSpaceUsageManager::IsLimited(tbspcId, &tablespaceMaxSize); + if (isLimit) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_SEGMENT_PAGE), + errmsg("The partition %s do not support segment-page storage", relname != NULL ? relname : ""), + errdetail("Segment-page storage doest not support limited tablespace \"%s\"", get_tablespace_name(tbspcId)), + errhint("use default or unlimited user defined tablespace before using segment-page storage."))); + } +} + /* ---------------------------------------------------------------- * DefineRelation * Creates a new relation. 
@@ -2947,6 +2969,40 @@ ObjectAddress DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, Object errdetail("Segment-page storage doest not support limited tablespace \"%s\"", get_tablespace_name(tbspcId)), errhint("use default or unlimited user defined tablespace before using segment-page storage."))); } + if (stmt->partTableState) { + ListCell* cell = NULL; + Oid partTablespaceId = InvalidOid; + char* partitionName = NULL; + char* tablespacename = NULL; + foreach (cell, stmt->partTableState->partitionList) { + char* partitionName = NULL; + char* tablespacename = NULL; + if (IsA((lfirst(cell)), IntervalPartitionDefState)) { + IntervalPartitionDefState* partition = (IntervalPartitionDefState*)lfirst(cell); + ListCell* speccell = NULL; + foreach(speccell, partition->intervalTablespaces) { + tablespacename = ((Value*)lfirst(speccell))->val.str; + CheckSegmentIsInLimitTablespace(tablespacename, NULL); + } + continue; + } else if (IsA((lfirst(cell)), RangePartitionDefState) || IsA((lfirst(cell)), HashPartitionDefState) || IsA((lfirst(cell)), ListPartitionDefState)) { + PartitionDefState* partition = (PartitionDefState*)lfirst(cell); + tablespacename = partition->tablespacename; + partitionName = partition->partitionName; + } else if (IsA((lfirst(cell)), RangePartitionStartEndDefState)) { + RangePartitionStartEndDefState* partition = (RangePartitionStartEndDefState*)lfirst(cell); + tablespacename = partition->tableSpaceName; + partitionName = partition->partitionName; + } else if (IsA((lfirst(cell)), RangePartitionindexDefState)) { + RangePartitionindexDefState* partition = (RangePartitionindexDefState*)lfirst(cell); + tablespacename = partition->tablespace; + partitionName = partition->name; + } else { + Assert(false); + } + CheckSegmentIsInLimitTablespace(tablespacename, partitionName); + } + } } if (ENABLE_DMS && !u_sess->attr.attr_common.IsInplaceUpgrade) { @@ -8039,7 +8095,7 @@ void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) // Next 
version remove hack patch for 'ALTER FOREIGN TABLE ... ADD NODE' if (stmt->cmds != NIL) { /* process 'ALTER TABLE' cmd */ - ATController(stmt, rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), lockmode); + ATController(stmt, rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), lockmode, stmt->fromReplace); if (enable_plpgsql_gsdependency_guc()) { (void)gsplsql_build_ref_type_dependency(get_rel_type_id(relid)); } @@ -8058,7 +8114,7 @@ void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) /* open error table releation, closed in ATController */ Relation errtablerel = relation_open(errtableid, lockmode); - ATController(stmt, errtablerel, addNodeCmds, interpretInhOption(stmt->relation->inhOpt), lockmode); + ATController(stmt, errtablerel, addNodeCmds, interpretInhOption(stmt->relation->inhOpt), lockmode, stmt->fromReplace); } list_free_ext(addNodeCmds); } @@ -8093,7 +8149,7 @@ void AlterTableInternal(Oid relid, List* cmds, bool recurse) rel = relation_open(relid, lockmode); EventTriggerAlterTableRelid(relid); - ATController(NULL, rel, cmds, recurse, lockmode); + ATController(NULL, rel, cmds, recurse, lockmode, false); } static LOCKMODE set_lockmode(LOCKMODE mode, LOCKMODE cmd_mode) @@ -8197,7 +8253,7 @@ LOCKMODE AlterTableGetLockLevel(List* cmds) return lockmode; } -static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode) +static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bool recurse, LOCKMODE lockmode, bool fromReplace) { List* wqueue = NIL; ListCell* lcmd = NULL; @@ -8287,7 +8343,7 @@ static void ATController(AlterTableStmt *parsetree, Relation rel, List* cmds, bo relation_close(rel, NoLock); /* Phase 2: update system catalogs */ - ATRewriteCatalogs(&wqueue, lockmode); + ATRewriteCatalogs(&wqueue, lockmode, fromReplace); #ifdef PGXC /* Invalidate cache for redistributed relation */ @@ -8893,7 +8949,7 @@ static void UpdateGeneratedExpr(AlteredTableInfo* 
tab) * dispatched in a "safe" execution order (designed to avoid unnecessary * conflicts). */ -static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode) +static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode, bool fromReplace) { int pass; ListCell* ltab = NULL; @@ -8922,7 +8978,7 @@ static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode) rel = relation_open(tab->relid, NoLock); foreach (lcmd, subcmds) - ATExecCmd(wqueue, tab, rel, (AlterTableCmd*)lfirst(lcmd), lockmode); + ATExecCmd(wqueue, tab, rel, (AlterTableCmd*)lfirst(lcmd), lockmode, fromReplace); /* * After the ALTER TYPE pass, do cleanup work (this is not done in @@ -9142,7 +9198,7 @@ static void ATCreateColumComments(Oid relOid, ColumnDef* columnDef) /* * ATExecCmd: dispatch a subcommand to appropriate execution routine */ -static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode) +static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterTableCmd* cmd, LOCKMODE lockmode, bool fromReplace) { ObjectAddress address = InvalidObjectAddress; elog(ES_LOGLEVEL, "[ATExecCmd] cmd subtype: %d", cmd->subtype); @@ -9206,10 +9262,10 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT address = ATExecSetStorage(rel, cmd->name, cmd->def, lockmode); break; case AT_DropColumn: /* DROP COLUMN */ - address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, false, false, cmd->missing_ok, lockmode); + address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, false, false, cmd->missing_ok, lockmode, fromReplace); break; case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ - address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, true, false, cmd->missing_ok, lockmode); + address = ATExecDropColumn(wqueue, rel, cmd->name, cmd->behavior, true, false, cmd->missing_ok, lockmode, fromReplace); break; case AT_DropPartition: /* drop partition */ ATExecDropPartition(rel, 
cmd); @@ -11390,6 +11446,111 @@ static Node *UpdateVarattnoAfterAddColumn(Node *node, int startattnum, int endat return NULL; } +/* + * update pg_statistic + * 1. add column with first or after column + * 2. modify column to first or after column + */ +static void UpdatePgStatisticFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) +{ + Relation stat_rel; + HeapTuple stat_tuple; + ScanKeyData key[3]; + SysScanDesc scan; + Form_pg_statistic stat_form; + + stat_rel = heap_open(StatisticRelationId, RowExclusiveLock); + + for (int i = (is_increase ? endattnum : startattnum); + (is_increase ? i >= startattnum : i <= endattnum); (is_increase ? i-- : i++)) { + ScanKeyInit(&key[0], Anum_pg_statistic_starelid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(rel))); + ScanKeyInit(&key[1], Anum_pg_statistic_starelkind, BTEqualStrategyNumber, F_CHAREQ, ObjectIdGetDatum(STARELKIND_CLASS)); + ScanKeyInit(&key[2], Anum_pg_statistic_staattnum, BTEqualStrategyNumber, F_INT2EQ, Int16GetDatum(i)); + + scan = systable_beginscan(stat_rel, StatisticRelidKindAttnumInhIndexId, true, NULL, 3, key); + + while (HeapTupleIsValid(stat_tuple = systable_getnext(scan))) { + Datum values[Natts_pg_statistic] = { 0 }; + bool nulls[Natts_pg_statistic] = { 0 }; + bool replaces[Natts_pg_statistic] = { 0 }; + HeapTuple new_stat_tuple; + + stat_form = (Form_pg_statistic)GETSTRUCT(stat_tuple); + + values[Anum_pg_statistic_staattnum - 1] = is_increase ? 
Int16GetDatum(stat_form->staattnum + 1) : + Int16GetDatum(stat_form->staattnum - 1); + replaces[Anum_pg_statistic_staattnum - 1] = true; + + new_stat_tuple = heap_modify_tuple(stat_tuple, RelationGetDescr(stat_rel), values, nulls, replaces); + simple_heap_update(stat_rel, &new_stat_tuple->t_self, new_stat_tuple); + CatalogUpdateIndexes(stat_rel, new_stat_tuple); + + heap_freetuple_ext(new_stat_tuple); + } + systable_endscan(scan); + } + heap_close(stat_rel, RowExclusiveLock); +} + +/* + * update pg_statistic_ext + * 1. add column with first or after column + * 2. modify column to first or after column + */ +static void UpdatePgStatisticExtFirstAfter(Relation rel, int startattnum, int endattnum, bool is_increase) +{ + Relation stat_ext_rel; + HeapTuple stat_ext_tuple; + ScanKeyData key[2]; + SysScanDesc scan; + int curattnum = is_increase ? endattnum + 1 : startattnum - 1; + int newattnum = is_increase ? startattnum : endattnum; + + ScanKeyInit(&key[0], Anum_pg_statistic_ext_starelid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(rel))); + ScanKeyInit(&key[1], Anum_pg_statistic_ext_starelkind, BTEqualStrategyNumber, F_CHAREQ, ObjectIdGetDatum(STARELKIND_CLASS)); + stat_ext_rel = heap_open(StatisticExtRelationId, RowExclusiveLock); + scan = systable_beginscan(stat_ext_rel, StatisticExtRelidKindInhKeyIndexId, true, NULL, 2, key); + + while (HeapTupleIsValid(stat_ext_tuple = systable_getnext(scan))) { + bool is_null = false; + Datum values[Natts_pg_statistic_ext] = { 0 }; + bool nulls[Natts_pg_statistic_ext] = { 0 }; + bool replaces[Natts_pg_statistic_ext] = { 0 }; + int2vector *stakey = NULL; + int2vector *new_stakey = NULL; + HeapTuple new_stat_ext_tuple; + + Datum stakey_datum = fastgetattr(stat_ext_tuple, Anum_pg_statistic_ext_stakey, RelationGetDescr(stat_ext_rel), &is_null); + stakey = (int2vector *)DatumGetPointer(stakey_datum); + int2 *stakey_values = (int2 *)palloc0(stakey->dim1 * sizeof(int2)); + for (int i = 0; i < stakey->dim1; i++) { 
+ if (stakey->values[i] >= startattnum && stakey->values[i] <= endattnum) { + stakey_values[i] = is_increase ? (stakey->values[i] + 1) : (stakey->values[i] - 1); + } else if (stakey->values[i] == curattnum) { + stakey_values[i] = newattnum; + } else { + stakey_values[i] = stakey->values[i]; + } + } + new_stakey = buildint2vector(stakey_values, stakey->dim1); + values[Anum_pg_statistic_ext_stakey - 1] = PointerGetDatum(new_stakey); + replaces[Anum_pg_statistic_ext_stakey - 1] = true; + + new_stat_ext_tuple = heap_modify_tuple(stat_ext_tuple, RelationGetDescr(stat_ext_rel), values, nulls, replaces); + simple_heap_update(stat_ext_rel, &new_stat_ext_tuple->t_self, new_stat_ext_tuple); + CatalogUpdateIndexes(stat_ext_rel, new_stat_ext_tuple); + + pfree_ext(new_stakey); + pfree_ext(stakey_values); + heap_freetuple_ext(new_stat_ext_tuple); + } + + systable_endscan(scan); + heap_close(stat_ext_rel, RowExclusiveLock); +} + /* * update pg_description * 1. add column with first or after col_name. @@ -11545,16 +11706,17 @@ static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum AssertEreport(!is_null, MOD_OPT, ""); indkey = (int2vector *)DatumGetPointer(indkey_datum); Assert(indkey->dim1 == numatts); - new_indkey = buildint2vector(NULL, numatts); + int2 *indkey_values = (int2 *)palloc0(numatts * sizeof(int2)); for (int i = 0; i < numatts; i++) { if (indkey->values[i] >= startattnum && indkey->values[i] <= endattnum) { - new_indkey->values[i] = is_increase ? (indkey->values[i] + 1) : (indkey->values[i] - 1); + indkey_values[i] = is_increase ? 
(indkey->values[i] + 1) : (indkey->values[i] - 1); } else if (indkey->values[i] == curattnum) { - new_indkey->values[i] = newattnum; + indkey_values[i] = newattnum; } else { - new_indkey->values[i] = indkey->values[i]; + indkey_values[i] = indkey->values[i]; } } + new_indkey = buildint2vector(indkey_values, numatts); values[Anum_pg_index_indkey - 1] = PointerGetDatum(new_indkey); replaces[Anum_pg_index_indkey - 1] = true; @@ -11603,6 +11765,7 @@ static void UpdatePgIndexFirstAfter(Relation rel, int startattnum, int endattnum CatalogUpdateIndexes(index_rel, new_index_tuple); pfree_ext(new_indkey); + pfree_ext(indkey_values); heap_freetuple_ext(new_index_tuple); } @@ -11843,7 +12006,7 @@ static void UpdateIndexFirstAfter(Relation rel) HeapTuple index_tuple; ScanKeyData key; SysScanDesc scan; - Form_pg_index index_form; + MemoryContext oldcontext; /* Prepare to scan pg_index for entries having indrelid = this rel. */ ScanKeyInit(&key, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, @@ -11852,11 +12015,15 @@ static void UpdateIndexFirstAfter(Relation rel) scan = systable_beginscan(pg_index_rel, IndexIndrelidIndexId, true, NULL, 1, &key); while (HeapTupleIsValid(index_tuple = systable_getnext(scan))) { - index_form = (Form_pg_index)GETSTRUCT(index_tuple); + Form_pg_index index_form = (Form_pg_index)GETSTRUCT(index_tuple); table_index_rel = index_open(index_form->indexrelid, RowExclusiveLock); - table_index_rel->rd_index = index_form; + oldcontext = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); + pfree_ext(table_index_rel->rd_indextuple); + table_index_rel->rd_indextuple = heap_copytuple(index_tuple); + table_index_rel->rd_index = (Form_pg_index)GETSTRUCT(table_index_rel->rd_indextuple); + (void)MemoryContextSwitchTo(oldcontext); index_close(table_index_rel, RowExclusiveLock); } @@ -12039,23 +12206,25 @@ static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endat HeapTuple new_par_tuple; partkey = (int2vector 
*)DatumGetPointer(partkey_datum); - new_partKey = buildint2vector(NULL, partkey->dim1); + int2 *partkey_values = (int2 *)palloc0(partkey->dim1 * sizeof(int2)); + for (int i = 0; i < partkey->dim1; i++) { if (partkey->values[i] >= startattnum && partkey->values[i] <= endattnum) { - new_partKey->values[i] = is_increase ? (partkey->values[i] + 1) : (partkey->values[i] - 1); + partkey_values[i] = is_increase ? (partkey->values[i] + 1) : (partkey->values[i] - 1); } else if (partkey->values[i] == curattnum) { if (is_modified) { if (has_partition != NULL) { *has_partition = true; } - new_partKey->values[i] = 0; + partkey_values[i] = 0; } else { - new_partKey->values[i] = newattnum; + partkey_values[i] = newattnum; } } else { - new_partKey->values[i] = partkey->values[i]; + partkey_values[i] = partkey->values[i]; } } + new_partKey = buildint2vector(partkey_values, partkey->dim1); values[Anum_pg_partition_partkey - 1] = PointerGetDatum(new_partKey); replaces[Anum_pg_partition_partkey - 1] = true; @@ -12064,6 +12233,7 @@ static void UpdatePgPartitionFirstAfter(Relation rel, int startattnum, int endat CatalogUpdateIndexes(par_rel, new_par_tuple); pfree_ext(new_partKey); + pfree_ext(partkey_values); heap_freetuple_ext(new_par_tuple); } } @@ -12197,6 +12367,7 @@ static List *CheckPgRewriteFirstAfter(Relation rel) return query_str; } + void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute attForm, int2 old_attnum, char** attName, List **old_query_str) { @@ -12226,19 +12397,43 @@ void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute char *evActionString = TextDatumGetCString(evActiomDatum); List *evAction = (List *)stringToNode(evActionString); Query* query = (Query*)linitial(evAction); - // change query targetEntry + // change querytree's targetEntry and RTE ListCell* lc = NULL; foreach (lc, query->targetList) { TargetEntry* tle = (TargetEntry*)lfirst(lc); + Index rtevarno = 0; + AttrNumber rtevarattno = 0; if 
(nodeTag((Node*)tle->expr) == T_Var && tle->resorigtbl == rel_oid && - ((Var*)tle->expr)->varoattno == old_attnum) { + tle->resorigcol == old_attnum) { + ListCell* rtelc = NULL; + tle->resorigcol = attForm->attnum; Var *var = (Var *)tle->expr; - var->varattno = attForm->attnum; - var->varoattno = attForm->attnum; + rtevarno = var->varno; + rtevarattno = var->varattno; var->vartype = attForm->atttypid; var->vartypmod = attForm->atttypmod; + var->varcollid = attForm->attcollation; *attName = pstrdup(tle->resname); } + // change rtable entry + if (rtevarno == 0 || rtevarattno == 0) { + continue; + } + RangeTblEntry* rte = rt_fetch(rtevarno, query->rtable); + if (!rte || rte->alias != NULL || rte->rtekind != RTE_JOIN || rte->joinaliasvars == NIL) { + Var *var = (Var *)tle->expr; + var->varattno = attForm->attnum; + var->varoattno = attForm->attnum; + continue; + } + Var* aliasvar = (Var*)list_nth(rte->joinaliasvars, rtevarattno - 1); + if (IsA(aliasvar, Var)) { + aliasvar->varattno = attForm->attnum; + aliasvar->varoattno = attForm->attnum; + aliasvar->vartype = attForm->atttypid; + aliasvar->vartypmod = attForm->atttypmod; + aliasvar->varcollid = attForm->attcollation; + } } char* actiontree = nodeToString((Node*)evAction); HeapTuple new_dep_tuple; @@ -12337,21 +12532,24 @@ static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattn bool nulls[Natts_pg_trigger] = { 0 }; bool replaces[Natts_pg_trigger] = { 0 }; HeapTuple new_tri_tuple; + int2vector *new_tgattr = NULL; + int2 *tgattr_values = NULL; Datum tgattr_datum = fastgetattr(tri_tuple, Anum_pg_trigger_tgattr, tri_rel->rd_att, &is_null); if (!is_null) { int2vector *tgattr = (int2vector *)DatumGetPointer(tgattr_datum); - int2vector *new_tgattr = buildint2vector(NULL, tgattr->dim1); + int2 *tgattr_values = (int2 *)palloc0(tgattr->dim1 * sizeof(int2)); for (int i = 0; i < tgattr->dim1; i++) { if (tgattr->values[i] >= startattnum && tgattr->values[i] <= endattnum) { - new_tgattr->values[i] = 
is_increase ? (tgattr->values[i] + 1) : (tgattr->values[i] - 1); + tgattr_values[i] = is_increase ? (tgattr->values[i] + 1) : (tgattr->values[i] - 1); } else if (tgattr->values[i] == curattnum) { - new_tgattr->values[i] = newattnum; + tgattr_values[i] = newattnum; } else { - new_tgattr->values[i] = tgattr->values[i]; + tgattr_values[i] = tgattr->values[i]; } } + new_tgattr = buildint2vector(tgattr_values, tgattr->dim1); values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(new_tgattr); replaces[Anum_pg_trigger_tgattr - 1] = true; } @@ -12375,6 +12573,10 @@ static void UpdatePgTriggerFirstAfter(Relation rel, int startattnum, int endattn new_tri_tuple = heap_modify_tuple(tri_tuple, RelationGetDescr(tri_rel), values, nulls, replaces); simple_heap_update(tri_rel, &new_tri_tuple->t_self, new_tri_tuple); CatalogUpdateIndexes(tri_rel, new_tri_tuple); + + pfree_ext(tgattr_values); + pfree_ext(new_tgattr); + heap_freetuple_ext(new_tri_tuple); } systable_endscan(scan); @@ -12425,6 +12627,8 @@ static void UpdatePgRlspolicyFirstAfter(Relation rel, int startattnum, int endat new_rls_tuple = heap_modify_tuple(rls_tuple, RelationGetDescr(rls_rel), values, nulls, replaces); simple_heap_update(rls_rel, &new_rls_tuple->t_self, new_rls_tuple); CatalogUpdateIndexes(rls_rel, new_rls_tuple); + + heap_freetuple_ext(new_rls_tuple); } systable_endscan(scan); @@ -12671,6 +12875,8 @@ static ObjectAddress ATExecAddColumn(List** wqueue, AlteredTableInfo* tab, Relat if (is_addloc) { UpdatePgAttributeFirstAfter(attrdesc, myrelid, newattnum, currattnum, true); UpdatePgDescriptionFirstAfter(rel, newattnum, currattnum, true); + UpdatePgStatisticFirstAfter(rel, newattnum, currattnum, true); + UpdatePgStatisticExtFirstAfter(rel, newattnum, currattnum, true); UpdatePgIndexFirstAfter(rel, newattnum, currattnum, true); UpdatePgConstraintFirstAfter(rel, newattnum, currattnum, true); UpdatePgConstraintConfkeyFirstAfter(rel, newattnum, currattnum, true); @@ -13814,7 +14020,7 @@ static void 
ResetTempAutoIncrement(Relation rel, AttrNumber attnum) * Return value is that of the dropped column. */ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* colName, DropBehavior behavior, bool recurse, - bool recursing, bool missing_ok, LOCKMODE lockmode) + bool recursing, bool missing_ok, LOCKMODE lockmode, bool fromReplace) { HeapTuple tuple; Form_pg_attribute targetatt; @@ -13877,7 +14083,7 @@ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* * We have to check if the drop column is the last column. * If it is, not allow to drop it. */ - if (GetLocatorType(rel->rd_id) != LOCATOR_TYPE_HASH) { + if (!fromReplace && GetLocatorType(rel->rd_id) != LOCATOR_TYPE_HASH) { bool lastColumn = CheckLastColumn(rel, attnum); if (lastColumn) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("must have at least one column"))); @@ -13956,7 +14162,7 @@ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* /* * Delete this column of the delta table. 
*/ - ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode); + ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode, fromReplace); } else if (childatt->attinhcount == 1 && !childatt->attislocal) { /* * If the child column has other definition sources, just @@ -13965,7 +14171,7 @@ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* * Time to delete this child column, too */ - ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode); + ATExecDropColumn(wqueue, childrel, colName, behavior, true, true, false, lockmode, fromReplace); } else { /* Child column must survive my deletion */ childatt->attinhcount--; @@ -14051,7 +14257,7 @@ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* Oid tag_relid = get_tag_relid(RelationGetRelationName(rel), rel->rd_rel->relnamespace); Relation tagrel = heap_open(tag_relid, lockmode); CheckTableNotInUse(tagrel, "ALTER TABLE"); - ATExecDropColumn(wqueue, tagrel, colName, behavior, false, false, true, lockmode); + ATExecDropColumn(wqueue, tagrel, colName, behavior, false, false, true, lockmode, fromReplace); TagsCacheMgr::GetInstance().clear(); heap_close(tagrel, NoLock); @@ -14059,7 +14265,7 @@ static ObjectAddress ATExecDropColumn(List** wqueue, Relation rel, const char* /* if drop TSField columns, update delta table simultaneously */ Relation delta_rel = Tsdb::RelationGetDeltaRelation(rel, lockmode); CheckTableNotInUse(delta_rel, "ALTER TABLE"); - ATExecDropColumn(wqueue, delta_rel, colName, behavior, false, false, true, lockmode); + ATExecDropColumn(wqueue, delta_rel, colName, behavior, false, false, true, lockmode, fromReplace); heap_close(delta_rel, NoLock); } } @@ -16298,6 +16504,7 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation newval->newattnum = 0; newval->col_name = pstrdup(colName); newval->generate_attnum = 0; + newval->is_updated = false; tab->newvals = 
lappend(tab->newvals, newval); if (ATColumnChangeRequiresRewrite(transform, attnum)) @@ -16725,6 +16932,8 @@ static void AlterColumnToFirstAfter(AlteredTableInfo* tab, Relation rel, AlterTa UpdatePgPartitionFirstAfter(rel, startattnum, endattnum, is_increase, true, &has_partition); UpdatePgAttributeFirstAfter(attr_rel, myrelid, startattnum, endattnum, is_increase); UpdatePgDescriptionFirstAfter(rel, startattnum, endattnum, is_increase); + UpdatePgStatisticFirstAfter(rel, startattnum, endattnum, is_increase); + UpdatePgStatisticExtFirstAfter(rel, startattnum, endattnum, is_increase); UpdatePgIndexFirstAfter(rel, startattnum, endattnum, is_increase); UpdatePgConstraintFirstAfter(rel, startattnum, endattnum, is_increase); UpdatePgConstraintConfkeyFirstAfter(rel, startattnum, endattnum, is_increase); @@ -16815,7 +17024,7 @@ static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableC continue; } - if (strcmp(ex->col_name, col_name) == 0) { + if (strcmp(ex->col_name, col_name) == 0 && !ex->is_updated) { HeapTuple heap_tup; Form_pg_attribute att_tup; @@ -16827,8 +17036,10 @@ static void UpdateNewvalsAttnum(AlteredTableInfo* tab, Relation rel, AlterTableC att_tup = (Form_pg_attribute)GETSTRUCT(heap_tup); ex->attnum = att_tup->attnum; ex->newattnum = GetNewattnumFirstAfter(rel, cmd, ex->attnum); + ex->is_updated = true; tableam_tops_free_tuple(heap_tup); + return; } } } @@ -17252,6 +17463,10 @@ static ObjectAddress ATExecAlterColumnType(AlteredTableInfo* tab, Relation rel, heap_close(depRel, RowExclusiveLock); + if (tab->is_first_after) { + UpdateNewvalsAttnum(tab, rel, cmd, colName); + } + /* * Here we go --- change the recorded column type and collation. (Note * heapTup is a copy of the syscache entry, so okay to scribble on.) 
@@ -33713,7 +33928,7 @@ static void ATExecAlterModifyColumn(AlteredTableInfo* tab, Relation rel, AlterTa tab->new_notnull = true; } - if (is_first_after) { + if (is_first_after || tab->is_first_after) { UpdateNewvalsAttnum(tab, rel, cmd, col_name); } -- Gitee From d64a876d89dc88b77c38b6fa4cea238a39bd5f25 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 5 Jan 2024 18:02:30 +0800 Subject: [PATCH 164/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E6=A0=B9=E6=8D=AECBB=E7=9A=84=E7=BB=93=E8=AE=BA=EF=BC=8C?= =?UTF-8?q?=E6=9C=88=E4=BB=BD=E5=92=8C=E6=97=A5=E6=9C=9F=E5=87=BA=E7=8E=B0?= =?UTF-8?q?0=E7=9A=84=E6=97=B6=E5=80=99=EF=BC=8C=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E5=A4=84=E7=90=86=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20I8RJRO=E6=89=80?= =?UTF-8?q?=E7=A4=BA=E7=9A=840=E6=9C=88=E6=88=96=E8=80=850=E6=97=A5?= =?UTF-8?q?=E6=97=A0=E6=B3=95=E8=A7=A3=E5=86=B3=EF=BC=8C=E6=8C=89=E7=85=A7?= =?UTF-8?q?CBB=E7=9A=84=E7=BB=93=E8=AE=BA=EF=BC=8C=E6=94=B9=E6=88=90?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E5=A4=84=E7=90=86=E3=80=82=20=E3=80=90?= =?UTF-8?q?=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20date=E5=87=BA?= =?UTF-8?q?=E7=8E=B00=E6=9C=88=E6=88=96=E8=80=850=E6=97=A5=EF=BC=8C?= =?UTF-8?q?=E5=BD=93=E5=89=8D=E6=B2=A1=E6=B3=95=E5=A4=84=E7=90=86=E6=AD=A4?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E7=9A=84=E6=97=A5=E6=9C=9F=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20CBB?= =?UTF-8?q?=E7=9A=84=E7=BB=93=E8=AE=BA=EF=BC=8C=E6=94=B9=E6=88=90=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E5=A4=84=E7=90=86=E3=80=82=20=E3=80=90=E5=85=B3?= =?UTF-8?q?=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e?= =?UTF-8?q?.gitee.com/opengaussorg/dashboard=3Fissue=3DI8RJRO?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 14 +++-- .../b_compatibility_time_funcs.out | 7 +-- .../b_compatibility_time_funcs3.out | 61 +++++++++++++++++++ 
.../dolphin/include/plugin_utils/timestamp.h | 2 + .../dolphin/plugin_utils/adt/timestamp.cpp | 46 +++++++++++++- .../b_compatibility_time_funcs3.sql | 15 +++++ 6 files changed, 133 insertions(+), 12 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out b/contrib/dolphin/expected/b_compatibility_time_type.out index e5b5b8404..710af5768 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -2050,6 +2050,10 @@ WARNING: date/time field value out of range: "2000-00-00 10:01" LINE 1: INSERT INTO t_NO_ZERO_DATE_datetime(v) VALUES('2000-00-00 10... ^ CONTEXT: referenced column: v +ERROR: The value of month or day cannot be zero. +LINE 1: INSERT INTO t_NO_ZERO_DATE_datetime(v) VALUES('2000-00-00 10... + ^ +CONTEXT: referenced column: v INSERT INTO t_NO_ZERO_DATE_timestamp(v) VALUES('0000-00-00 10:01'); INSERT INTO t_NO_ZERO_DATE_timestamp(v) VALUES('0000-00-01 10:01'); WARNING: date/time field value out of range: "0000-00-01 10:01" @@ -2066,6 +2070,10 @@ WARNING: date/time field value out of range: "2000-00-00 10:01" LINE 1: INSERT INTO t_NO_ZERO_DATE_timestamp(v) VALUES('2000-00-00 1... ^ CONTEXT: referenced column: v +ERROR: The value of month or day cannot be zero. +LINE 1: INSERT INTO t_NO_ZERO_DATE_timestamp(v) VALUES('2000-00-00 1... 
+ ^ +CONTEXT: referenced column: v SET dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group'; INSERT INTO t_NO_ZERO_DATE_date(v) VALUES(0); INSERT INTO t_NO_ZERO_DATE_date(v) VALUES('0000-00-00'); @@ -2131,22 +2139,20 @@ SELECT * FROM t_NO_ZERO_DATE_date order by v; SELECT * FROM t_NO_ZERO_DATE_datetime order by v; v --------------------- - 0000-00-00 00:00:00 0000-00-00 00:00:00 0000-00-00 00:00:00 0000-00-00 10:01:00 0000-00-00 10:01:00 -(5 rows) +(4 rows) SELECT * FROM t_NO_ZERO_DATE_timestamp order by v; v ------------------------------ - 0000-00-00 00:00:00+08:05:43 0000-00-00 00:00:00+08:05:43 0000-00-00 00:00:00+08:05:43 0000-00-00 10:01:00+08:05:43 0000-00-00 10:01:00+08:05:43 -(5 rows) +(4 rows) DROP TABLE t_NO_ZERO_DATE_date; DROP TABLE t_NO_ZERO_DATE_datetime; diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index bb90bde20..47d70e7f3 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -771,13 +771,8 @@ select time_bool('-838:59:59'); select timestamp('2022-05'); WARNING: invalid input syntax for type timestamp: "2022-05" CONTEXT: referenced column: timestamp -WARNING: date/time field value out of range +ERROR: The value of month or day cannot be zero. 
CONTEXT: referenced column: timestamp - timestamp ------------ - -(1 row) - select timestamp('2022-05-05'); timestamp --------------------- diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index 142c7fb06..5a7d8a39e 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -1614,6 +1614,67 @@ select * from t2; drop table if exists t1; drop table if exists t2; +-- 0 day or 0 month +select cast(10000 as datetime); +WARNING: timestamp out of range +CONTEXT: referenced column: timestamp +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamp +select cast(10100 as datetime); +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamp +select cast(10001 as datetime); +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamp +select cast(10000 as timestamp with time zone); +WARNING: timestamp out of range +CONTEXT: referenced column: timestamptz +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamptz +select cast(10100 as timestamp with time zone); +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamptz +select cast(10001 as timestamp with time zone); +ERROR: The value of month or day cannot be zero. +CONTEXT: referenced column: timestamptz +select cast('10000' as datetime); +WARNING: date/time field value out of range: "10000" +LINE 1: select cast('10000' as datetime); + ^ +CONTEXT: referenced column: timestamp +ERROR: The value of month or day cannot be zero. +LINE 1: select cast('10000' as datetime); + ^ +CONTEXT: referenced column: timestamp +select cast('10100' as datetime); +ERROR: The value of month or day cannot be zero. 
+LINE 1: select cast('10100' as datetime); + ^ +CONTEXT: referenced column: timestamp +select cast('10001' as datetime); +ERROR: The value of month or day cannot be zero. +LINE 1: select cast('10001' as datetime); + ^ +CONTEXT: referenced column: timestamp +select cast('10000' as timestamp with time zone); +WARNING: date/time field value out of range: "10000" +LINE 1: select cast('10000' as timestamp with time zone); + ^ +CONTEXT: referenced column: timestamptz +ERROR: The value of month or day cannot be zero. +LINE 1: select cast('10000' as timestamp with time zone); + ^ +CONTEXT: referenced column: timestamptz +select cast('10100' as timestamp with time zone); +ERROR: The value of month or day cannot be zero. +LINE 1: select cast('10100' as timestamp with time zone); + ^ +CONTEXT: referenced column: timestamptz +select cast('10001' as timestamp with time zone); +ERROR: The value of month or day cannot be zero. +LINE 1: select cast('10001' as timestamp with time zone); + ^ +CONTEXT: referenced column: timestamptz drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index af4fff1ad..6f526972b 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -159,6 +159,8 @@ static inline bool non_zero_date(const pg_tm *ltime) extern TimeADT adjust_time_range_with_warn(TimeADT time, bool can_ignore); extern "C" DLL_PUBLIC Datum time_cast_implicit(PG_FUNCTION_ARGS); +extern void check_zero_month_day(pg_tm *tm, bool can_ignore); + #endif extern Datum datetime_text(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 97e3218c5..3bab766f7 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -564,6 
+564,11 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) Timestamp result; fsec_t fsec; struct pg_tm tt, *tm = &tt; +#ifdef DOLPHIN + error_t rc = EOK; + rc = memset_s(tm, sizeof(pg_tm), 0, sizeof(pg_tm)); + securec_check(rc, "\0", "\0"); +#endif int tz; int dtype; int nf; @@ -623,8 +628,14 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) } if (dterr != 0) { DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, is_date_sconst); +#ifdef DOLPHIN + check_zero_month_day(tm, fcinfo->can_ignore); +#endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } +#ifdef DOLPHIN + check_zero_month_day(tm, fcinfo->can_ignore); +#endif switch (dtype) { case DTK_DATE: if (tm2timestamp(tm, fsec, NULL, &result) != 0) @@ -901,6 +912,11 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t { Timestamp result; struct pg_tm tt, *tm = &tt; +#ifdef DOLPHIN + error_t rc = EOK; + rc = memset_s(tm, sizeof(pg_tm), 0, sizeof(pg_tm)); + securec_check(rc, "\0", "\0"); +#endif int tz; int level = can_ignore || !SQL_MODE_STRICT() ? 
WARNING : ERROR; if (ts < B_FORMAT_DATE_INT_MIN) { @@ -940,15 +956,20 @@ static Timestamp int64_b_format_timestamp_internal(bool hasTz, int64 ts, fsec_t time = ts % 1000000; /* extract time: hhmmss */ date = ts / 1000000; /* extract date: YYMMDD or YYYYMMDD */ } - if (int32_b_format_time_internal(tm, true, time, &fsec) || int32_b_format_date_internal(tm, date, true)){ + if (int32_b_format_time_internal(tm, true, time, &fsec) || int32_b_format_date_internal(tm, date, true)) { ereport(level, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); #ifdef DOLPHIN *time_error_type = TIME_INCORRECT; + check_zero_month_day(tm, can_ignore); #endif return TIMESTAMP_ZERO; } +#ifdef DOLPHIN + check_zero_month_day(tm, can_ignore); +#endif + if (hasTz) { /* b format timestamp type */ tz = DetermineTimeZoneOffset(tm, session_timezone); @@ -1005,7 +1026,8 @@ Datum timestamp_to_datum_with_null_result(PG_FUNCTION_ARGS, bool hasTz, int64 ts { TimeErrorType time_error_type = TIME_CORRECT; int ts_cnt = 0; - int64 result = integer_b_format_timestamp(hasTz, ts, fcinfo->can_ignore, &time_error_type, &ts_cnt); + /* only when datetime cmp will access here, so can_ignore can be set as true defaultly*/ + int64 result = integer_b_format_timestamp(hasTz, ts, true, &time_error_type, &ts_cnt); if (ts_cnt > TIMESTAMP_YYYYMMDDhhmmss_LEN) { PG_RETURN_TIMESTAMP(B_FORMAT_TIMESTAMP_MAX_VALUE); } @@ -1659,6 +1681,11 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr TimestampTz result; fsec_t fsec; struct pg_tm tt, *tm = &tt; +#ifdef DOLPHIN + error_t rc = EOK; + rc = memset_s(tm, sizeof(pg_tm), 0, sizeof(pg_tm)); + securec_check(rc, "\0", "\0"); +#endif int tz; int invalid_tz; int dtype; @@ -1707,9 +1734,13 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr DateTimeParseError(dterr, str, "timestamp", is_timestamptz_sconst || fcinfo->can_ignore); #ifdef DOLPHIN *time_error_type = TIME_INCORRECT; + 
check_zero_month_day(tm, fcinfo->can_ignore); #endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } +#ifdef DOLPHIN + check_zero_month_day(tm, fcinfo->can_ignore); +#endif switch (dtype) { case DTK_DATE: if (tm2timestamp(tm, fsec, &tz, &result) != 0) @@ -11733,6 +11764,17 @@ Datum timestamp_bool(PG_FUNCTION_ARGS) PG_RETURN_BOOL(tmp ? true : false); } + +void check_zero_month_day(pg_tm *tm, bool can_ignore) +{ + if (!can_ignore && (tm->tm_mon == 0 || tm->tm_mday == 0) + && tm->tm_year != 0) { + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("The value of month or day cannot be zero."))); + } +} + #endif #endif diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index 620477a9f..261d519f5 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -426,5 +426,20 @@ select * from t2; drop table if exists t1; drop table if exists t2; +-- 0 day or 0 month +select cast(10000 as datetime); +select cast(10100 as datetime); +select cast(10001 as datetime); +select cast(10000 as timestamp with time zone); +select cast(10100 as timestamp with time zone); +select cast(10001 as timestamp with time zone); + +select cast('10000' as datetime); +select cast('10100' as datetime); +select cast('10001' as datetime); +select cast('10000' as timestamp with time zone); +select cast('10100' as timestamp with time zone); +select cast('10001' as timestamp with time zone); + drop schema b_time_funcs3 cascade; reset current_schema; -- Gitee From 1375b27a76883aabe7dbac26bfdefce1a19293fd Mon Sep 17 00:00:00 2001 From: liruixiang <461834084@qq.com> Date: Fri, 5 Jan 2024 18:50:35 +0800 Subject: [PATCH 165/434] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=AF=B9drop=20table?= =?UTF-8?q?s=E7=9A=84=E8=AF=AD=E6=B3=95=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/drop_tables.out | 22 +++++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_parser/gram.y | 1 + contrib/dolphin/sql/drop_tables.sql | 24 +++++++++++++++++++++++ 4 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 contrib/dolphin/expected/drop_tables.out create mode 100644 contrib/dolphin/sql/drop_tables.sql diff --git a/contrib/dolphin/expected/drop_tables.out b/contrib/dolphin/expected/drop_tables.out new file mode 100644 index 000000000..cb1ce694b --- /dev/null +++ b/contrib/dolphin/expected/drop_tables.out @@ -0,0 +1,22 @@ +create schema test_drop_tables; +set current_schema='test_drop_tables'; +create table t1(a int); +create table t2(a int); +create table t3(a int); +drop tables t1,t2; +drop tables t3; +create table test_if_exists(a int); +drop tables if exists test_if_exists,not_exist_table1,not_exist_table2; +NOTICE: table "not_exist_table1" does not exist, skipping +NOTICE: table "not_exist_table2" does not exist, skipping +drop table if exists test_if_exists; +NOTICE: table "test_if_exists" does not exist, skipping +create table test_direct_drop(a int); +drop tables test_direct_drop,not_exist_table1,not_exist_table2; +ERROR: table "not_exist_table1" does not exist +drop table if exists test_direct_drop; +create table test_restrict(a int); +drop table test_restrict restrict; +create table test_cascade(a int); +drop table test_cascade cascade; +DROP SCHEMA test_drop_tables CASCADE; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 89ef39748..adb7b40b4 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -91,7 +91,7 @@ test: case_sensitive_test/alter_table case_sensitive_test/reindex_internal case_ test: case_sensitive_test/create_view1 case_sensitive_test/create_view2 case_sensitive_test/create_view3 case_sensitive_test/create_view4 
case_sensitive_test/create_view5 case_sensitive_test/view_test -test: case_sensitive_test/drop +test: case_sensitive_test/drop drop_tables # This use case uses the \dn command to output all the schemas. # If not placed in a separate test case group, diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 1af7cd55b..f637a8a24 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -15919,6 +15919,7 @@ opt_temporary: | /* EMPTY */ { $$ = NULL; } ; dolphin_drop_type: opt_temporary TABLE { $$ = OBJECT_TABLE; } + | opt_temporary TABLES { $$ = OBJECT_TABLE; } | SCHEMA { $$ = OBJECT_SCHEMA; } | VIEW { $$ = OBJECT_VIEW; } diff --git a/contrib/dolphin/sql/drop_tables.sql b/contrib/dolphin/sql/drop_tables.sql new file mode 100644 index 000000000..76571cb27 --- /dev/null +++ b/contrib/dolphin/sql/drop_tables.sql @@ -0,0 +1,24 @@ +create schema test_drop_tables; +set current_schema='test_drop_tables'; + +create table t1(a int); +create table t2(a int); +create table t3(a int); +drop tables t1,t2; +drop tables t3; + +create table test_if_exists(a int); +drop tables if exists test_if_exists,not_exist_table1,not_exist_table2; +drop table if exists test_if_exists; + +create table test_direct_drop(a int); +drop tables test_direct_drop,not_exist_table1,not_exist_table2; +drop table if exists test_direct_drop; + +create table test_restrict(a int); +drop table test_restrict restrict; + +create table test_cascade(a int); +drop table test_cascade cascade; + +DROP SCHEMA test_drop_tables CASCADE; \ No newline at end of file -- Gitee From b4d2269a13daf228cf4079ee6e4ace5d1cf91722 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 8 Jan 2024 15:10:29 +0800 Subject: [PATCH 166/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dselect=20cast(3.14159265=20as=20datetime)?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E4=B8=8Bmysql=E6=98=BE=E7=A4=BANULL=EF=BC=8C?= 
=?UTF-8?q?OG=E6=98=BE=E7=A4=BA0=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E5=9C=A8times?= =?UTF-8?q?tamp=5Finternal=E5=8C=BA=E5=88=86=E6=9F=A5=E8=AF=A2=E8=BF=98?= =?UTF-8?q?=E6=98=AF=E9=9D=9E=E4=B8=A5=E6=A0=BC=E5=86=99=E6=A8=A1=E5=BC=8F?= =?UTF-8?q?=EF=BC=8C=E6=9F=A5=E8=AF=A2=E7=9A=84=E8=AF=9D=E8=BF=94=E5=9B=9E?= =?UTF-8?q?NULL=EF=BC=8C=E9=9D=9E=E4=B8=A5=E6=A0=BC=E6=A8=A1=E5=BC=8F?= =?UTF-8?q?=E7=9A=84=E8=AF=9D=E8=BF=94=E5=9B=9E0.=20=E3=80=90=E6=A0=B9?= =?UTF-8?q?=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20=E5=BD=93=E5=89=8DOpenG?= =?UTF-8?q?uass=E7=9A=84timestamp=5Finternal=E5=A6=82=E6=9E=9C=E9=81=87?= =?UTF-8?q?=E5=88=B0=E9=94=99=E8=AF=AF=EF=BC=8C=E5=B0=B1=E7=9B=B4=E6=8E=A5?= =?UTF-8?q?=E8=BF=94=E5=9B=9E0=E4=BA=86=EF=BC=8C=E4=BD=86=E6=98=AFmysql?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E7=9A=84=E8=AF=9D=E4=B8=80=E8=88=AC=E6=98=AF?= =?UTF-8?q?=E8=BF=94=E5=9B=9ENULL=EF=BC=8C=E9=9D=9E=E4=B8=A5=E6=A0=BC?= =?UTF-8?q?=E5=86=99=E7=9A=84=E6=A8=A1=E5=BC=8F=E4=B8=8B=E6=89=8D=E8=BF=94?= =?UTF-8?q?=E5=9B=9E0.=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88?= =?UTF-8?q?=E3=80=91:=20=E5=9C=A8timestamp=5Finternal=E5=8C=BA=E5=88=86?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E8=BF=98=E6=98=AF=E9=9D=9E=E4=B8=A5=E6=A0=BC?= =?UTF-8?q?=E5=86=99=E6=A8=A1=E5=BC=8F=EF=BC=8C=E6=9F=A5=E8=AF=A2=E7=9A=84?= =?UTF-8?q?=E8=AF=9D=E8=BF=94=E5=9B=9ENULL=EF=BC=8C=E9=9D=9E=E4=B8=A5?= =?UTF-8?q?=E6=A0=BC=E6=A8=A1=E5=BC=8F=E7=9A=84=E8=AF=9D=E8=BF=94=E5=9B=9E?= =?UTF-8?q?0.=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82=E6=88=96issu?= =?UTF-8?q?e=E3=80=91:=20https://e.gitee.com/opengaussorg/dashboard=3Fissu?= =?UTF-8?q?e=3DI8SZA8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 71 +++++++++++ .../dolphin/include/plugin_utils/timestamp.h | 7 +- .../dolphin/plugin_utils/adt/timestamp.cpp | 113 ++++++++++++++---- .../b_compatibility_time_funcs3.sql | 15 +++ 4 files changed, 183 
insertions(+), 23 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index 5a7d8a39e..eba2785b0 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -1675,6 +1675,77 @@ ERROR: The value of month or day cannot be zero. LINE 1: select cast('10001' as timestamp with time zone); ^ CONTEXT: referenced column: timestamptz +set dolphin.b_compatibility_mode = true; +select cast(3.1415926 as datetime); +WARNING: invalid input syntax for type timestamp: "000003.1415926" +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select cast(3.1415926::float8 as datetime); +WARNING: invalid input syntax for type timestamp: "000003.1415926" +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select cast('3.1415926' as datetime); +WARNING: invalid input syntax for type timestamp: "3.1415926" +LINE 1: select cast('3.1415926' as datetime); + ^ +CONTEXT: referenced column: timestamp + timestamp +----------- + +(1 row) + +select cast(3.1415926 as timestamp with time zone); +WARNING: invalid input syntax for type timestamp: "000003.1415926" +CONTEXT: referenced column: timestamptz + timestamptz +------------- + +(1 row) + +select cast(3.1415926::float8 as timestamp with time zone); +WARNING: invalid input syntax for type timestamp: "000003.1415926" +CONTEXT: referenced column: timestamptz + timestamptz +------------- + +(1 row) + +select cast('3.1415926' as timestamp with time zone); +WARNING: invalid input syntax for type timestamp: "3.1415926" +LINE 1: select cast('3.1415926' as timestamp with time zone); + ^ +CONTEXT: referenced column: timestamptz + timestamptz +------------- + +(1 row) + +create table t1(a datetime, b timestamp with time zone); +set dolphin.sql_mode = 
'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into t1 values ('3.1415926', '3.1415926'); +WARNING: invalid input syntax for type timestamp: "3.1415926" +LINE 1: insert into t1 values ('3.1415926', '3.1415926'); + ^ +CONTEXT: referenced column: a +WARNING: invalid input syntax for type timestamp: "3.1415926" +LINE 1: insert into t1 values ('3.1415926', '3.1415926'); + ^ +CONTEXT: referenced column: b +select * from t1; + a | b +---------------------+------------------------ + 0000-00-00 00:00:00 | 0000-00-00 00:00:00-08 +(1 row) + +drop table if exists t1; drop schema b_time_funcs3 cascade; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table func_test3 diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 6f526972b..dc1859c45 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -44,6 +44,8 @@ do { #define FRAC_PRECISION 6 #define TIMESTAMP_MAX_PRECISION 6 +#define TIMESTAMP_WITH_FORMAT_ARG_LEN 4 + #ifdef DOLPHIN #define B_MAX_NUMBER_DATETIME INT64CONST(99991231235959) /* 9999-12-31 23:59:59 */ #define B_NORMAL_NUMBER_DATETIME INT64CONST(10000101000000) /* 1000-01-01 00:00:00 */ @@ -137,7 +139,6 @@ extern bool datetime_in_with_sql_mode_internal(char *str, struct pg_tm *tm, fsec extern bool datetime_in_range(Timestamp datetime); extern int128 timestamp_int128(Timestamp timestamp); extern int128 timestamptz_int128(TimestampTz timestampTz); -extern Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst); extern TimestampTz time2timestamptz(TimeADT timeVal); extern TimestampTz timetz2timestamptz(TimeTzADT* timetzVal); typedef enum { @@ -146,8 +147,8 @@ typedef enum { TIME_INCORRECT } TimeErrorType; - -Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErrorType* time_error_type); +extern Datum timestamp_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* 
time_error_type); +extern Datum timestamptz_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 3bab766f7..21dbd8185 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -333,6 +333,12 @@ extern "C" DLL_PUBLIC Datum timestamp_agg_finalfn(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(timestamp_cast); extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(timestamp_explicit); +extern "C" DLL_PUBLIC Datum timestamp_explicit(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(timestamptz_explicit); +extern "C" DLL_PUBLIC Datum timestamptz_explicit(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1_PUBLIC(convert_datetime_double); extern "C" DLL_PUBLIC Datum convert_datetime_double(PG_FUNCTION_ARGS); @@ -540,19 +546,39 @@ bool TimestampTypeCheck(char* str, bool can_ignore, struct pg_tm* tm, Timestamp /* timestamp_in() * Convert a string to internal form. 
+ * timestamp_in case: cast('xx' as datetime),datetime(xxx),xx::datetime + * timestamp_cast case: dattime'xxx' + * text_time_explicit case: cast(xx as datetime) */ Datum timestamp_in(PG_FUNCTION_ARGS) #ifdef DOLPHIN { - return timestamp_internal(fcinfo, false); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = timestamp_internal(fcinfo, TIME_IN, &time_error_type); + if ((fcinfo->ccontext == COERCION_IMPLICIT || fcinfo->ccontext == COERCION_EXPLICIT) && + time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; } Datum timestamp_cast(PG_FUNCTION_ARGS) { - return timestamp_internal(fcinfo, true); + TimeErrorType time_error_type = TIME_CORRECT; + return timestamp_internal(fcinfo, TIME_CAST, &time_error_type); } -Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) +Datum timestamp_explicit(PG_FUNCTION_ARGS) +{ + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = timestamp_internal(fcinfo, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return datum_internal; +} + +Datum timestamp_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type) #endif { char* str = PG_GETARG_CSTRING(0); @@ -581,7 +607,7 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) /* * this case is used for timestamp format is specified. 
*/ - if (4 == PG_NARGS() && !is_date_sconst) { + if (TIMESTAMP_WITH_FORMAT_ARG_LEN == PG_NARGS() && time_cast_type == TIME_IN) { timestamp_fmt = PG_GETARG_CSTRING(3); if (timestamp_fmt == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), errmsg("specified timestamp format is null"))); @@ -611,7 +637,11 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) */ dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr != 0) { - DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, is_date_sconst); + DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, time_cast_type == TIME_CAST); +#ifdef DOLPHIN + *time_error_type = SQL_MODE_NOT_STRICT_ON_INSERT() || fcinfo->can_ignore ? + TIME_CORRECT : TIME_INCORRECT; +#endif /* * if error ignorable, function DateTimeParseError reports warning instead, then return current timestamp. */ @@ -627,9 +657,11 @@ Datum timestamp_internal(PG_FUNCTION_ARGS, bool is_date_sconst) } } if (dterr != 0) { - DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, is_date_sconst); + DateTimeParseErrorWithFlag(dterr, str, "timestamp", fcinfo->can_ignore, time_cast_type == TIME_CAST); #ifdef DOLPHIN check_zero_month_day(tm, fcinfo->can_ignore); + *time_error_type = SQL_MODE_NOT_STRICT_ON_INSERT() || fcinfo->can_ignore ? 
+ TIME_CORRECT : TIME_INCORRECT; #endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } @@ -759,7 +791,14 @@ Datum float8_b_format_datetime(PG_FUNCTION_ARGS) char *str = DatumGetCString(DirectFunctionCall1(float8out, Float8GetDatum(n))); char buf[MAXDATELEN + 1]; fillZeroBeforeNumericTimestamp(str, buf); - return DirectFunctionCall3(timestamp_in, CStringGetDatum(buf), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + bool isRetNull = false; + Datum result = DirectCall3(&isRetNull, timestamp_explicit, InvalidOid, CStringGetDatum(buf), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + PG_RETURN_NULL(); + } else { + return result; + } } Datum float8_b_format_timestamp(PG_FUNCTION_ARGS) @@ -768,7 +807,14 @@ Datum float8_b_format_timestamp(PG_FUNCTION_ARGS) char *str = DatumGetCString(DirectFunctionCall1(float8out, Float8GetDatum(n))); char buf[MAXDATELEN + 1]; fillZeroBeforeNumericTimestamp(str, buf); - return DirectFunctionCall3(timestamptz_in, CStringGetDatum(buf), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + bool isRetNull = false; + Datum result = DirectCall3(&isRetNull, timestamptz_explicit, InvalidOid, CStringGetDatum(buf), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + PG_RETURN_NULL(); + } else { + return result; + } } Datum numeric_b_format_date(PG_FUNCTION_ARGS) @@ -785,7 +831,14 @@ Datum numeric_b_format_datetime(PG_FUNCTION_ARGS) char *str = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(n))); char buf[MAXDATELEN + 1]; fillZeroBeforeNumericTimestamp(str, buf); - return DirectFunctionCall3(timestamp_in, CStringGetDatum(buf), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + bool isRetNull = false; + Datum result = DirectCall3(&isRetNull, timestamp_explicit, InvalidOid, CStringGetDatum(buf), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + PG_RETURN_NULL(); + } else { + return result; + } } Datum numeric_b_format_timestamp(PG_FUNCTION_ARGS) @@ -794,7 +847,14 @@ Datum 
numeric_b_format_timestamp(PG_FUNCTION_ARGS) char *str = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(n))); char buf[MAXDATELEN + 1]; fillZeroBeforeNumericTimestamp(str, buf); - return DirectFunctionCall3(timestamptz_in, CStringGetDatum(buf), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + bool isRetNull = false; + Datum result = DirectCall3(&isRetNull, timestamptz_explicit, InvalidOid, CStringGetDatum(buf), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + PG_RETURN_NULL(); + } else { + return result; + } } #ifdef DOLPHIN @@ -813,7 +873,12 @@ int NumberTimestamp(char *str, pg_tm *tm, fsec_t *fsec) } /* validate len */ if (len > TIMESTAMP_YYYYMMDDhhmmss_LEN) { - ereport(ERROR, +#ifdef DOLPHIN + if (ENABLE_B_CMPT_MODE) + return DTERR_FIELD_OVERFLOW; + else +#endif + ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range: \"%s\"", str))); } /* treat as date */ @@ -885,6 +950,11 @@ int NumberTimestamp(char *str, pg_tm *tm, fsec_t *fsec) *tcp = '\0'; dterr = NumberTime(true, time_str, tm, fsec); if (dterr) { +#ifdef DOLPHIN + if (ENABLE_B_CMPT_MODE) + return dterr; + else +#endif ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range: \"%s\"", str))); } @@ -1651,7 +1721,7 @@ Datum timestamptz_in(PG_FUNCTION_ARGS) #ifdef DOLPHIN { TimeErrorType time_error_type = TIME_CORRECT; - Datum result = timestamptz_internal(fcinfo, false, &time_error_type); + Datum result = timestamptz_internal(fcinfo, TIME_IN, &time_error_type); if (time_error_type == TIME_INCORRECT && (fcinfo->ccontext == COERCION_IMPLICIT || fcinfo->ccontext == COERCION_EXPLICIT)) { PG_RETURN_NULL(); @@ -1659,17 +1729,17 @@ Datum timestamptz_in(PG_FUNCTION_ARGS) return result; } -Datum timestamptz_implicit(PG_FUNCTION_ARGS) +Datum timestamptz_explicit(PG_FUNCTION_ARGS) { TimeErrorType time_error_type = TIME_CORRECT; - Datum result = timestamptz_internal(fcinfo, true, &time_error_type); + 
Datum result = timestamptz_internal(fcinfo, TEXT_TIME_EXPLICIT, &time_error_type); if (time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); } return result; } -Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErrorType* time_error_type) +Datum timestamptz_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type) #endif { char* str = PG_GETARG_CSTRING(0); @@ -1711,12 +1781,12 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr } else { dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", is_timestamptz_sconst || fcinfo->can_ignore); + DateTimeParseError(dterr, str, "timestamp", (time_cast_type == TIME_CAST_IMPLICIT) || fcinfo->can_ignore); /* * if error ignorable, function DateTimeParseError reports warning instead, then return current timestamp. */ #ifdef DOLPHIN - *time_error_type = TIME_INCORRECT; + *time_error_type = SQL_MODE_NOT_STRICT_ON_INSERT() || fcinfo->can_ignore ? TIME_CORRECT : TIME_INCORRECT; #endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } @@ -1731,10 +1801,10 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr } } if (dterr != 0) { - DateTimeParseError(dterr, str, "timestamp", is_timestamptz_sconst || fcinfo->can_ignore); + DateTimeParseError(dterr, str, "timestamp", (time_cast_type == TIME_CAST_IMPLICIT) || fcinfo->can_ignore); #ifdef DOLPHIN - *time_error_type = TIME_INCORRECT; check_zero_month_day(tm, fcinfo->can_ignore); + *time_error_type = SQL_MODE_NOT_STRICT_ON_INSERT() || fcinfo->can_ignore ? 
TIME_CORRECT : TIME_INCORRECT; #endif PG_RETURN_TIMESTAMP(TIMESTAMP_ZERO); } @@ -1743,9 +1813,12 @@ Datum timestamptz_internal(PG_FUNCTION_ARGS, bool is_timestamptz_sconst, TimeErr #endif switch (dtype) { case DTK_DATE: - if (tm2timestamp(tm, fsec, &tz, &result) != 0) + if (tm2timestamp(tm, fsec, &tz, &result) != 0) { ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range: \"%s\"", str))); + *time_error_type = SQL_MODE_NOT_STRICT_ON_INSERT() || fcinfo->can_ignore ? + TIME_CORRECT : TIME_INCORRECT; + } break; case DTK_EPOCH: diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index 261d519f5..f24b8649c 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -441,5 +441,20 @@ select cast('10000' as timestamp with time zone); select cast('10100' as timestamp with time zone); select cast('10001' as timestamp with time zone); +set dolphin.b_compatibility_mode = true; +select cast(3.1415926 as datetime); +select cast(3.1415926::float8 as datetime); +select cast('3.1415926' as datetime); +select cast(3.1415926 as timestamp with time zone); +select cast(3.1415926::float8 as timestamp with time zone); +select cast('3.1415926' as timestamp with time zone); + +create table t1(a datetime, b timestamp with time zone); +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into t1 values ('3.1415926', '3.1415926'); +select * from t1; +drop table if exists t1; + + drop schema b_time_funcs3 cascade; reset current_schema; -- Gitee From 88fcfd9745cdee3039b4a9169fd6cc48758fd561 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Tue, 9 Jan 2024 15:24:37 +0800 Subject: [PATCH 167/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91=E5=A4=9A=E6=9C=BA=E5=B9=B6=E8=A1=8CDML?= 
=?UTF-8?q?=E6=80=A7=E8=83=BD=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/include/spq/spq_mutate.h | 1 + contrib/spq_plugin/src/guc_spq.cpp | 32 +++++---- contrib/spq_plugin/src/spq/spq_mutate.cpp | 60 +++++++++++++--- contrib/spq_plugin/src/spq/spq_plan.cpp | 4 +- .../src/parser/CParseHandlerHint.cpp | 14 +++- .../libspqopt/include/spqopt/engine/CHint.h | 71 +++++++++++++++++++ .../libspqopt/src/operators/CPhysicalDML.cpp | 51 ++++++++++++- .../spq_optimizer_util/utils/COptTasks.cpp | 16 +++++ 8 files changed, 221 insertions(+), 28 deletions(-) diff --git a/contrib/spq_plugin/include/spq/spq_mutate.h b/contrib/spq_plugin/include/spq/spq_mutate.h index 9b425161e..d8932c9af 100644 --- a/contrib/spq_plugin/include/spq/spq_mutate.h +++ b/contrib/spq_plugin/include/spq/spq_mutate.h @@ -23,6 +23,7 @@ typedef struct SpqSliceContext { PlannedStmt *result; int curentIndex; + int dmlcount; } SpqSliceContext; extern void collect_shareinput_producers(PlannerInfo *root, Plan *plan); diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 0cbaa49de..40029908a 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1199,6 +1199,26 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_remove_delete_redundant_motion", + "Remove delete redundant motion when delete_dop_num == read_dop_num.", + NULL, + &u_sess->attr.attr_spq.spq_enable_remove_delete_redundant_motion, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_remove_update_redundant_motion", + "Remove update redundant motion when update_dop_num == read_dop_num.", + NULL, + &u_sess->attr.attr_spq.spq_enable_remove_update_redundant_motion, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesInt() @@ -1419,18 +1439,6 @@ static 
void InitSpqConfigureNamesInt() NULL, NULL, NULL); - DefineCustomIntVariable("spqplugin.spq_wr_node_index", - "DML : Sets the write node' index according to cluster_map", - NULL, - &u_sess->attr.attr_spq.spq_wr_node_index, - 0, - 0, - 128, - PGC_USERSET, - 0, - NULL, - NULL, - NULL); } static void InitSpqConfigureNamesReal() diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index ed72c1130..74aa88b56 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -731,8 +731,14 @@ Plan* make_sort(Motion *motion, Plan *subplan) return (Plan*)node; } -Plan* make_dml_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedStmt *result) +Plan* make_dml_stream(PlannerInfo* root, Plan *subplan, Motion *motion, SpqSliceContext *cxt) { + PlannedStmt *result = cxt->result; + PlanSlice *slices = &(result->slices[motion->motionID]); + PlanSlice *parentSlices = &(result->slices[slices->parentIndex]); + if (check_slice_dop(slices, subplan, result) == false) { + ereport(ERROR, (errmsg("check_slice_dop in remote check fail motion[%d]", motion->motionID))); + } Stream* stream = makeNode(Stream); Plan* plan = &stream->scan.plan; Distribution* distribution = ng_get_dest_distribution(subplan); @@ -762,6 +768,7 @@ Plan* make_dml_stream(PlannerInfo* root, Plan *subplan, Motion *motion, PlannedS if (motion->sendSorted) { return make_sort(motion, (Plan*)stream); } + cxt->dmlcount = slices->numsegments > 1 ? 
cxt->dmlcount : 1; return (Plan*)stream; } @@ -874,9 +881,11 @@ Plan* make_gather_stream(PlannerInfo* root, Plan *subplan, Motion *motion, Plann } return (Plan*)stream_node; } -Plan *make_gather_remote_top(Plan *lefttree, PlannedStmt *result) +Plan *make_gather_remote_top(Plan *lefttree, int num_node) { - PlanSlice *slices = &(result->slices[0]); + if (num_node == 0) { + ereport(ERROR, (errmsg("make_gather_remote_top num node is 0"))); + } RemoteQuery* remote_query = makeNode(RemoteQuery); remote_query->combine_type = COMBINE_TYPE_NONE; remote_query->base_tlist = NIL; @@ -892,7 +901,7 @@ Plan *make_gather_remote_top(Plan *lefttree, PlannedStmt *result) remote_query->scan.plan.plan_width = lefttree->plan_width; remote_query->sort = NULL; remote_query->scan.plan.dop = 1; - remote_query->nodeCount = slices->numsegments > 1 ? t_thrd.spq_ctx.num_nodes : 1; + remote_query->nodeCount = num_node; return (Plan*)remote_query; } @@ -920,7 +929,7 @@ Plan *tran_motion_to_stream(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, } if (fromdml && backtop) { top = backtop; - return make_dml_stream(root, subplan, motion, cxt->result); + return make_dml_stream(root, subplan, motion, cxt); } // no need check motion->motionID again in below func; @@ -1025,10 +1034,38 @@ static void InitRemoteNodeDefinition(PlannedStmt* planstmt) planstmt->num_nodes = 0; return; } - int nodes_size = sizeof(NodeDefinition) * t_thrd.spq_ctx.num_nodes; - planstmt->num_nodes = t_thrd.spq_ctx.num_nodes; - planstmt->nodesDefinition = (NodeDefinition *)palloc0(nodes_size); - memcpy_s(planstmt->nodesDefinition, nodes_size, t_thrd.spq_ctx.nodesDefinition, nodes_size); + if ((planstmt->commandType == CMD_INSERT || planstmt->commandType == CMD_UPDATE || planstmt->commandType == CMD_DELETE) && + planstmt->write_node_index >= 0 && planstmt->write_node_index < t_thrd.spq_ctx.num_nodes && + IsA(planstmt->planTree, RemoteQuery) && ((RemoteQuery*)planstmt->planTree)->nodeCount == 1) { + RemoteQuery *remote_query = 
(RemoteQuery *) planstmt->planTree; + int nodes_size = sizeof(NodeDefinition); + planstmt->num_nodes = 1; + planstmt->nodesDefinition = (NodeDefinition *) palloc0(nodes_size); + memcpy_s(planstmt->nodesDefinition, nodes_size, &t_thrd.spq_ctx.nodesDefinition[planstmt->write_node_index], + nodes_size); + } else { + int nodes_size = sizeof(NodeDefinition) * t_thrd.spq_ctx.num_nodes; + planstmt->num_nodes = t_thrd.spq_ctx.num_nodes; + planstmt->nodesDefinition = (NodeDefinition *) palloc0(nodes_size); + memcpy_s(planstmt->nodesDefinition, nodes_size, t_thrd.spq_ctx.nodesDefinition, nodes_size); + } +} + +void set_write_node_index(PlannedStmt* planstmt) +{ + const char* write_node_name = GetConfigOption("pgxc_node_name", false, false); + int node_count = t_thrd.spq_ctx.num_nodes; + for (int i = 0; i < node_count; i++) { + const char* node_name = t_thrd.spq_ctx.nodesDefinition[i].nodename.data; + if (strcmp(node_name, write_node_name) == 0) { + planstmt->write_node_index = i; + break; + } + } + if (planstmt->write_node_index == -1) { + ereport(ERROR, (errmsg("cannot find write node in cluster_map"))); + } + } Plan *replace_motion_dml(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bool &top) { @@ -1045,18 +1082,19 @@ Plan *replace_motion_dml(PlannerInfo* root, SpqSliceContext *cxt, Plan *plan, bo } subplan_id++; } - Plan *remote_query_plan = make_gather_remote_top(plan, cxt->result); ModifyTable* node = (ModifyTable*)plan; ListCell* l = NULL; foreach (l, node->plans) { Plan *subplan = (Plan*)lfirst(l); if (subplan) { + cxt->dmlcount = t_thrd.spq_ctx.num_nodes; subplan = replace_motion_stream_recurse(root, cxt, subplan, top, true); lfirst(l) = subplan; } } plan->dop = 1; plan->parallel_enabled = (plan->dop > 1); + Plan* remote_query_plan = make_gather_remote_top(plan, cxt->dmlcount); return remote_query_plan; } @@ -1073,12 +1111,14 @@ void make_spq_remote_query(PlannerInfo *root, PlannedStmt *result, PlannerGlobal SpqSliceContext sliceCxt; sliceCxt.result = result; 
sliceCxt.curentIndex = 0; + result->write_node_index = -1; /* whether select's part top stream has appeared, * top stream is to send scaning data to qc to modify table*/ if (root->parse->commandType == CMD_SELECT) { result->planTree = replace_motion_stream_recurse(root, &sliceCxt, result->planTree, top); } else { result->planTree = replace_motion_dml(root, &sliceCxt, result->planTree, top); + set_write_node_index(result); } // should fix all? //result->planTree = set_plan_references(root, result->planTree); diff --git a/contrib/spq_plugin/src/spq/spq_plan.cpp b/contrib/spq_plugin/src/spq/spq_plan.cpp index 1b8dde74d..88bc3f637 100644 --- a/contrib/spq_plugin/src/spq/spq_plan.cpp +++ b/contrib/spq_plugin/src/spq/spq_plan.cpp @@ -178,9 +178,7 @@ Node *plan_tree_mutator(Node *node, Node *(*mutator)(Node *, void *), void *cont FLATCOPY(newmt, mt, ModifyTable); PLANMUTATE(newmt, mt); MUTATE(newmt->plans, mt->plans, List *); - // MUTATE(newmt->onConflictSet, mt->onConflictSet, List *); - // MUTATE(newmt->onConflictWhere, mt->onConflictWhere , Node *); - // MUTATE(newmt->withCheckOptionLists, mt->withCheckOptionLists, List *); + MUTATE(newmt->withCheckOptionLists, mt->withCheckOptionLists, List *); MUTATE(newmt->returningLists, mt->returningLists, List *); return (Node *)newmt; } break; diff --git a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerHint.cpp b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerHint.cpp index fb0c53fd9..93a416a67 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerHint.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libnaucrates/src/parser/CParseHandlerHint.cpp @@ -21,6 +21,11 @@ using namespace spqdxl; using namespace spqopt; +#define DEFAULT_INSERT_DOP_NUM ULONG(1) +#define DEFAULT_UPDATE_DOP_NUM ULONG(1) +#define DEFAULT_SELECT_DOP_NUM ULONG(1) +#define DEFAULT_DELETE_DOP_NUM ULONG(1) + XERCES_CPP_NAMESPACE_USE 
//--------------------------------------------------------------------------- @@ -116,7 +121,14 @@ CParseHandlerHint::StartElement(const XMLCh *const, //element_uri, m_hint = SPQOS_NEW(m_mp) CHint( join_arity_for_associativity_commutativity, array_expansion_threshold, join_order_dp_threshold, broadcast_threshold, enforce_constraint_on_dml, - push_group_by_below_setop_threshold, xform_bind_threshold, skew_factor); + push_group_by_below_setop_threshold, xform_bind_threshold, + DEFAULT_INSERT_DOP_NUM, + DEFAULT_UPDATE_DOP_NUM, + DEFAULT_SELECT_DOP_NUM, + DEFAULT_DELETE_DOP_NUM, + true, + true, + skew_factor); } //--------------------------------------------------------------------------- diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h index e6d0fa153..c9bc801a3 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h @@ -21,6 +21,11 @@ #define XFORM_BIND_THRESHOLD ULONG(0) #define SKEW_FACTOR ULONG(0) +#define MAX_INSERT_DOP_NUM ULONG(128) +#define MAX_UPDATE_DOP_NUM ULONG(128) +#define MAX_SELECT_DOP_NUM ULONG(128) +#define MAX_DELETE_DOP_NUM ULONG(128) + namespace spqopt { @@ -51,6 +56,18 @@ private: ULONG m_ulXform_bind_threshold; + ULONG m_ulInsertDopNum; + + ULONG m_ulUpdateDopNum; + + ULONG m_ulSelectDopNum; + + ULONG m_ulDeleteDopNum; + + BOOL m_fRemoveUpdateRedundantMotion; + + BOOL m_fRemoveDeleteRedundantMotion; + // private copy ctor CHint(const CHint &); ULONG m_ulSkewFactor; @@ -61,6 +78,12 @@ public: ULONG array_expansion_threshold, ULONG ulJoinOrderDPLimit, ULONG broadcast_threshold, BOOL enforce_constraint_on_dml, ULONG push_group_by_below_setop_threshold, ULONG xform_bind_threshold, + ULONG insert_dop_num, + ULONG update_dop_num, + ULONG select_dop_num, + ULONG delete_dop_num, + BOOL remove_update_redundant_motion, + 
BOOL remove_delete_redundant_motion, ULONG skew_factor) : m_ulJoinArityForAssociativityCommutativity( join_arity_for_associativity_commutativity), @@ -71,6 +94,12 @@ public: m_ulPushGroupByBelowSetopThreshold( push_group_by_below_setop_threshold), m_ulXform_bind_threshold(xform_bind_threshold), + m_ulInsertDopNum(insert_dop_num), + m_ulUpdateDopNum(update_dop_num), + m_ulSelectDopNum(select_dop_num), + m_ulDeleteDopNum(delete_dop_num), + m_fRemoveUpdateRedundantMotion(remove_update_redundant_motion), + m_fRemoveDeleteRedundantMotion(remove_delete_redundant_motion), m_ulSkewFactor(skew_factor) { } @@ -129,6 +158,42 @@ public: return m_ulPushGroupByBelowSetopThreshold; } + ULONG + UlInsertDopNum() const + { + return m_ulInsertDopNum; + } + + ULONG + UlUpdateDopNum() const + { + return m_ulUpdateDopNum; + } + + ULONG + UlSelectDopNum() const + { + return m_ulSelectDopNum; + } + + ULONG + UlDeleteDopNum() const + { + return m_ulDeleteDopNum; + } + + BOOL + FRemoveUpdateRedundantMotion() const + { + return m_fRemoveUpdateRedundantMotion; + } + + BOOL + FRemoveDeleteRedundantMotion() const + { + return m_fRemoveDeleteRedundantMotion; + } + // Stop generating alternatives for group expression if bindings exceed this threshold ULONG UlXformBindThreshold() const @@ -156,6 +221,12 @@ public: true, /* enforce_constraint_on_dml */ PUSH_GROUP_BY_BELOW_SETOP_THRESHOLD, /* push_group_by_below_setop_threshold */ XFORM_BIND_THRESHOLD, /* xform_bind_threshold */ + MAX_INSERT_DOP_NUM, + MAX_UPDATE_DOP_NUM, + MAX_SELECT_DOP_NUM, + MAX_DELETE_DOP_NUM, + true, + true, SKEW_FACTOR /* skew_factor */ ); } diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalDML.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalDML.cpp index 9d5d66d30..20413001a 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalDML.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/src/operators/CPhysicalDML.cpp @@ -255,6 +255,8 
@@ CPhysicalDML::PdsRequired(CMemoryPool *mp, ULONG // ulOptReq ) const { + COptimizerConfig *optimizer_config = + COptCtxt::PoctxtFromTLS()->GetOptimizerConfig(); SPQOS_ASSERT(0 == child_index); if (CDistributionSpec::EdtRandom == m_pds->Edt()) @@ -266,9 +268,54 @@ CPhysicalDML::PdsRequired(CMemoryPool *mp, // data inserted if (CLogicalDML::EdmlInsert == m_edmlop) { - return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + if (ULONG(1) == optimizer_config->GetHint()->UlInsertDopNum()) + return SPQOS_NEW(mp) CDistributionSpecSingleton(); + else + return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + } + else if (CLogicalDML::EdmlUpdate == m_edmlop) + { + bool remove_redundant_motion = optimizer_config->GetHint()->FRemoveUpdateRedundantMotion(); + ULONG update_dop_num = optimizer_config->GetHint()->UlUpdateDopNum(); + ULONG select_dop_num = optimizer_config->GetHint()->UlSelectDopNum(); + if (!remove_redundant_motion) + return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + else + { + if (update_dop_num == select_dop_num) + { + m_pds->AddRef(); + return m_pds; + } + else + return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + } + } + else if (CLogicalDML::EdmlDelete == m_edmlop) + { + /* delete */ + bool remove_redundant_motion = optimizer_config->GetHint()->FRemoveDeleteRedundantMotion(); + ULONG delete_dop_num = optimizer_config->GetHint()->UlDeleteDopNum(); + ULONG select_dop_num = optimizer_config->GetHint()->UlSelectDopNum(); + if (!remove_redundant_motion) + return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + else + { + if (delete_dop_num == select_dop_num) + { + m_pds->AddRef(); + return m_pds; + } + else + return SPQOS_NEW(mp) CDistributionSpecStrictRandom(); + } + } + else + { + SPQOS_RAISE( + CException::ExmaInvalid, CException::ExmiInvalid, + SPQOS_WSZ_LIT("Unknown DML type in CPhysicalDML.")); } - return SPQOS_NEW(mp) CDistributionSpecRouted(m_pcrSegmentId); } m_pds->AddRef(); diff --git 
a/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp b/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp index 90f7318db..f14d07d02 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/utils/COptTasks.cpp @@ -32,6 +32,8 @@ #include "spq_optimizer_util/utils/CConstExprEvaluatorProxy.h" #include "spq_optimizer_util/utils/spqdbdefs.h" +#include "spq/spq_util.h" + //#include "cdb/cdbvars.h" #include "utils/fmgroids.h" #include "utils/guc.h" @@ -387,6 +389,14 @@ COptTasks::CreateOptimizerConfig(CMemoryPool *mp, ICostModel *cost_model) ULONG push_group_by_below_setop_threshold = (ULONG) u_sess->attr.attr_spq.spq_optimizer_push_group_by_below_setop_threshold; ULONG xform_bind_threshold = (ULONG) u_sess->attr.attr_spq.spq_optimizer_xform_bind_threshold; + + ULONG insert_dop_num = (ULONG) u_sess->attr.attr_spq.spq_insert_dop_num; + ULONG update_dop_num = (ULONG) u_sess->attr.attr_spq.spq_update_dop_num; + ULONG select_dop_num = (ULONG) getSpqsegmentCount(); + ULONG delete_dop_num = (ULONG) u_sess->attr.attr_spq.spq_delete_dop_num; + BOOL remove_update_redundant_motion = u_sess->attr.attr_spq.spq_enable_remove_update_redundant_motion; + BOOL remove_delete_redundant_motion = u_sess->attr.attr_spq.spq_enable_remove_delete_redundant_motion; + ULONG skew_factor = (ULONG) u_sess->attr.attr_spq.spq_optimizer_skew_factor; return SPQOS_NEW(mp) COptimizerConfig( @@ -403,6 +413,12 @@ COptTasks::CreateOptimizerConfig(CMemoryPool *mp, ICostModel *cost_model) false, /* don't create Assert nodes for constraints, we'll * enforce them ourselves in the executor */ push_group_by_below_setop_threshold, xform_bind_threshold, + insert_dop_num, + update_dop_num, + select_dop_num, + delete_dop_num, + remove_update_redundant_motion, + remove_delete_redundant_motion, skew_factor), SPQOS_NEW(mp) CWindowOids(OID(F_WINDOW_ROW_NUMBER), OID(F_WINDOW_RANK))); } -- Gitee From 
ce4b73658bfbc0258413a9f525355d879ba4f90b Mon Sep 17 00:00:00 2001 From: lukeman Date: Mon, 8 Jan 2024 14:24:48 +0800 Subject: [PATCH 168/434] =?UTF-8?q?=E5=90=8C=E6=AD=A5server=E4=BB=93?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=EF=BC=9A36fa0a84=E5=92=8C9697e22d?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_parser/analyze.cpp | 40 ++++++++++++++++--- .../whale/include/plugin_parser/parsetree.h | 2 +- .../whale/plugin_parser/parse_relation.cpp | 5 ++- contrib/whale/plugin_utils/adt/ruleutils.cpp | 2 +- contrib/whale/tablecmds.cpp | 32 +++++++++++++-- 5 files changed, 68 insertions(+), 13 deletions(-) diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index 102e1c075..d03cb4a2c 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -1779,15 +1779,45 @@ static void SetUpsertAttrnoState(ParseState* pstate, List *targetList) } } +static bool isSubExprSupportRightRef(Node* node) +{ + if (!node) { + return true; + } + if (IsA(node, A_Expr)) { + A_Expr* expr = (A_Expr*)node; + return isSubExprSupportRightRef(expr->lexpr) && + isSubExprSupportRightRef(expr->rexpr); + } else if (IsA(node, SubLink)) { + SubLink* sl = (SubLink*)node; + if (sl->subselect) { + SelectStmt* subsel = (SelectStmt*)sl->subselect; + return (!subsel->whereClause || subsel->fromClause); + } + } + return true; +} + static RightRefState* MakeRightRefStateIfSupported(SelectStmt* selectStmt) { bool isSupported = DB_IS_CMPT(B_FORMAT) && selectStmt && selectStmt->valuesLists && !IsInitdb; - if (isSupported) { - RightRefState* refState = (RightRefState*)palloc0(sizeof(RightRefState)); - refState->isSupported = true; - return refState; + if (!isSupported) { + return nullptr; + } + ListCell* lc = NULL; + ListCell* lc2 = NULL; + foreach (lc, selectStmt->valuesLists) { + List* sublist = (List*)lfirst(lc); + foreach(lc2, sublist) { + Node* col = 
(Node*)lfirst(lc2); + if (!isSubExprSupportRightRef(col)) { + return nullptr; + } + } } - return nullptr; + RightRefState* refState = (RightRefState*)palloc0(sizeof(RightRefState)); + refState->isSupported = true; + return refState; } /* diff --git a/contrib/whale/include/plugin_parser/parsetree.h b/contrib/whale/include/plugin_parser/parsetree.h index 05daf9515..6f54f4281 100644 --- a/contrib/whale/include/plugin_parser/parsetree.h +++ b/contrib/whale/include/plugin_parser/parsetree.h @@ -41,7 +41,7 @@ * Given an RTE and an attribute number, return the appropriate * variable name or alias for that attribute of that RTE. */ -extern char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum); +extern char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum, bool allowDroppeed = false); /* * Given an RTE and an attribute number, return the appropriate diff --git a/contrib/whale/plugin_parser/parse_relation.cpp b/contrib/whale/plugin_parser/parse_relation.cpp index 0b4f64431..378d0f8a2 100644 --- a/contrib/whale/plugin_parser/parse_relation.cpp +++ b/contrib/whale/plugin_parser/parse_relation.cpp @@ -1182,6 +1182,7 @@ bool ValidateDependView(Oid view_oid, char objType) /* create or replace view */ if (objType == OBJECT_TYPE_VIEW) { ReplaceViewQueryFirstAfter(query_str); + CommandCounterIncrement(); } return isValid; } @@ -2652,7 +2653,7 @@ List* expandRelAttrs(ParseState* pstate, RangeTblEntry* rte, int rtindex, int su * * Must free the pointer after usage!!! */ -char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum) +char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum, bool allowDropped) { if (attnum == InvalidAttrNumber) { return pstrdup("*"); @@ -2672,7 +2673,7 @@ char* get_rte_attribute_name(RangeTblEntry* rte, AttrNumber attnum) * built (which can easily happen for rules). 
*/ if (rte->rtekind == RTE_RELATION) { - return get_relid_attribute_name(rte->relid, attnum); + return get_relid_attribute_name(rte->relid, attnum, allowDropped); } /* diff --git a/contrib/whale/plugin_utils/adt/ruleutils.cpp b/contrib/whale/plugin_utils/adt/ruleutils.cpp index 26a873a4a..1f9931454 100644 --- a/contrib/whale/plugin_utils/adt/ruleutils.cpp +++ b/contrib/whale/plugin_utils/adt/ruleutils.cpp @@ -8781,7 +8781,7 @@ static char* get_variable( if (attnum == InvalidAttrNumber) attname = NULL; else - attname = get_rte_attribute_name(rte, attnum); + attname = get_rte_attribute_name(rte, attnum, true); if (refname && (context->varprefix || attname == NULL)) { if (schemaname != NULL) diff --git a/contrib/whale/tablecmds.cpp b/contrib/whale/tablecmds.cpp index 4df677356..8e33e8df3 100644 --- a/contrib/whale/tablecmds.cpp +++ b/contrib/whale/tablecmds.cpp @@ -11891,19 +11891,43 @@ void CheckPgRewriteWithDroppedColumn(Oid rel_oid, Oid rw_oid, Form_pg_attribute char *evActionString = TextDatumGetCString(evActiomDatum); List *evAction = (List *)stringToNode(evActionString); Query* query = (Query*)linitial(evAction); - // change query targetEntry + // change querytree's targetEntry and RTE ListCell* lc = NULL; foreach (lc, query->targetList) { TargetEntry* tle = (TargetEntry*)lfirst(lc); + Index rtevarno = 0; + AttrNumber rtevarattno = 0; if (nodeTag((Node*)tle->expr) == T_Var && tle->resorigtbl == rel_oid && - ((Var*)tle->expr)->varoattno == old_attnum) { + tle->resorigcol == old_attnum) { + ListCell* rtelc = NULL; + tle->resorigcol = attForm->attnum; Var *var = (Var *)tle->expr; - var->varattno = attForm->attnum; - var->varoattno = attForm->attnum; + rtevarno = var->varno; + rtevarattno = var->varattno; var->vartype = attForm->atttypid; var->vartypmod = attForm->atttypmod; + var->varcollid = attForm->attcollation; *attName = pstrdup(tle->resname); } + // change rtable entry + if (rtevarno == 0 || rtevarattno == 0) { + continue; + } + RangeTblEntry* rte = 
rt_fetch(rtevarno, query->rtable); + if (!rte || rte->alias != NULL || rte->rtekind != RTE_JOIN || rte->joinaliasvars == NIL) { + Var *var = (Var *)tle->expr; + var->varattno = attForm->attnum; + var->varoattno = attForm->attnum; + continue; + } + Var* aliasvar = (Var*)list_nth(rte->joinaliasvars, rtevarattno - 1); + if (IsA(aliasvar, Var)) { + aliasvar->varattno = attForm->attnum; + aliasvar->varoattno = attForm->attnum; + aliasvar->vartype = attForm->atttypid; + aliasvar->vartypmod = attForm->atttypmod; + aliasvar->varcollid = attForm->attcollation; + } } char* actiontree = nodeToString((Node*)evAction); HeapTuple new_dep_tuple; -- Gitee From 4b0d5ac24a310bb30f024fe7560edef4c7016db6 Mon Sep 17 00:00:00 2001 From: totaj Date: Tue, 9 Jan 2024 20:39:01 +0800 Subject: [PATCH 169/434] Fix comment and pk bug. --- contrib/dolphin/expected/b_comments.out | 32 +++++++- contrib/dolphin/expected/charset_gbk_b_db.out | 64 +++++++-------- .../dolphin/expected/charset_utf8mb4_b_db.out | 80 +++++++++--------- .../dolphin/output/view_definer_test.source | 18 ++--- contrib/dolphin/plugin_parser/gram.y | 81 ++++++++++--------- contrib/dolphin/sql/b_comments.sql | 10 ++- contrib/dolphin/sql/charset_gbk_b_db.sql | 26 +++--- contrib/dolphin/sql/charset_utf8mb4_b_db.sql | 26 +++--- 8 files changed, 188 insertions(+), 149 deletions(-) diff --git a/contrib/dolphin/expected/b_comments.out b/contrib/dolphin/expected/b_comments.out index 298ad48df..3b66ff57d 100644 --- a/contrib/dolphin/expected/b_comments.out +++ b/contrib/dolphin/expected/b_comments.out @@ -250,8 +250,36 @@ where pc.relname = 'uq_0034'; uq_index (1 row) +drop table if exists t1; +NOTICE: table "t1" does not exist, skipping +drop table if exists t2; +NOTICE: table "t2" does not exist, skipping +create table t1(id bigint not null comment 'pk' primary key); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" +create table t2(id bigint not null primary key comment 'pk' ); +NOTICE: CREATE 
TABLE / PRIMARY KEY will create implicit index "t2_pkey" for table "t2" +\d+ t1 + Table "b_comments.t1" + Column | Type | Modifiers | Storage | Stats target | Description +--------+--------+-----------+---------+--------------+------------- + id | bigint | not null | plain | | pk +Indexes: + "t1_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default +Has OIDs: no +Options: orientation=row, compression=no + +\d+ t2 + Table "b_comments.t2" + Column | Type | Modifiers | Storage | Stats target | Description +--------+--------+-----------+---------+--------------+------------- + id | bigint | not null | plain | | pk +Indexes: + "t2_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default +Has OIDs: no +Options: orientation=row, compression=no + drop schema b_comments cascade; -NOTICE: drop cascades to 16 other objects +NOTICE: drop cascades to 18 other objects DETAIL: drop cascades to table test_unsupported drop cascades to table test_row drop cascades to table test_column @@ -268,4 +296,6 @@ drop cascades to table t_comment_0058_01 drop cascades to table fvt_distribute_query_tables_02 drop cascades to table t_comment_0032 drop cascades to table t_comment_0034 +drop cascades to table t1 +drop cascades to table t2 reset search_path; diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index de3a73464..cede06d51 100644 --- a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -988,16 +988,16 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT('高斯DB', trim('1.2.3')) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ - 高斯DB5.1.1 | utf8_general_ci + 高斯DB1.2.3 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), '高斯DB') result, 
pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), '高斯DB') result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ - 5.1.1高斯DB | utf8_general_ci + 1.2.3高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); @@ -1037,16 +1037,16 @@ SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); (1 row) -- -- -- -- diff charset -SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - ¸ß˹DB5.1.1 | utf8_general_ci +SELECT CONCAT(_utf8mb4'高斯DB', trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + ¸ß˹DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 5.1.1¸ß˹DB | utf8_general_ci +SELECT CONCAT(trim('1.2.3'), _utf8mb4'高斯DB') result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + 1.2.3¸ß˹DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_utf8mb4'高斯DB', 123) result, pg_collation_for(result); @@ -1087,16 +1087,16 @@ SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), trim('1.2.3')) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ - 高斯DB5.1.1 | utf8_general_ci + 高斯DB1.2.3 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), CONCAT('高斯DB')) result, pg_collation_for(result); result | pg_collation_for ---------------+------------------ - 5.1.1高斯DB | utf8_general_ci + 1.2.3高斯DB | gbk_chinese_ci (1 row) SELECT CONCAT(CONCAT('高斯DB'), 123) 
result, pg_collation_for(result); @@ -1136,16 +1136,16 @@ SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); (1 row) -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - ¸ß˹DB5.1.1 | utf8_general_ci +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + ¸ß˹DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 5.1.1¸ß˹DB | utf8_general_ci +SELECT CONCAT(trim('1.2.3'), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + 1.2.3¸ß˹DB | utf8mb4_general_ci (1 row) SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, pg_collation_for(result); @@ -2011,16 +2011,16 @@ SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FRO ERROR: collation mismatch between collations "utf8mb4_unicode_ci" and "utf8mb4_general_ci" LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- - 高斯db5.1.1 | utf8mb4_unicode_ci + 高斯db1.2.3 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+-------------------- - 5.1.1高斯db | utf8mb4_unicode_ci + 1.2.3高斯db | utf8mb4_unicode_ci (1 row) SELECT CONCAT(fgbk_chi, '高斯DB') 
result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2104,16 +2104,16 @@ SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) F ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ - 高斯db5.1.1 | gbk_chinese_ci + 高斯db1.2.3 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for ---------------+------------------ - 5.1.1高斯DB | gbk_bin + 1.2.3高斯DB | gbk_bin (1 row) SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index b089bebcb..e601ea26b 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -1381,16 +1381,16 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk' -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 高斯DB5.1.1 | utf8_general_ci +SELECT CONCAT('高斯DB', trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + 高斯DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 5.1.1高斯DB | 
utf8_general_ci +SELECT CONCAT(trim('1.2.3'), '高斯DB') result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + 1.2.3高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); @@ -1430,16 +1430,16 @@ SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); (1 row) -- -- -- -- diff charset -SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for ----------------+------------------ - 楂樻柉DB5.1.1 | utf8_general_ci +SELECT CONCAT(_gbk'高斯DB', trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +---------------+-------------------- + 楂樻柉DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ----------------+------------------ - 5.1.1楂樻柉DB | utf8_general_ci +SELECT CONCAT(trim('1.2.3'), _gbk'高斯DB') result, pg_collation_for(result); + result | pg_collation_for +---------------+-------------------- + 1.2.3楂樻柉DB | utf8mb4_general_ci (1 row) SELECT CONCAT(_gbk'高斯DB', 123) result, pg_collation_for(result); @@ -1480,16 +1480,16 @@ SELECT CONCAT(NULL, _gbk'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 高斯DB5.1.1 | utf8_general_ci +SELECT CONCAT(CONCAT('高斯DB'), trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +-------------+-------------------- + 高斯DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); - result | pg_collation_for --------------+------------------ - 5.1.1高斯DB | utf8_general_ci +SELECT CONCAT(trim('1.2.3'), CONCAT('高斯DB')) result, pg_collation_for(result); + result | pg_collation_for 
+-------------+-------------------- + 1.2.3高斯DB | utf8mb4_general_ci (1 row) SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); @@ -1529,16 +1529,16 @@ SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); (1 row) -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, pg_collation_for(result); - result | pg_collation_for ----------------+------------------ - 楂樻柉DB5.1.1 | utf8_general_ci +SELECT CONCAT(CONCAT(_gbk'高斯DB'), trim('1.2.3')) result, pg_collation_for(result); + result | pg_collation_for +---------------+-------------------- + 楂樻柉DB1.2.3 | utf8mb4_general_ci (1 row) -SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); - result | pg_collation_for ----------------+------------------ - 5.1.1楂樻柉DB | utf8_general_ci +SELECT CONCAT(trim('1.2.3'), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); + result | pg_collation_for +---------------+-------------------- + 1.2.3楂樻柉DB | utf8mb4_general_ci (1 row) SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, pg_collation_for(result); @@ -2962,16 +2962,16 @@ SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FRO 高斯db高斯db | (1 row) -SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- - 高斯db5.1.1 | utf8mb4_unicode_ci + 高斯db1.2.3 | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+-------------------- - 5.1.1高斯db | utf8mb4_unicode_ci + 1.2.3高斯db | utf8mb4_unicode_ci (1 row) SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM 
t_diff_charset_columns; @@ -3075,16 +3075,16 @@ SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) F ERROR: collation mismatch between collations "gb18030_bin" and "gbk_bin" LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ - 高斯db5.1.1 | gbk_chinese_ci + 高斯db1.2.3 | gbk_chinese_ci (1 row) -SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for -------------+------------------ - 5.1.1高斯DB | gbk_bin + 1.2.3高斯DB | gbk_bin (1 row) SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; diff --git a/contrib/dolphin/output/view_definer_test.source b/contrib/dolphin/output/view_definer_test.source index d0455f956..ad1517ce5 100644 --- a/contrib/dolphin/output/view_definer_test.source +++ b/contrib/dolphin/output/view_definer_test.source @@ -62,10 +62,6 @@ SELECT c.relname as view_name, u.usename as rolname FROM pg_class c, pg_user u W -- dump all views \! @abs_bindir@/gs_dump test_db -p @portstring@ --include-depend-objs --exclude-self --disable-progress | grep -vE '^SET|^REVOKE|^GRANT|^--|^gs_dump|^COMMENT|^DROP'| tr -s '\n' > @abs_bindir@/definer_view_dump.sql 2>&1 -Begin scanning database. -Finish scanning database. -Start dumping objects -Finish dumping objects \! cat @abs_bindir@/definer_view_dump.sql CREATE SCHEMA "Test_User"; @@ -100,10 +96,9 @@ COPY public.tab_1107262 (id, c1) FROM stdin; \. ; \! @abs_bindir@/gs_dump test_db -p @portstring@ -F c -f @abs_bindir@/definer_view_dump.dmp --disable-progress -Begin scanning database. 
-Finish scanning database. -Start dumping objects -Finish dumping objects +--?gs_dump[port='@portstring@'][test_db].* +--?gs_dump[port='@portstring@'][test_db].* +--?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* --?gs_dump[port='@portstring@'][test_db].* @@ -145,10 +140,9 @@ create database test_1; create table startwith_t(id int, level int, connect_by_isleaf int, connect_by_iscycle int); create view startwith_v as select id, connect_by_isleaf as level, level as connect_by_isleaf from startwith_t; \! @abs_bindir@/gs_dump test_1 -p @portstring@ -f @abs_bindir@/dump_postgres.sql --disable-progress -Begin scanning database. -Finish scanning database. -Start dumping objects -Finish dumping objects +--?gs_dump[port='@portstring@'][test_1].* +--?gs_dump[port='@portstring@'][test_1].* +--?gs_dump[port='@portstring@'][test_1].* --?gs_dump[port='@portstring@'][test_1].* --?gs_dump[port='@portstring@'][test_1].* --?gs_dump[port='@portstring@'][test_1].* diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 019f3e1f3..5c68a826c 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -412,11 +412,11 @@ static List *mergeTableFuncParameters(List *func_args, List *columns); static TypeName *TableFuncTypeName(List *columns); static RangeVar *makeRangeVarFromAnyName(List *names, int position, core_yyscan_t yyscanner); static void SplitColQualList(List *qualList, - List **constraintList, CollateClause **collClause, + List **constraintList, CollateClause **collClause, List **columnOptions, core_yyscan_t yyscanner); static void SplitColQualList(List *qualList, List **constraintList, CollateClause **collClause, ClientLogicColumnRef **clientLogicColumnRef, - core_yyscan_t yyscanner); + List **columnOptions, core_yyscan_t yyscanner); static void processCASbits(int cas_bits, int location, const char *constrType, bool *deferrable, 
bool *initdeferred, bool *not_valid, bool *no_inherit, core_yyscan_t yyscanner); @@ -4768,10 +4768,10 @@ modify_column_cmds: | modify_column_cmds ',' modify_column_cmd { $$ = lappend($$, $3); } ; modify_column_cmd: - DolphinColColId Typename opt_charset ColQualList opt_column_options add_column_first_after + DolphinColColId Typename opt_charset ColQualList add_column_first_after { - AlterTableCmd *n = (AlterTableCmd *)$6; - if ($4 == NULL && $5 == NULL && n->is_first == false && n->after_name == NULL && !ENABLE_MODIFY_COLUMN) { + AlterTableCmd *n = (AlterTableCmd *)$5; + if ($4 == NULL && n->is_first == false && n->after_name == NULL && !ENABLE_MODIFY_COLUMN) { ColumnDef *def = makeNode(ColumnDef); n->subtype = AT_AlterColumnType; n->name = $1; @@ -4807,10 +4807,6 @@ modify_column_cmd: def->colname = $1; def->typname = $2; def->typname->charset = $3->charset; - def->columnOptions = $5; - if ($3->binary) { - def->columnOptions = lappend(def->columnOptions, makeString("binary")); - } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -4823,7 +4819,10 @@ modify_column_cmd: def->collOid = InvalidOid; def->fdwoptions = NULL; def->update_default = NULL; - SplitColQualList($4, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner); + SplitColQualList($4, &def->constraints, &def->collClause, &def->clientLogicColumnRef, &def->columnOptions, yyscanner); + if ($3->binary) { + def->columnOptions = lappend(def->columnOptions, makeString("binary")); + } n->subtype = AT_ModifyColumn; n->name = $1; n->def = (Node *)def; @@ -5700,7 +5699,7 @@ alter_table_cmd: { $$ = $2; } - | MODIFY_P COLUMN DolphinColColId Typename opt_charset ColQualList opt_column_options add_column_first_after + | MODIFY_P COLUMN DolphinColColId Typename opt_charset ColQualList add_column_first_after { #ifdef ENABLE_MULTIPLE_NODES const char* message = "Un-support feature"; @@ -5721,10 +5720,6 @@ alter_table_cmd: def->colname = $3; def->typname = $4; 
def->typname->charset = $5->charset; - def->columnOptions = $7; - if ($5->binary) { - def->columnOptions = lappend(def->columnOptions, makeString("binary")); - } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -5737,14 +5732,17 @@ alter_table_cmd: def->cooked_default = NULL; def->collOid = InvalidOid; def->fdwoptions = NULL; - SplitColQualList($6, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner); - AlterTableCmd *n = (AlterTableCmd *)$8; + SplitColQualList($6, &def->constraints, &def->collClause, &def->clientLogicColumnRef, &def->columnOptions, yyscanner); + if ($5->binary) { + def->columnOptions = lappend(def->columnOptions, makeString("binary")); + } + AlterTableCmd *n = (AlterTableCmd *)$7; n->subtype = AT_ModifyColumn; n->name = $3; n->def = (Node *)def; $$ = (Node *)n; } - | CHANGE opt_column DolphinColColId DolphinColColId Typename opt_charset ColQualList opt_column_options add_column_first_after + | CHANGE opt_column DolphinColColId DolphinColColId Typename opt_charset ColQualList add_column_first_after { #ifdef ENABLE_MULTIPLE_NODES const char* message = "Un-support feature"; @@ -5765,10 +5763,6 @@ alter_table_cmd: def->colname = $4; def->typname = $5; def->typname->charset = $6->charset; - def->columnOptions = $8; - if ($6->binary) { - def->columnOptions = lappend(def->columnOptions, makeString("binary")); - } def->kvtype = ATT_KV_UNDEFINED; def->inhcount = 0; def->is_local = true; @@ -5781,8 +5775,11 @@ alter_table_cmd: def->cooked_default = NULL; def->collOid = InvalidOid; def->fdwoptions = NULL; - SplitColQualList($7, &def->constraints, &def->collClause, &def->clientLogicColumnRef, yyscanner); - AlterTableCmd *n = (AlterTableCmd *)$9; + SplitColQualList($7, &def->constraints, &def->collClause, &def->clientLogicColumnRef, &def->columnOptions, yyscanner); + if ($6->binary) { + def->columnOptions = lappend(def->columnOptions, makeString("binary")); + } + AlterTableCmd *n = (AlterTableCmd *)$8; 
n->subtype = AT_ModifyColumn; n->name = $3; n->def = (Node *)def; @@ -9910,7 +9907,7 @@ ColIdForTableElement: DOLPHINIDENT { $$ = $1->str; } | col_name_keyword { $$ = pstrdup($1); } ; -columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCmprsMode create_generic_options ColQualList opt_column_options +columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCmprsMode create_generic_options ColQualList { ColumnDef *n = makeNode(ColumnDef); n->colname = $1; @@ -9929,13 +9926,12 @@ columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCm n->collOid = InvalidOid; n->fdwoptions = $6; if ($4 == ATT_KV_UNDEFINED) { - SplitColQualList($7, &n->constraints, &n->collClause, &n->clientLogicColumnRef, + SplitColQualList($7, &n->constraints, &n->collClause, &n->clientLogicColumnRef, &n->columnOptions, yyscanner); } else { - SplitColQualList($7, &n->constraints, &n->collClause, + SplitColQualList($7, &n->constraints, &n->collClause, &n->columnOptions, yyscanner); } - n->columnOptions = $8; if ($3->binary) { n->columnOptions = lappend(n->columnOptions, makeString("binary")); } @@ -9943,7 +9939,7 @@ columnDefForTableElement: ColIdForTableElement Typename opt_charset KVType ColCm } ; -columnDef: DolphinColColId Typename opt_charset KVType ColCmprsMode create_generic_options ColQualList opt_column_options +columnDef: DolphinColColId Typename opt_charset KVType ColCmprsMode create_generic_options ColQualList { ColumnDef *n = makeNode(ColumnDef); n->colname = $1; @@ -9962,13 +9958,12 @@ columnDef: DolphinColColId Typename opt_charset KVType ColCmprsMode create_gener n->collOid = InvalidOid; n->fdwoptions = $6; if ($4 == ATT_KV_UNDEFINED) { - SplitColQualList($7, &n->constraints, &n->collClause, &n->clientLogicColumnRef, + SplitColQualList($7, &n->constraints, &n->collClause, &n->clientLogicColumnRef, &n->columnOptions, yyscanner); } else { - SplitColQualList($7, &n->constraints, &n->collClause, + SplitColQualList($7, 
&n->constraints, &n->collClause, &n->columnOptions, yyscanner); } - n->columnOptions = $8; if ($3->binary) { n->columnOptions = lappend(n->columnOptions, makeString("binary")); } @@ -10043,7 +10038,7 @@ columnOptions: ColId WITH OPTIONS ColQualList n->raw_default = NULL; n->cooked_default = NULL; n->collOid = InvalidOid; - SplitColQualList($4, &n->constraints, &n->collClause, &n->clientLogicColumnRef, + SplitColQualList($4, &n->constraints, &n->collClause, &n->clientLogicColumnRef, &n->columnOptions, yyscanner); $$ = (Node *)n; } @@ -10113,6 +10108,10 @@ ColConstraint: n->cooked_expr = NULL; $$ = (Node *)n; } + | column_option + { + $$ = $1; + } ; with_algorithm: WITH '(' algorithm_desc ')' @@ -13879,7 +13878,7 @@ ForeignColDef: ColId Typename ForeignPosition create_generic_options ColQualLis n->collOid = InvalidOid; n->fdwoptions = $4; n->clientLogicColumnRef=NULL; - SplitColQualList($5, &n->constraints, &n->collClause, + SplitColQualList($5, &n->constraints, &n->collClause, &n->columnOptions, yyscanner); if ($3) { @@ -25710,7 +25709,7 @@ CreateDomainStmt: CreateDomainStmt *n = makeNode(CreateDomainStmt); n->domainname = $3; n->typname = $5; - SplitColQualList($6, &n->constraints, &n->collClause, + SplitColQualList($6, &n->constraints, &n->collClause, NULL, yyscanner); $$ = (Node *)n; } @@ -39290,7 +39289,7 @@ makeRangeVarFromAnyName(List *names, int position, core_yyscan_t yyscanner) /* Separate Constraint nodes from COLLATE clauses in a ColQualList */ static void SplitColQualList(List *qualList, - List **constraintList, CollateClause **collClause, + List **constraintList, CollateClause **collClause, List **columnOptions, core_yyscan_t yyscanner) { ListCell *cell; @@ -39333,6 +39332,10 @@ SplitColQualList(List *qualList, errcause("client encryption feature is not supported this operation."), erraction("Check client encryption feature whether supported this operation."))); } + else if (IsA(n, CommentStmt) && columnOptions != NULL) + { + *columnOptions = 
lappend(*columnOptions, n); + } else { const char* message = "unexpected node type"; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); @@ -39349,7 +39352,7 @@ SplitColQualList(List *qualList, /* Separate Constraint nodes from COLLATE clauses in a ColQualList */ static void SplitColQualList(List *qualList, - List **constraintList, CollateClause **collClause,ClientLogicColumnRef **clientLogicColumnRef, + List **constraintList, CollateClause **collClause,ClientLogicColumnRef **clientLogicColumnRef, List **columnOptions, core_yyscan_t yyscanner) { ListCell *cell; @@ -39398,6 +39401,10 @@ SplitColQualList(List *qualList, } *clientLogicColumnRef = e; } + else if (IsA(n, CommentStmt)) + { + *columnOptions = lappend(*columnOptions, n); + } else { const char* message = "unexpected node type"; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); diff --git a/contrib/dolphin/sql/b_comments.sql b/contrib/dolphin/sql/b_comments.sql index 3233dfd4e..1ada076e9 100644 --- a/contrib/dolphin/sql/b_comments.sql +++ b/contrib/dolphin/sql/b_comments.sql @@ -177,5 +177,13 @@ from pg_description pd join pg_class pc on pd.objoid = pc.oid where pc.relname = 'uq_0034'; + +drop table if exists t1; +drop table if exists t2; +create table t1(id bigint not null comment 'pk' primary key); +create table t2(id bigint not null primary key comment 'pk' ); +\d+ t1 +\d+ t2 + drop schema b_comments cascade; -reset search_path; \ No newline at end of file +reset search_path; diff --git a/contrib/dolphin/sql/charset_gbk_b_db.sql b/contrib/dolphin/sql/charset_gbk_b_db.sql index b3dc5799f..bb259d7ed 100644 --- a/contrib/dolphin/sql/charset_gbk_b_db.sql +++ b/contrib/dolphin/sql/charset_gbk_b_db.sql @@ -207,8 +207,8 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci)) result, pg_collation_for(result); -- -- -- const CONCAT with other diff 
DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', DATE '2023-05-01') result, pg_collation_for(result); @@ -216,8 +216,8 @@ SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(_utf8mb4'高斯DB', opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), _utf8mb4'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), _utf8mb4'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB', 123) result, pg_collation_for(result); SELECT CONCAT(123, _utf8mb4'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); @@ -226,8 +226,8 @@ SELECT CONCAT(_utf8mb4'高斯DB', NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, _utf8mb4'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), CONCAT('高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT('高斯DB'), 123) result, pg_collation_for(result); SELECT CONCAT(123, 
CONCAT('高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); @@ -235,8 +235,8 @@ SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(re SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), 123) result, pg_collation_for(result); SELECT CONCAT(123, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT(_utf8mb4'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); @@ -480,8 +480,8 @@ SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, '高斯DB') result, 
pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT('高斯DB', fgbk_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -499,8 +499,8 @@ SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), fgbk_chi COLLATE gbk_chinese_ci) res SELECT CONCAT(fgbk_chi COLLATE gbk_chinese_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict in concat inside SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -1006,4 +1006,4 @@ reset enable_seqscan; DROP TABLE t_diff_charset_columns; -\c postgres \ No newline at end of file +\c postgres diff --git a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql index 86fee6963..ae721ba94 100644 --- a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql +++ b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql @@ -336,8 +336,8 @@ SELECT CONCAT(_utf8mb4'楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB')) SELECT CONCAT(_utf8mb4'楂樻柉DB楂樻柉DB' COLLATE utf8mb4_bin, CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE 
gb18030_chinese_ci)) result, pg_collation_for(result); -- -- -- const CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT('高斯DB', opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), '高斯DB') result, pg_collation_for(result); +SELECT CONCAT('高斯DB', trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', 123) result, pg_collation_for(result); SELECT CONCAT(123, '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', DATE '2023-05-01') result, pg_collation_for(result); @@ -345,8 +345,8 @@ SELECT CONCAT(DATE '2023-05-01', '高斯DB') result, pg_collation_for(result); SELECT CONCAT('高斯DB', NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, '高斯DB') result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(_gbk'高斯DB', opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB', trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB', 123) result, pg_collation_for(result); SELECT CONCAT(123, _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB', DATE '2023-05-01') result, pg_collation_for(result); @@ -355,8 +355,8 @@ SELECT CONCAT(_gbk'高斯DB', NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, _gbk'高斯DB') result, pg_collation_for(result); -- -- -- CONCAT CONCAT with other diff DERIVATION -- -- -- -- same charset -SELECT CONCAT(CONCAT('高斯DB'), opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), CONCAT('高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT('高斯DB'), trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), CONCAT('高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT('高斯DB'), 123) result, 
pg_collation_for(result); SELECT CONCAT(123, CONCAT('高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT('高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); @@ -364,8 +364,8 @@ SELECT CONCAT(DATE '2023-05-01', CONCAT('高斯DB')) result, pg_collation_for(re SELECT CONCAT(CONCAT('高斯DB'), NULL) result, pg_collation_for(result); SELECT CONCAT(NULL, CONCAT('高斯DB')) result, pg_collation_for(result); -- -- -- -- diff charset -SELECT CONCAT(CONCAT(_gbk'高斯DB'), opengauss_version()) result, pg_collation_for(result); -SELECT CONCAT(opengauss_version(), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(_gbk'高斯DB'), trim('1.2.3')) result, pg_collation_for(result); +SELECT CONCAT(trim('1.2.3'), CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT(_gbk'高斯DB'), 123) result, pg_collation_for(result); SELECT CONCAT(123, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); SELECT CONCAT(CONCAT(_gbk'高斯DB'), DATE '2023-05-01') result, pg_collation_for(result); @@ -839,8 +839,8 @@ SELECT CONCAT(CONCAT(futf8_gen, futf8_uni), futf8_bin collate utf8mb4_unicode_ci SELECT CONCAT(futf8_bin collate utf8mb4_unicode_ci, CONCAT(futf8_gen, futf8_uni)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- utf8mb4_unicode_ci SELECT CONCAT(CONCAT(futf8_gen), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict SELECT CONCAT(futf8_uni, CONCAT(futf8_gen)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict -SELECT CONCAT(futf8_uni, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, '高斯DB') result, 
pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT('高斯DB', futf8_uni) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -860,8 +860,8 @@ SELECT CONCAT(CONCAT(futf8_gen, fgbk_chi), futf8_uni) result, pg_collation_for(r SELECT CONCAT(futf8_uni, CONCAT(futf8_gen, fgbk_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; -- conflict SELECT CONCAT(fgbk_chi, CONCAT(fgb18030_chi)) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(CONCAT(fgb18030_bin), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_chi, opengauss_version()) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(opengauss_version(), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_chi, trim('1.2.3')) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(trim('1.2.3'), fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, '高斯DB') result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT('高斯DB', fgbk_bin) result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(fgbk_chi, 123) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -1571,4 +1571,4 @@ reset enable_seqscan; DROP TABLE t_diff_charset_columns; -\c postgres \ No newline at end of file +\c postgres -- Gitee From 2ae41dc2e3f370867614332e5f33008dcda1ee13 Mon Sep 17 00:00:00 2001 From: lukeman Date: Tue, 9 Jan 2024 22:11:54 +0800 Subject: [PATCH 170/434] =?UTF-8?q?=E5=A4=84=E7=90=86issue:=20=E6=94=AF?= =?UTF-8?q?=E6=8C=81convert(1,=20UNSIGNED=20INT)=E5=92=8C=20cast(1=20as=20?= =?UTF-8?q?UNSIGNED=20INT)=20=E8=AF=AD=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/uint_cast3.out | 24 ++++++++ 
contrib/dolphin/plugin_parser/gram.y | 76 ++++++++++++++----------- contrib/dolphin/sql/uint_cast3.sql | 5 ++ 3 files changed, 73 insertions(+), 32 deletions(-) diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index 68fac2789..630f4d73e 100644 --- a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -566,6 +566,30 @@ select * from t_uint; (1 row) drop table t_uint; +select cast(1 as signed int); + int8 +------ + 1 +(1 row) + +select cast(1 as unsigned int); + uint8 +------- + 1 +(1 row) + +select convert(1 , signed int); + int8 +------ + 1 +(1 row) + +select convert(1 , unsigned int); + uint8 +------- + 1 +(1 row) + select cast(1 as signed integer); int8 ------ diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 019f3e1f3..5b52a3332 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -34340,30 +34340,38 @@ func_expr_common_subexpr: { $$ = makeTypeCast($3, $5, @1); } - | CONVERT '(' a_expr ',' UNSIGNED INTEGER ')' - { - $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); - } - | CONVERT '(' a_expr ',' SIGNED INTEGER ')' - { - $$ = makeTypeCast($3, SystemTypeName("int8"), @1); - } - | CONVERT '(' a_expr ',' INTEGER UNSIGNED ')' - { - $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); - } - | CONVERT '(' a_expr ',' INTEGER SIGNED ')' - { - $$ = makeTypeCast($3, SystemTypeName("int8"), @1); - } - | CONVERT '(' a_expr ',' UNSIGNED ')' - { - $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); - } - | CONVERT '(' a_expr ',' SIGNED ')' - { - $$ = makeTypeCast($3, SystemTypeName("int8"), @1); - } + | CONVERT '(' a_expr ',' UNSIGNED INTEGER ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' SIGNED INTEGER ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } + | CONVERT '(' a_expr ',' INTEGER UNSIGNED ')' + { + $$ = makeTypeCast($3, 
SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' INTEGER SIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } + | CONVERT '(' a_expr ',' UNSIGNED INT_P ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' SIGNED INT_P ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } + | CONVERT '(' a_expr ',' UNSIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); + } + | CONVERT '(' a_expr ',' SIGNED ')' + { + $$ = makeTypeCast($3, SystemTypeName("int8"), @1); + } | CURRENT_TIME { FuncCall *n = makeNode(FuncCall); @@ -34860,14 +34868,18 @@ func_expr_common_subexpr: { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } | CAST '(' a_expr AS SIGNED ')' { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } - | CAST '(' a_expr AS UNSIGNED INTEGER ')' - { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } - | CAST '(' a_expr AS SIGNED INTEGER ')' - { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } - | CAST '(' a_expr AS INTEGER UNSIGNED ')' - { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } - | CAST '(' a_expr AS INTEGER SIGNED ')' - { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } + | CAST '(' a_expr AS UNSIGNED INTEGER ')' + { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } + | CAST '(' a_expr AS SIGNED INTEGER ')' + { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } + | CAST '(' a_expr AS UNSIGNED INT_P ')' + { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } + | CAST '(' a_expr AS SIGNED INT_P ')' + { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } + | CAST '(' a_expr AS INTEGER UNSIGNED ')' + { $$ = makeTypeCast($3, SystemTypeName("uint8"), @1); } + | CAST '(' a_expr AS INTEGER SIGNED ')' + { $$ = makeTypeCast($3, SystemTypeName("int8"), @1); } | EXTRACT '(' extract_list ')' { FuncCall *n = makeNode(FuncCall); diff --git a/contrib/dolphin/sql/uint_cast3.sql b/contrib/dolphin/sql/uint_cast3.sql index ea2f39eb2..81e8a82b6 100644 --- 
a/contrib/dolphin/sql/uint_cast3.sql +++ b/contrib/dolphin/sql/uint_cast3.sql @@ -110,6 +110,11 @@ insert into t_uint values('-0', '-0', '-0', '-0'); select * from t_uint; drop table t_uint; +select cast(1 as signed int); +select cast(1 as unsigned int); +select convert(1 , signed int); +select convert(1 , unsigned int); + select cast(1 as signed integer); select cast(1 as unsigned integer); select convert(1 , signed integer); -- Gitee From dd67b9063c837cd1d87cd41152203a4f647daaf2 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Wed, 10 Jan 2024 10:44:00 +0800 Subject: [PATCH 171/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E6=94=AF=E6=8C=81Direct=20Read=E7=89=B9?= =?UTF-8?q?=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../spq_plugin/src/executor/spq_seqscan.cpp | 470 +++++++++++++++++- contrib/spq_plugin/src/guc_spq.cpp | 22 + .../translate/CTranslatorDXLToPlStmt.cpp | 6 +- contrib/spq_plugin/src/spqplugin.cpp | 109 ++++ 4 files changed, 597 insertions(+), 10 deletions(-) diff --git a/contrib/spq_plugin/src/executor/spq_seqscan.cpp b/contrib/spq_plugin/src/executor/spq_seqscan.cpp index 5f42afc3d..ff21e7fc3 100644 --- a/contrib/spq_plugin/src/executor/spq_seqscan.cpp +++ b/contrib/spq_plugin/src/executor/spq_seqscan.cpp @@ -31,6 +31,10 @@ #include "libpq/pqformat.h" #include "libpq/libpq.h" #include "executor/spq_seqscan.h" +#include "access/csnlog.h" +#include "utils/snapmgr.h" +#include "storage/procarray.h" +#include "ddes/dms/ss_transaction.h" #define DECOMPRESS_HEAP_TUPLE(_isCompressed, _heapTuple, _destTupleData, _rd_att, _heapPage) \ do { \ @@ -45,6 +49,7 @@ #define BLOCKSIZE (8 * 1024) constexpr int FETCH_BLOCK_NUM = 128; +constexpr int FETCH_BLOCK_NUM_DIRECT = 512; constexpr int MAX_ENQUEUE_TIME = 3; constexpr int PAGE_QUEUE_SIZE = 2048; enum SpqState { @@ -100,6 +105,327 @@ public: } }; +static bool DirectReadXidVisibleInSnapshot(TransactionId xid, Snapshot 
snapshot, bool* sync) +{ + volatile CommitSeqNo csn; + bool looped = false; + TransactionId parentXid = InvalidTransactionId; + +#ifdef XIDVIS_DEBUG + ereport(DEBUG1, + (errmsg("DirectReadXidVisibleInSnapshot xid %ld cur_xid %ld snapshot csn %lu xmax %ld", + xid, + GetCurrentTransactionIdIfAny(), + snapshot->snapshotcsn, + snapshot->xmax))); +#endif + +loop: + if (ENABLE_DMS) { + /* fetch TXN info locally if either reformer, original primary, or normal primary */ + if (SS_PRIMARY_MODE || SS_OFFICIAL_PRIMARY) { + csn = TransactionIdGetCommitSeqNo(xid, false, true, false, snapshot); + } else { + csn = SSTransactionIdGetCommitSeqNo(xid, false, true, false, snapshot, sync); + } + } else { + csn = TransactionIdGetCommitSeqNo(xid, false, true, false, snapshot); + } + +#ifdef XIDVIS_DEBUG + ereport(DEBUG1, + (errmsg("DirectReadXidVisibleInSnapshot xid %ld cur_xid %ld csn %ld snapshot" + "csn %ld xmax %ld", + xid, + GetCurrentTransactionIdIfAny(), + csn, + snapshot->snapshotcsn, + snapshot->xmax))); +#endif + + if (COMMITSEQNO_IS_COMMITTED(csn)) { + if (csn < snapshot->snapshotcsn) + return true; + else + return false; + } else if (COMMITSEQNO_IS_COMMITTING(csn)) { + /* SS master node would've already sync-waited, so this should never happen */ + if (SS_STANDBY_MODE) { + ereport(FATAL, (errmsg("SS xid %lu's csn %lu is still COMMITTING after Master txn waited.", xid, csn))); + } + if (looped) { + ereport(DEBUG1, (errmsg("transaction id %lu's csn %ld may ABORT but direct read can't change.", xid, csn))); + return false; + } else { + if (!COMMITSEQNO_IS_SUBTRANS(csn)) { + /* If snapshotcsn lower than csn stored in csn log, don't need to wait. 
*/ + CommitSeqNo latestCSN = GET_COMMITSEQNO(csn); + if (latestCSN >= snapshot->snapshotcsn) { + ereport(DEBUG1, + (errmsg( + "snapshotcsn %lu lower than csn %lu stored in csn log, don't need to sync wait, trx id %lu", + snapshot->snapshotcsn, + csn, + xid))); + return false; + } + } else { + parentXid = (TransactionId)GET_PARENTXID(csn); + } + + if (u_sess->attr.attr_common.xc_maintenance_mode || t_thrd.xact_cxt.bInAbortTransaction) { + return false; + } + + /* Wait for txn end and check again. */ + if (sync != NULL) { + *sync = true; + } + if (TransactionIdIsValid(parentXid)) + SyncLocalXidWait(parentXid, snapshot); + else + SyncLocalXidWait(xid, snapshot); + looped = true; + parentXid = InvalidTransactionId; + goto loop; + } + } else { + return false; + } +} + +static bool DirectReadCommittedXidVisibleInSnapshot(TransactionId xid, Snapshot snapshot) +{ + CommitSeqNo csn; + bool looped = false; + TransactionId parentXid = InvalidTransactionId; + + /* + * Make a quick range check to eliminate most XIDs without looking at the + * CSN log. 
+ */ + if (TransactionIdPrecedes(xid, snapshot->xmin)) + return true; + +loop: + if (ENABLE_DMS) { + /* fetch TXN info locally if either reformer, original primary, or normal primary */ + if (SS_PRIMARY_MODE || SS_OFFICIAL_PRIMARY) { + csn = TransactionIdGetCommitSeqNo(xid, true, true, false, snapshot); + } else { + csn = SSTransactionIdGetCommitSeqNo(xid, true, true, false, snapshot, NULL); + } + } else { + csn = TransactionIdGetCommitSeqNo(xid, true, true, false, snapshot); + } + + if (COMMITSEQNO_IS_COMMITTING(csn)) { + /* SS master node would've already sync-waited, so this should never happen */ + if (SS_STANDBY_MODE) { + ereport(FATAL, (errmsg("SS xid %lu's csn %lu is still COMMITTING after Master txn waited.", xid, csn))); + } + if (looped) { + ereport(WARNING, (errmsg("transaction id %lu's csn %ld may frozen but direct read can't change.", + xid, csn))); + return true; + } else { + if (!COMMITSEQNO_IS_SUBTRANS(csn)) { + /* If snapshotcsn lower than csn stored in csn log, don't need to wait. */ + CommitSeqNo latestCSN = GET_COMMITSEQNO(csn); + if (latestCSN >= snapshot->snapshotcsn) { + ereport(DEBUG1, + (errmsg("snapshotcsn %lu lower than csn %lu" + " stored in csn log, don't need to sync wait, trx id %lu", + snapshot->snapshotcsn, + csn, + xid))); + return false; + } + } else { + parentXid = (TransactionId)GET_PARENTXID(csn); + } + + if (u_sess->attr.attr_common.xc_maintenance_mode || t_thrd.xact_cxt.bInAbortTransaction) { + return false; + } + + /* Wait for txn end and check again. */ + if (TransactionIdIsValid(parentXid)) + SyncLocalXidWait(parentXid); + else + SyncLocalXidWait(xid); + looped = true; + parentXid = InvalidTransactionId; + goto loop; + } + } else if (!COMMITSEQNO_IS_COMMITTED(csn)) { + ereport(WARNING, + (errmsg("transaction/csn %lu/%lu was hinted as " + "committed, but was not marked as committed in " + "the transaction log", + xid, + csn))); + /* + * We have contradicting evidence on whether the transaction committed or + * not. 
Let's assume that it did. That seems better than erroring out. + */ + return true; + } + + if (csn < snapshot->snapshotcsn) + return true; + else + return false; +} + +static bool DirectReadHeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Page page) +{ + if (snapshot->satisfies != SNAPSHOT_MVCC) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("DirectRead only support SNAPSHOT_MVCC"))); + HeapTupleHeader tuple = htup->t_data; + Assert(ItemPointerIsValid(&htup->t_self)); + Assert(htup->t_tableOid != InvalidOid); + bool visible = false; + + if (SHOW_DEBUG_MESSAGE()) { + ereport(DEBUG1, + (errmsg("HeapTupleSatisfiesMVCC self(%d,%d) ctid(%d,%d) cur_xid %ld xmin %ld" + " xmax %ld csn %lu", + ItemPointerGetBlockNumber(&htup->t_self), + ItemPointerGetOffsetNumber(&htup->t_self), + ItemPointerGetBlockNumber(&tuple->t_ctid), + ItemPointerGetOffsetNumber(&tuple->t_ctid), + GetCurrentTransactionIdIfAny(), + HeapTupleHeaderGetXmin(page, tuple), + HeapTupleHeaderGetXmax(page, tuple), + snapshot->snapshotcsn))); + } + + /* + * Just valid for read-only transaction when u_sess->attr.attr_common.XactReadOnly is true. + * Show any tuples including dirty ones when u_sess->attr.attr_storage.enable_show_any_tuples is true. 
+ * GUC param u_sess->attr.attr_storage.enable_show_any_tuples is just for analyse or maintenance + */ + if (u_sess->attr.attr_common.XactReadOnly && u_sess->attr.attr_storage.enable_show_any_tuples) + return true; + + if (!HeapTupleHeaderXminCommitted(tuple)) { + if (HeapTupleHeaderXminInvalid(tuple)) + return false; + + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tuple))) { + if ((tuple->t_infomask & HEAP_COMBOCID) && CheckStreamCombocid(tuple, snapshot->curcid, page)) + return true; /* delete after stream producer thread scan started */ + + if (HeapTupleHeaderGetCmin(tuple, page) >= snapshot->curcid) + return false; /* inserted after scan started */ + + if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */ + return true; + + if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask, tuple->t_infomask2)) /* not deleter */ + return true; + + if (tuple->t_infomask & HEAP_XMAX_IS_MULTI) { + TransactionId xmax = HeapTupleHeaderMultiXactGetUpdateXid(page, tuple); + /* not LOCKED_ONLY, so it has to have an xmax */ + Assert(TransactionIdIsValid(xmax)); + + /* updating subtransaction must have aborted */ + if (!TransactionIdIsCurrentTransactionId(xmax)) + return true; + else if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* updated after scan started */ + else + return false; /* updated before scan started */ + } + + if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) { + /* deleting subtransaction must have aborted */ + Assert(!TransactionIdDidCommit(HeapTupleHeaderGetXmax(page, tuple))); + return true; + } + + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } else { + visible = DirectReadXidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, NULL); + if (!visible) + return false; + } + } else { + /* xmin is committed, but maybe not according to our snapshot */ + 
if (!HeapTupleHeaderXminFrozen(tuple) && + !DirectReadCommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot)) + return false; /* treat as still in progress */ + } + +recheck_xmax: + if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */ + return true; + + if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask, tuple->t_infomask2)) + return true; + + if (tuple->t_infomask & HEAP_XMAX_IS_MULTI) { + TransactionId xmax = HeapTupleHeaderMultiXactGetUpdateXid(page, tuple); + /* not LOCKED_ONLY, so it has to have an xmax */ + Assert(TransactionIdIsValid(xmax)); + if (TransactionIdIsCurrentTransactionId(xmax)) { + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + if (TransactionIdIsInProgress(xmax)) + return true; + if (TransactionIdDidCommit(xmax)) { + /* updating transaction committed, but when? */ + if (!DirectReadCommittedXidVisibleInSnapshot(xmax, snapshot)) + return true; /* treat as still in progress */ + return false; + } + /* it must have aborted or crashed */ + return true; + } + + if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) { + bool sync = false; + TransactionId xmax = HeapTupleHeaderGetXmax(page, tuple); + + if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) { + if (HeapTupleHeaderGetCmax(tuple, page) >= snapshot->curcid) + return true; /* deleted after scan started */ + else + return false; /* deleted before scan started */ + } + + visible = DirectReadXidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, &sync); + /* + * If sync wait, xmax may be modified by others. So we need to check xmax again after acquiring the page lock. 
+ */ + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + + if (!visible) { + if (sync && (xmax != HeapTupleHeaderGetXmax(page, tuple))) { + goto recheck_xmax; + } + return true; /* treat as still in progress */ + } + } else { + /* xmax is committed, but maybe not according to our snapshot */ + if (!DirectReadCommittedXidVisibleInSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot)) + return true; /* treat as still in progress */ + } + return false; +} + template bool GetNextTupleFromPage(HeapScanDesc scan, Page pageptr, ScanDirection direction, OffsetNumber &lineOff, TupleTableSlot* slot) { @@ -135,7 +461,13 @@ bool GetNextTupleFromPage(HeapScanDesc scan, Page pageptr, ScanDirection directi /* * if current tuple qualifies, return it. */ - valid = HeapTupleSatisfiesVisibility(tuple, snapshot, scan->rs_base.rs_cbuf); + if (fromBuffer) { + valid = HeapTupleSatisfiesVisibility(tuple, snapshot, scan->rs_base.rs_cbuf); + CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd, (void *)tuple, scan->rs_base.rs_cbuf, + snapshot); + } else { + valid = DirectReadHeapTupleSatisfiesVisibility(tuple, snapshot, dp); + } CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd, (void *)tuple, scan->rs_base.rs_cbuf, snapshot); @@ -273,10 +605,114 @@ struct DirectReadBuff { uint32 current; Page currentPage; OffsetNumber lineOff; - Buffer bufferid; char* buff; }; +class SpqDirectReadPageManager : public SpqPageManager { +public: + HeapScanDesc scan; + MpmcBoundedQueue pagequeue; + DirectReadBuff *currentPages; +public: + SpqDirectReadPageManager(HeapScanDesc scan, ScanDirection direction) + : SpqPageManager(direction), scan(scan), pagequeue(PAGE_QUEUE_SIZE), currentPages(nullptr) { + scan->rs_base.rs_cbuf = InvalidBuffer; + } + + SpqState FetchBlocks(uint32 start, uint32 end) + { + uint32 step = 0; + + do { + start = start + step; + step = seg_direct_read_get_range(start); + if (start + step - 1 >= end) { + step = end - start + 1; + } + + 
DirectReadBuff *buffer = (DirectReadBuff*)palloc(sizeof(DirectReadBuff) + BLOCKSIZE * step); + if (buffer == nullptr) { + elog(ERROR, "SpqDirectReadPageManager: try palloc memory failed."); + } + bool enqueued = false; + for (int i = 0; i < MAX_ENQUEUE_TIME; ++i) { + if (pagequeue.Enqueue(buffer)) { + enqueued = true; + break; + } + } + if (!enqueued) { + pfree(buffer); + elog(ERROR, "SpqDirectReadPageManager: try push buffer to page queue failed."); + } + buffer->buff = (char *)(buffer + 1); + // sync read + seg_direct_read(scan->rs_base.rs_rd->rd_smgr, MAIN_FORKNUM, start, &step, buffer->buff, &buffer->locStart); + buffer->start = start; + buffer->size = step; + buffer->current = InvalidBlockNumber; + buffer->currentPage = buffer->buff; + } while (start + step - 1 < end); + return SpqState::SPQ_SUCCESS; + } + + SpqState GetNewPage() + { + if (pagequeue.Empty() && currentPages == nullptr) { + return SpqState::SPQ_QUEUE_EMPTY; + } + + while (true) { + // if currentPage is empty, try get a new page from pagequeue + if (currentPages == nullptr) { + if (!pagequeue.Dequeue(currentPages)) { + return SpqState::SPQ_QUEUE_EMPTY; + } + } + + if (currentPages->current == InvalidBlockNumber) { + currentPages->current = 0; + } else { + currentPages->current++; + } + while (currentPages->current < currentPages->size) { + currentPages->currentPage = currentPages->buff + BLOCKSIZE * currentPages->current; + if (PageIsVerified(currentPages->currentPage, currentPages->locStart + currentPages->current)) { + if (ScanDirectionIsForward(direction)) { + currentPages->lineOff = FirstOffsetNumber; + } else if (ScanDirectionIsBackward(direction)) { + currentPages->lineOff = PageGetMaxOffsetNumber(currentPages->currentPage); + } else { + return SpqState::SPQ_QUERY_END; + } + return SpqState::SPQ_SUCCESS; + } + currentPages->current++; + } + + pfree(currentPages); + currentPages = nullptr; + } + } + + bool GetTupleFromPage(TupleTableSlot* slot) + { + if (currentPages == nullptr) { + return 
false; + } + + return GetNextTupleFromPage(scan, currentPages->currentPage, direction, currentPages->lineOff, slot); + } + + void Rescan(TableScanDesc scanDesc) + { + while (pagequeue.Dequeue(currentPages)) { + pfree(currentPages); + } + currentPages = nullptr; + } +}; + class SpqLocalBlockManager : public SpqBlockManager { public: uint32 instanceID; @@ -544,26 +980,44 @@ SpqSeqScanState* ExecInitSpqSeqScan(SpqSeqScan* node, EState* estate, int eflags HeapScanDesc scanDesc = reinterpret_cast(seqScan->ss_currentScanDesc); + if (spqScan->ss.ss_currentRelation->rd_tam_ops != TableAmHeap) { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("error relation type."))); + } + if (!node->isDirectRead) { - if (spqScan->ss.ss_currentRelation->rd_tam_ops != TableAmHeap) { - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("error relation type."))); + seqScan->ss_currentScanDesc->rs_nblocks = RelationGetNumberOfBlocks(seqScan->ss_currentScanDesc->rs_rd); + } else if (t_thrd.spq_ctx.spq_role == ROLE_QUERY_COORDINTOR) { + if (node->DirectReadBlkNum == InvalidBlockNumber) { + node->isDirectRead = false; + } + } else { + if (node->DirectReadBlkNum == InvalidBlockNumber) { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("DirectRead nblocks error"))); } + seqScan->ss_currentScanDesc->rs_nblocks = node->DirectReadBlkNum; + } + + if (!node->isDirectRead) { spqScan->pageManager = New(CurrentMemoryContext) SpqBufmgrPageManager(scanDesc, estate->es_direction); + } else { + spqScan->pageManager = New(CurrentMemoryContext) SpqDirectReadPageManager(scanDesc, estate->es_direction); } SpqBlockManager* blockManager = nullptr; - seqScan->ss_currentScanDesc->rs_nblocks = RelationGetNumberOfBlocks(seqScan->ss_currentScanDesc->rs_rd); + int fetchNum = node->isDirectRead ? 
FETCH_BLOCK_NUM_DIRECT : FETCH_BLOCK_NUM; if (node->isFullTableScan) { blockManager = New(CurrentMemoryContext) SpqLocalBlockManager(0, 1, seqScan->ss_currentScanDesc->rs_nblocks, estate->es_direction, - FETCH_BLOCK_NUM); + fetchNum); } else if (node->isAdaptiveScan) { blockManager = New(CurrentMemoryContext) SpqAdaptiveBlockManager(seqScan->ss_currentScanDesc->rs_nblocks, estate->es_direction, node->scan.plan.plan_node_id, - FETCH_BLOCK_NUM); + fetchNum); } else { int sliceNumber; int instanceID; @@ -572,7 +1026,7 @@ SpqSeqScanState* ExecInitSpqSeqScan(SpqSeqScan* node, EState* estate, int eflags sliceNumber, seqScan->ss_currentScanDesc->rs_nblocks, estate->es_direction, - FETCH_BLOCK_NUM); + fetchNum); } spqScan->blockManager = blockManager; spqScan->ss.ScanNextMtd = SpqScanNext; diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 0cbaa49de..162fd619f 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1199,6 +1199,16 @@ static void InitSpqConfigureNamesBool() NULL, NULL, NULL); + DefineCustomBoolVariable("spqplugin.spq_enable_direct_read", + "Enable spq direct read without buffer", + NULL, + &u_sess->attr.attr_spq.spq_enable_direct_read, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesInt() @@ -1555,6 +1565,18 @@ static void InitSpqConfigureNamesReal() NULL, NULL, NULL); + DefineCustomRealVariable("spqplugin.spq_small_table_threshold", + "Set the threshold for small tables. 
The actual value is spq_small_table_threshold * NORMAL_SHARED_BUFFER_NUM", + NULL, + &u_sess->attr.attr_spq.spq_small_table_threshold, + 0.02, + 0.0, + 1.0, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } static void InitSpqConfigureNamesString() diff --git a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp index a43c1e985..2c4bfe29b 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/translate/CTranslatorDXLToPlStmt.cpp @@ -639,7 +639,8 @@ CTranslatorDXLToPlStmt::TranslateDXLTblScan( spq_scan->scan.scanrelid = index; spq_scan->isFullTableScan = false; spq_scan->isAdaptiveScan = u_sess->attr.attr_spq.spq_enable_adaptive_scan; - spq_scan->isDirectRead = false; + spq_scan->isDirectRead = u_sess->attr.attr_spq.spq_enable_direct_read; + spq_scan->DirectReadBlkNum = InvalidBlockNumber; plan = &(spq_scan->scan.plan); plan_return = (Plan *) spq_scan; } @@ -6369,7 +6370,8 @@ CTranslatorDXLToPlStmt::TranslateDXLTblShareScan( spq_scan->scan.scanrelid = index; spq_scan->isFullTableScan = true; spq_scan->isAdaptiveScan = false; - spq_scan->isDirectRead = false; + spq_scan->isDirectRead = u_sess->attr.attr_spq.spq_enable_direct_read; + spq_scan->DirectReadBlkNum = InvalidBlockNumber; plan = &(spq_scan->scan.plan); plan_return = (Plan *) spq_scan; } diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index df673974e..64cdef558 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -10,6 +10,7 @@ */ #include "postgres.h" #include +#include #include "nodes/nodeFuncs.h" #include "catalog/pg_inherits_fn.h" #include "commands/explain.h" @@ -26,14 +27,22 @@ #include "spqplugin.h" #include "storage/ipc.h" #include "naucrates/init.h" +#include "ddes/dms/ss_transaction.h" +#include "optimizer/planmem_walker.h" 
PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(spqplugin_invoke); +THR_LOCAL ExecutorStart_hook_type spq_hook_ExecutorStart = NULL; THR_LOCAL spq_planner_hook_type backup_spq_planner_hook = NULL; THR_LOCAL bool HOOK_INIT = false; THR_LOCAL MemoryContext OptimizerMemoryContext = NULL; +typedef struct SpqDirectReadWalkerContext { + MethodPlanWalkerContext cxt; + std::unordered_map* directMap; +} SpqDirectReadWalkerContext; + static bool check_rangetbl_support(List* rtable) { if (rtable == NULL) @@ -287,10 +296,109 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b return result; } +static void TryDirectRead(PlannedStmt* stmt, SpqSeqScan* scan, std::unordered_map* directMap) +{ + RangeTblEntry *rte = (RangeTblEntry*)list_nth(stmt->rtable, (scan->scan.scanrelid)-1); + if (rte->rtekind == RTE_RELATION) { + Relation rel = heap_open(rte->relid, AccessShareLock); + BlockNumber nblocks = RelationGetNumberOfBlocks(rel); + BlockNumber nlimit = NORMAL_SHARED_BUFFER_NUM * u_sess->attr.attr_spq.spq_small_table_threshold; + auto it = directMap->find(rel->rd_id); + if (it == directMap->end()) { + if (SS_STANDBY_MODE || nblocks <= nlimit) { + // SS_STANDBY_MODE treat as false, may be change after libpq connected + SpqDirectReadEntry tmp; + tmp.rel_id = rel->rd_id; + tmp.nums = InvalidBlockNumber; + tmp.spq_seq_scan_node_list = NIL; + if (nblocks > nlimit) { + tmp.spq_seq_scan_node_list = lappend(tmp.spq_seq_scan_node_list, scan); + } + (*directMap)[rel->rd_id] = tmp; + scan->isDirectRead = false; + scan->DirectReadBlkNum = InvalidBlockNumber; + } else { + heap_sync(rel); + SpqDirectReadEntry tmp; + tmp.rel_id = rel->rd_id; + tmp.nums = nblocks; + tmp.spq_seq_scan_node_list = NIL; + (*directMap)[rel->rd_id] = tmp; + scan->isDirectRead = true; + scan->DirectReadBlkNum = nblocks; + } + } else { + if (it->second.nums != InvalidBlockNumber) { + scan->isDirectRead = true; + scan->DirectReadBlkNum = it->second.nums; + } else { + scan->isDirectRead = false; + 
scan->DirectReadBlkNum = InvalidBlockNumber; + if (it->second.spq_seq_scan_node_list != NIL) { + it->second.spq_seq_scan_node_list = lappend(it->second.spq_seq_scan_node_list, scan); + } + } + } + heap_close(rel, AccessShareLock); + } +} + +static bool TraversePlan(Node* plan, void* cxt) +{ + if (plan == nullptr) return false; + + if (IsA(plan, RemoteQuery)) { + return walk_plan_node_fields((Plan*)plan, (MethodWalker)TraversePlan, cxt); + } + + if (IsA(plan, SpqSeqScan)) { + SpqDirectReadWalkerContext* walkerCxt = (SpqDirectReadWalkerContext*)cxt; + PlannedStmt* stmt = (PlannedStmt*)walkerCxt->cxt.base.node; + TryDirectRead(stmt, castNode(SpqSeqScan, plan), walkerCxt->directMap); + } + + return plan_tree_walker(plan, (MethodWalker)TraversePlan, cxt); +} + +static void spq_executor_start(QueryDesc* queryDesc, int eflags) +{ + if (t_thrd.spq_ctx.spq_role == ROLE_QUERY_COORDINTOR && !(eflags & EXEC_FLAG_EXPLAIN_ONLY) && + u_sess->attr.attr_spq.spq_enable_direct_read) { + u_sess->spq_cxt.direct_read_map = NIL; + PlannedStmt *stmt = queryDesc->plannedstmt; + std::unordered_map directMap; + SpqDirectReadWalkerContext cxt; + errno_t rc = 0; + rc = memset_s(&cxt, sizeof(SpqDirectReadWalkerContext), 0, sizeof(SpqDirectReadWalkerContext)); + securec_check(rc, "\0", "\0"); + cxt.cxt.base.init_plans = NIL; + cxt.cxt.base.traverse_flag = NULL; + exec_init_plan_tree_base(&cxt.cxt.base, stmt); + cxt.directMap = &directMap; + TraversePlan((Node *)stmt->planTree, (void*)&cxt); + for (auto it = directMap.begin(); it != directMap.end(); ++it) { + if (it->second.spq_seq_scan_node_list != NIL) { + SpqDirectReadEntry* entry = (SpqDirectReadEntry*)palloc0(sizeof(SpqDirectReadEntry)); + entry->rel_id = it->second.rel_id; + entry->nums = it->second.nums; + entry->spq_seq_scan_node_list = it->second.spq_seq_scan_node_list; + u_sess->spq_cxt.direct_read_map = lappend(u_sess->spq_cxt.direct_read_map, entry); + } + } + } + + if (spq_hook_ExecutorStart) + spq_hook_ExecutorStart(queryDesc, 
eflags); + else + standard_ExecutorStart(queryDesc, eflags); +} + void _PG_init(void) { InitDXLManager(); if (!HOOK_INIT) { + spq_hook_ExecutorStart = ExecutorStart_hook; + ExecutorStart_hook = spq_executor_start; backup_spq_planner_hook = spq_planner_hook; spq_planner_hook = spq_optimize_query; init_spqseqscan_hook(); @@ -304,6 +412,7 @@ void _PG_init(void) void _PG_fini(void) { + ExecutorStart_hook = spq_hook_ExecutorStart; spq_planner_hook = backup_spq_planner_hook; MemoryContextDelete(u_sess->spq_cxt.spq_worker_context); restore_spqseqscan_hook(); -- Gitee From 7d63b7fc4bb096adb01fc2046c78094c04fa4df6 Mon Sep 17 00:00:00 2001 From: lukeman Date: Tue, 26 Dec 2023 11:05:37 +0800 Subject: [PATCH 172/434] =?UTF-8?q?=E5=A4=84=E7=90=86=E7=BC=BA=E9=99=B7?= =?UTF-8?q?=EF=BC=9A=E5=8F=96=E5=8F=8D=E8=BF=90=E7=AE=97=E7=AC=A6~?= =?UTF-8?q?=E8=AE=A1=E7=AE=97=E7=BB=93=E6=9E=9C=E4=B8=8Emysql=E4=B8=8D?= =?UTF-8?q?=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/test_mysql_operator.out | 93 +++++++++++++++ .../include/plugin_parser/parse_oper.h | 1 + contrib/dolphin/plugin_parser/parse_oper.cpp | 65 +++++++++-- contrib/dolphin/plugin_utils/adt/date.cpp | 20 +++- contrib/dolphin/plugin_utils/adt/int.cpp | 65 +++++++++++ contrib/dolphin/plugin_utils/adt/numeric.cpp | 11 +- contrib/dolphin/plugin_utils/adt/set.cpp | 9 +- .../dolphin/plugin_utils/adt/timestamp.cpp | 23 ++++ .../dolphin/plugin_utils/adt/unsigned_int.cpp | 106 +++++++++++++++++- contrib/dolphin/plugin_utils/adt/varbit.cpp | 15 ++- contrib/dolphin/plugin_utils/adt/year.cpp | 17 ++- .../rollback_script/dolphin--3.0--2.0.sql | 75 ++++++++++++- contrib/dolphin/sql/test_mysql_operator.sql | 80 +++++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 100 ++++++++++++++++- 14 files changed, 651 insertions(+), 29 deletions(-) diff --git a/contrib/dolphin/expected/test_mysql_operator.out b/contrib/dolphin/expected/test_mysql_operator.out 
index c955a5af2..827ded088 100644 --- a/contrib/dolphin/expected/test_mysql_operator.out +++ b/contrib/dolphin/expected/test_mysql_operator.out @@ -2571,6 +2571,99 @@ select !b'1001'; select 10!; ERROR: Operator '!' behind expression is deprecated when b_compatibility_mode is on. Please use function factorial(). +-- test for '~' +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); +select +~(`int1`), +~(`uint1`), +~(`int2`), +~(`uint2`), +~(`int4`), +~(`uint4`), +~(`int8`), +~(`uint8`), +~(`float4`), +~(`float8`), +~(`numeric`), +~(`bit1`), +~(`bit64`), +~(`boolean`), +~(`date`), +~(`time`), +~(`time(4)`), +~(`datetime`), +~(`datetime(4)`), +~(`timestamp`), +~(`timestamp(4)`), +~(`year`), +~(`char`), +~(`varchar`), 
+~(`binary`), +~(`varbinary`), +~(`tinyblob`), +~(`blob`), +~(`mediumblob`), +~(`longblob`), +~(`text`), +~(`enum_t`), +~(`set_t`), +~(`json`) +from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a " +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type double precision: "1.23a" +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" + ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? | ?column? 
+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+---------------------- + 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551608 | 18446744073709551614 | 18446744073689321410 | 18446744073709360565 | 18446744073709360565 | 18446723843504360565 | 18446723843504360565 | 18446723843504360565 | 18446723843504360565 | 18446744073709549592 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551614 | 18446744073709551610 | 18446744073709551615 +(1 row) + +DROP TABLE test_type_table; set dolphin.b_compatibility_mode = 0; select !10; ERROR: operator does not exist: ! 
integer diff --git a/contrib/dolphin/include/plugin_parser/parse_oper.h b/contrib/dolphin/include/plugin_parser/parse_oper.h index 57c4b0583..a01a1e04e 100644 --- a/contrib/dolphin/include/plugin_parser/parse_oper.h +++ b/contrib/dolphin/include/plugin_parser/parse_oper.h @@ -18,6 +18,7 @@ #include "plugin_parser/parse_node.h" typedef HeapTuple Operator; +#define DOLPHIN_CATALOG_STR "dolphin_catalog" /* Routines to look up an operator given name and exact input type(s) */ extern Oid LookupOperName(ParseState* pstate, List* opername, Oid oprleft, Oid oprright, bool noError, int location); diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index 329511bfa..4ca9b5c9e 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -91,6 +91,7 @@ typedef struct GetDolphinOperatorTupInfo { } GetDolphinOperatorTupInfo; static Operator GetDolphinOperatorTup(GetDolphinOperatorTupInfo* info); +static Operator GetDolphinRightOperatorTup(GetDolphinOperatorTupInfo* info); static Operator GetNumericDolphinOperatorTup( ParseState* pstate, List* opername, Oid ltypeId, Oid rtypeId, int location, bool inNumeric); static void TransformDolphinType(Oid& type, int32& typmod); @@ -976,14 +977,19 @@ Expr* make_op(ParseState* pstate, List* opname, Node* ltree, Node* rtree, Node* info.rtree = rtree; info.location = location; info.inNumeric = inNumeric; - if (IsJsonType(rtypeId) && GetSessionContext()->enableBCmptMode) { - DeconstructQualifiedName(opname, &schemaname, &opername); - jsonTransfored = TransformJsonDolphinType(opername, ltypeId, rtypeId); - tup = left_oper(pstate, opname, rtypeId, false, location); - if (jsonTransfored && HeapTupleIsValid(tup)) - newRightTree = CreateCastForType(pstate, info.rtypeId, rtree, tup, location, false); + tup = GetDolphinRightOperatorTup(&info); + if (!HeapTupleIsValid(tup)) { + if (IsJsonType(rtypeId) && GetSessionContext()->enableBCmptMode) { + 
DeconstructQualifiedName(opname, &schemaname, &opername); + jsonTransfored = TransformJsonDolphinType(opername, ltypeId, rtypeId); + tup = left_oper(pstate, opname, rtypeId, false, location); + if (jsonTransfored && HeapTupleIsValid(tup)) + newRightTree = CreateCastForType(pstate, info.rtypeId, rtree, tup, location, false); + } else { + tup = left_oper(pstate, opname, rtypeId, false, location); + } } else { - tup = left_oper(pstate, opname, rtypeId, false, location); + newRightTree = CreateCastForType(pstate, rtypeId, rtree, tup, location, false); } #else tup = left_oper(pstate, opname, rtypeId, false, location); @@ -1400,6 +1406,49 @@ static Operator GetNumericDolphinOperatorTup( return (Operator)tup; } +static Operator GetDolphinRightOperatorTup(GetDolphinOperatorTupInfo* info) +{ + if (!GetSessionContext()->enableBCmptMode) { + return NULL; + } + ParseState* pstate = info->pstate; + List* opname = info->opname; + Oid rightType = info->rtypeId; + int location = info->location; + bool inNumeric = info->inNumeric; + char* schemaname = NULL; + char* opername = NULL; + Operator tup = NULL; + DeconstructQualifiedName(opname, &schemaname, &opername); + List *newOpList = list_make2(makeString(DOLPHIN_CATALOG_STR), makeString(opername)); + if (schemaname == NULL) { + tup = GetNumericDolphinOperatorTup(pstate, newOpList, InvalidOid, rightType, location, inNumeric); + if (tup != NULL) { + return tup; + } + } + Oid nspOid = InvalidOid; + Oid oprOid = InvalidOid; + Oid rightOid = rightType; + nspOid = get_namespace_oid(DOLPHIN_CATALOG_STR, true); + if (!OidIsValid(nspOid)) { + return NULL; + } + char rightTypType = get_typtype(rightType); + if (IsBinaryType(rightType)) { + rightOid = ANYELEMENTOID; + } else if (rightTypType == TYPTYPE_ENUM) { + rightOid = ANYENUMOID; + } else if (rightTypType == TYPTYPE_SET) { + rightOid = ANYSETOID; + } + oprOid = get_operator_oid(opername, nspOid, InvalidOid, rightOid); + if (!OidIsValid(oprOid)) { + return NULL; + } + return 
SearchSysCache1(OPEROID, ObjectIdGetDatum(oprOid)); +} + static Operator GetDolphinOperatorTup(GetDolphinOperatorTupInfo* info) { if (!GetSessionContext()->enableBCmptMode) { @@ -1417,7 +1466,7 @@ static Operator GetDolphinOperatorTup(GetDolphinOperatorTupInfo* info) char* opername = NULL; Operator tup = NULL; DeconstructQualifiedName(opname, &schemaname, &opername); - List *newOpList = list_make2(makeString("dolphin_catalog"), makeString(opername)); + List *newOpList = list_make2(makeString(DOLPHIN_CATALOG_STR), makeString(opername)); if (IsNumericCatalogByOid(leftType) && IsNumericCatalogByOid(rightType) && schemaname == NULL) { tup = GetNumericDolphinOperatorTup(pstate, newOpList, leftType, rightType, location, inNumeric); if (tup != NULL) { diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 46a7f3ae3..1a2678950 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -774,7 +774,7 @@ Datum int32_b_format_date(PG_FUNCTION_ARGS) int4 date = PG_GETARG_INT32(0); DateADT result; struct pg_tm tt, *tm = &tt; - int errlevel = SQL_MODE_STRICT() ? ERROR : WARNING; + int errlevel = !fcinfo->can_ignore && SQL_MODE_STRICT() ? 
ERROR : WARNING; if (int32_b_format_date_internal(tm, date, true)) { ereport(errlevel, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), @@ -807,7 +807,7 @@ Datum int64_b_format_date(PG_FUNCTION_ARGS) Datum datetime = DirectFunctionCall1(int64_b_format_datetime, Int64GetDatum(number)); return DirectFunctionCall1(timestamp_date, datetime); } - return DirectFunctionCall1(int32_b_format_date, Int32GetDatum((int32)number)); + return DirectFunctionCall1Coll(int32_b_format_date, InvalidOid, Int32GetDatum((int32)number), fcinfo->can_ignore); } #endif @@ -7202,6 +7202,22 @@ Datum timestamp_ge_time(PG_FUNCTION_ARGS) PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); } + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_datenot); +extern "C" DLL_PUBLIC Datum dolphin_datenot(PG_FUNCTION_ARGS); +Datum dolphin_datenot(PG_FUNCTION_ARGS) +{ + DateADT date = PG_GETARG_DATEADT(0); + struct pg_tm tt; + struct pg_tm* tm = &tt; + if (unlikely(date > 0 && (INT_MAX - date < POSTGRES_EPOCH_JDATE))) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input julian date is overflow"))); + } + j2date(date + POSTGRES_EPOCH_JDATE, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday)); + PG_RETURN_UINT64(~((uint64)date2int(tm))); +} #endif #endif diff --git a/contrib/dolphin/plugin_utils/adt/int.cpp b/contrib/dolphin/plugin_utils/adt/int.cpp index 37c899b90..55d2f1b7e 100644 --- a/contrib/dolphin/plugin_utils/adt/int.cpp +++ b/contrib/dolphin/plugin_utils/adt/int.cpp @@ -2400,4 +2400,69 @@ Datum dolphin_int42mul(PG_FUNCTION_ARGS) int64 arg2 = PG_GETARG_INT16(1); PG_RETURN_INT64(arg1 * arg2); } + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_int1not); +extern "C" DLL_PUBLIC Datum dolphin_int1not(PG_FUNCTION_ARGS); +Datum dolphin_int1not(PG_FUNCTION_ARGS) +{ + int8 arg1 = PG_GETARG_INT8(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_int2not); +extern "C" DLL_PUBLIC Datum dolphin_int2not(PG_FUNCTION_ARGS); +Datum dolphin_int2not(PG_FUNCTION_ARGS) +{ + int16 arg1 = 
PG_GETARG_INT16(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_int4not); +extern "C" DLL_PUBLIC Datum dolphin_int4not(PG_FUNCTION_ARGS); +Datum dolphin_int4not(PG_FUNCTION_ARGS) +{ + int32 arg1 = PG_GETARG_INT32(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_int8not); +extern "C" DLL_PUBLIC Datum dolphin_int8not(PG_FUNCTION_ARGS); +Datum dolphin_int8not(PG_FUNCTION_ARGS) +{ + int64 arg1 = PG_GETARG_INT64(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_uint1not); +extern "C" DLL_PUBLIC Datum dolphin_uint1not(PG_FUNCTION_ARGS); +Datum dolphin_uint1not(PG_FUNCTION_ARGS) +{ + uint8 arg1 = PG_GETARG_UINT8(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_uint2not); +extern "C" DLL_PUBLIC Datum dolphin_uint2not(PG_FUNCTION_ARGS); +Datum dolphin_uint2not(PG_FUNCTION_ARGS) +{ + uint16 arg1 = PG_GETARG_UINT16(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_uint4not); +extern "C" DLL_PUBLIC Datum dolphin_uint4not(PG_FUNCTION_ARGS); +Datum dolphin_uint4not(PG_FUNCTION_ARGS) +{ + uint32 arg1 = PG_GETARG_UINT32(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_uint8not); +extern "C" DLL_PUBLIC Datum dolphin_uint8not(PG_FUNCTION_ARGS); +Datum dolphin_uint8not(PG_FUNCTION_ARGS) +{ + uint64 arg1 = PG_GETARG_UINT64(0); + PG_RETURN_UINT64(~((uint64)arg1)); +} + #endif diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index a715ac577..76df7841f 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -9082,7 +9082,7 @@ Datum bpchar_float8(PG_FUNCTION_ARGS) Datum result; tmp = DatumGetCString(DirectFunctionCall1(bpcharout, bpcharValue)); - result = DirectFunctionCall1(float8in, CStringGetDatum(tmp)); + result = DirectFunctionCall1Coll(float8in, InvalidOid, CStringGetDatum(tmp), 
fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9108,7 +9108,7 @@ Datum varchar_float8(PG_FUNCTION_ARGS) Datum result; tmp = DatumGetCString(DirectFunctionCall1(varcharout, varcharValue)); - result = DirectFunctionCall1(float8in, CStringGetDatum(tmp)); + result = DirectFunctionCall1Coll(float8in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -23115,6 +23115,13 @@ Datum dolphin_uint8div(PG_FUNCTION_ARGS) } } +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_numericnot); +extern "C" DLL_PUBLIC Datum dolphin_numericnot(PG_FUNCTION_ARGS); +Datum dolphin_numericnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~numeric_cast_uint8(fcinfo)); +} + Datum numeric_cast_int8(PG_FUNCTION_ARGS) { Numeric num = PG_GETARG_NUMERIC(0); diff --git a/contrib/dolphin/plugin_utils/adt/set.cpp b/contrib/dolphin/plugin_utils/adt/set.cpp index 13e99eb7b..8885e2f9a 100644 --- a/contrib/dolphin/plugin_utils/adt/set.cpp +++ b/contrib/dolphin/plugin_utils/adt/set.cpp @@ -45,6 +45,7 @@ #ifdef DOLPHIN #include "plugin_postgres.h" #include "plugin_utils/int8.h" +#include "plugin_commands/mysqlmode.h" #else #include "utils/int8.h" #endif @@ -226,7 +227,7 @@ static int64 settoint64(VarBit *bitmap) return result; } -static Datum int64toset(int64 val, Oid typid) +static Datum int64toset(int64 val, Oid typid, bool canIgnore = false) { int bitlen = 0; Relation pg_set = NULL; @@ -258,7 +259,7 @@ static Datum int64toset(int64 val, Oid typid) uint64 mask = (bitlen < SETLABELNUM) ? ((1UL << bitlen) - 1) : PG_UINT64_MAX; if (val & (~mask)) { - ereport(ERROR, + ereport(!canIgnore && SQL_MODE_STRICT() ? 
ERROR : WARNING, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("invalid input value for set %s: %ld", format_type_be(typid), val))); } @@ -945,7 +946,7 @@ Datum settonvarchar2(PG_FUNCTION_ARGS) Datum i8toset(PG_FUNCTION_ARGS) { - return int64toset(PG_GETARG_INT64(0), PG_GETARG_OID(1)); + return int64toset(PG_GETARG_INT64(0), PG_GETARG_OID(1), fcinfo->can_ignore); } Datum i4toset(PG_FUNCTION_ARGS) @@ -1336,7 +1337,7 @@ PG_FUNCTION_INFO_V1_PUBLIC(ui8toset); extern "C" DLL_PUBLIC Datum ui8toset(PG_FUNCTION_ARGS); Datum ui8toset(PG_FUNCTION_ARGS) { - return int64toset(PG_GETARG_UINT64(0), PG_GETARG_OID(1)); + return int64toset(PG_GETARG_UINT64(0), PG_GETARG_OID(1), fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(bittoset); diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 21dbd8185..689a3341e 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -11848,6 +11848,29 @@ void check_zero_month_day(pg_tm *tm, bool can_ignore) } } +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_timestampnot); +extern "C" DLL_PUBLIC Datum dolphin_timestampnot(PG_FUNCTION_ARGS); +Datum dolphin_timestampnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~timestamp_uint8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_timestamptznot); +extern "C" DLL_PUBLIC Datum dolphin_timestamptznot(PG_FUNCTION_ARGS); +Datum dolphin_timestamptznot(PG_FUNCTION_ARGS) +{ + TimestampTz dt = PG_GETARG_TIMESTAMPTZ(0); + struct pg_tm tt; + struct pg_tm* tm = &tt; + fsec_t fsec; + int tz; + const char *tzn = NULL; + if (timestamp2tm(dt, &tz, tm, &fsec, &tzn, NULL) != 0) { + ereport(ERROR, (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); + } + PG_RETURN_UINT64(~((uint64)timestamp2int(tm))); +} + #endif #endif diff --git a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp index bb4b846ec..d5bdaac25 100644 --- 
a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp +++ b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp @@ -25,6 +25,7 @@ #include "access/tuptoaster.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" +#include "catalog/pg_enum.h" #include "common/int.h" #include "lib/hyperloglog.h" #include "libpq/md5.h" @@ -4316,7 +4317,7 @@ static int128 UnknownUintInternal(Datum txt, int128 min, int128 max, char* intTy bool typIsVarlena; getTypeOutputInfo(oid, &typeOutput, &typIsVarlena); tmp = DatumGetCString(OidOutputFunctionCall(typeOutput, txt)); - result = DatumGetInt128(DirectFunctionCall1(int16in, CStringGetDatum(tmp))); + result = DatumGetInt128(DirectFunctionCall1Coll(int16in, InvalidOid, CStringGetDatum(tmp), canIgnore)); pfree_ext(tmp); if (result < 0 && result >= min) { ereport(WARNING, @@ -5096,7 +5097,7 @@ extern "C" DLL_PUBLIC Datum char_cast_ui8(PG_FUNCTION_ARGS); Datum char_cast_ui8(PG_FUNCTION_ARGS) { Datum val = bpchar_float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui8, val); + return DirectFunctionCall1Coll(f8_cast_ui8, InvalidOid, val, fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(varchar_cast_ui1); @@ -5128,7 +5129,7 @@ extern "C" DLL_PUBLIC Datum varchar_cast_ui8(PG_FUNCTION_ARGS); Datum varchar_cast_ui8(PG_FUNCTION_ARGS) { Datum val = varchar_float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui8, val); + return DirectFunctionCall1Coll(f8_cast_ui8, InvalidOid, val, fcinfo->can_ignore); } extern "C" Datum Varlena2Float8(PG_FUNCTION_ARGS); @@ -5195,4 +5196,103 @@ Datum varlena_cast_ui8(PG_FUNCTION_ARGS) Datum val = Varlena2Float8(fcinfo); return DirectFunctionCall1(f8_cast_ui8, val); } + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_float4not); +extern "C" DLL_PUBLIC Datum dolphin_float4not(PG_FUNCTION_ARGS); +Datum dolphin_float4not(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~f4_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_float8not); +extern "C" DLL_PUBLIC Datum dolphin_float8not(PG_FUNCTION_ARGS); +Datum 
dolphin_float8not(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~f8_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_boolnot); +extern "C" DLL_PUBLIC Datum dolphin_boolnot(PG_FUNCTION_ARGS); +Datum dolphin_boolnot(PG_FUNCTION_ARGS) +{ + bool arg = PG_GETARG_BOOL(0); + uint64 argval = arg ? 1 : 0; + PG_RETURN_UINT64(~argval); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_timenot); +extern "C" DLL_PUBLIC Datum dolphin_timenot(PG_FUNCTION_ARGS); +Datum dolphin_timenot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~time_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_charnot); +extern "C" DLL_PUBLIC Datum dolphin_charnot(PG_FUNCTION_ARGS); +Datum dolphin_charnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~char_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_varcharnot); +extern "C" DLL_PUBLIC Datum dolphin_varcharnot(PG_FUNCTION_ARGS); +Datum dolphin_varcharnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~varchar_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_textnot); +extern "C" DLL_PUBLIC Datum dolphin_textnot(PG_FUNCTION_ARGS); +Datum dolphin_textnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~text_cast_uint8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_varlenanot); +extern "C" DLL_PUBLIC Datum dolphin_varlenanot(PG_FUNCTION_ARGS); +Datum dolphin_varlenanot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~varlena_cast_ui8(fcinfo)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_setnot); +extern "C" DLL_PUBLIC Datum dolphin_setnot(PG_FUNCTION_ARGS); +Datum dolphin_setnot(PG_FUNCTION_ARGS) +{ + VarBit *bitmap = PG_GETARG_VARBIT_P(0); + int128 result = 0; + int typmod = VARBITLEN(bitmap); + bits8 *base = (bits8*)VARBITS(bitmap) + sizeof(Oid); + int1 bitlen = typmod - sizeof(Oid) * BITS_PER_BYTE; + /* bitlen can up to max 64 */ + for (int1 order = 0; order < bitlen; order++) { + bits8 *r = base + order / BITS_PER_BYTE; + bool bitset = (*r) & (1 << (order % BITS_PER_BYTE)); + if (bitset) { + result |= (1UL << order); + } + } + 
PG_RETURN_UINT64(~((uint64)result)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_enumnot); +extern "C" DLL_PUBLIC Datum dolphin_enumnot(PG_FUNCTION_ARGS); +Datum dolphin_enumnot(PG_FUNCTION_ARGS) +{ + Oid enumval = PG_GETARG_OID(0); + float8 result = 0.0; + HeapTuple tup; + Form_pg_enum en; + if (enumval == 0) { + PG_RETURN_FLOAT8(result); + } + tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(enumval)); + if (!HeapTupleIsValid(tup)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("invalid internal value for enum: %u", enumval))); + en = (Form_pg_enum)GETSTRUCT(tup); + result = en->enumsortorder; + ReleaseSysCache(tup); + PG_RETURN_UINT64(~DatumGetUInt64(DirectFunctionCall1(f8_cast_ui8, Float8GetDatum(result)))); +} #endif diff --git a/contrib/dolphin/plugin_utils/adt/varbit.cpp b/contrib/dolphin/plugin_utils/adt/varbit.cpp index 7e608f6d4..93008fea3 100644 --- a/contrib/dolphin/plugin_utils/adt/varbit.cpp +++ b/contrib/dolphin/plugin_utils/adt/varbit.cpp @@ -1863,7 +1863,7 @@ Datum bitfromint(int64 a, int32 typmod) PG_RETURN_VARBIT_P(result); } -Datum bitfrombigint(int128 a, int32 typmod) +Datum bitfrombigint(int128 a, int32 typmod, bool canIgnore = false) { VarBit* result = NULL; bits8* r = NULL; @@ -1874,7 +1874,8 @@ Datum bitfrombigint(int128 a, int32 typmod) typmod = 1; /* default bit length */ if (typmod < M_BIT_LEN && GetSessionContext()->enableBCmptMode) { - return DirectFunctionCall2(bit, bitfrombigint(a, M_BIT_LEN), Int32GetDatum(typmod)); + return DirectFunctionCall2Coll(bit, InvalidOid, bitfrombigint(a, M_BIT_LEN, canIgnore), + Int32GetDatum(typmod), canIgnore); } rlen = VARBITTOTALLEN(typmod); @@ -1999,7 +2000,7 @@ Datum bitfromuint8(PG_FUNCTION_ARGS) { uint64 a = PG_GETARG_UINT64(0); int32 typmod = PG_GETARG_INT32(1); - return bitfrombigint(a, typmod); + return bitfrombigint(a, typmod, fcinfo->can_ignore); } Datum bittouint8(PG_FUNCTION_ARGS) @@ -2588,4 +2589,12 @@ Datum bool_bit(PG_FUNCTION_ARGS) return 
DirectFunctionCall2(bitfromint8, Int64GetDatum(1), Int32GetDatum(atttypmod)); } } + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_bitnot); +extern "C" DLL_PUBLIC Datum dolphin_bitnot(PG_FUNCTION_ARGS); +Datum dolphin_bitnot(PG_FUNCTION_ARGS) +{ + VarBit* arg = PG_GETARG_VARBIT_P(0); + PG_RETURN_UINT64(~bittobigint(arg, true)); +} #endif \ No newline at end of file diff --git a/contrib/dolphin/plugin_utils/adt/year.cpp b/contrib/dolphin/plugin_utils/adt/year.cpp index ee24dfda2..0cce3a64c 100644 --- a/contrib/dolphin/plugin_utils/adt/year.cpp +++ b/contrib/dolphin/plugin_utils/adt/year.cpp @@ -23,7 +23,7 @@ #define TYPMODOUT_LEN 64 static int year_fastcmp(Datum x, Datum y, SortSupport ssup); -static YearADT int32_to_YearADT(int4 year); +static YearADT int32_to_YearADT(int4 year, bool canIgnore = false); PG_FUNCTION_INFO_V1_PUBLIC(year_in); extern "C" DLL_PUBLIC Datum year_in(PG_FUNCTION_ARGS); @@ -217,10 +217,10 @@ Datum yeartypmodout(PG_FUNCTION_ARGS) PG_RETURN_CSTRING(ret); } -static YearADT int32_to_YearADT(int4 year) +static YearADT int32_to_YearADT(int4 year, bool canIgnore) { #ifdef DOLPHIN - int errlevel = (SQL_MODE_STRICT() ? ERROR : WARNING); + int errlevel = (!canIgnore && SQL_MODE_STRICT() ? 
ERROR : WARNING); #endif if (year) { @@ -312,7 +312,7 @@ Datum year_smaller(PG_FUNCTION_ARGS) Datum int32_year(PG_FUNCTION_ARGS) { int4 year = PG_GETARG_INT32(0); - PG_RETURN_YEARADT(int32_to_YearADT(year)); + PG_RETURN_YEARADT(int32_to_YearADT(year, fcinfo->can_ignore)); } Datum year_integer(PG_FUNCTION_ARGS) { @@ -531,7 +531,7 @@ Datum int16_year(PG_FUNCTION_ARGS) Datum int64_year(PG_FUNCTION_ARGS) { int64 year = PG_GETARG_INT64(0); - PG_RETURN_YEARADT(int32_to_YearADT((int32)year)); + PG_RETURN_YEARADT(int32_to_YearADT((int32)year, fcinfo->can_ignore)); } PG_FUNCTION_INFO_V1_PUBLIC(year_int8); @@ -598,4 +598,11 @@ Datum year_any_value(PG_FUNCTION_ARGS) { return PG_GETARG_DATUM(0); } + +PG_FUNCTION_INFO_V1_PUBLIC(dolphin_yearnot); +extern "C" DLL_PUBLIC Datum dolphin_yearnot(PG_FUNCTION_ARGS); +Datum dolphin_yearnot(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT64(~year_uint8(fcinfo)); +} #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index dccb741f6..e949df54d 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -279,4 +279,77 @@ DROP FUNCTION IF EXISTS pg_catalog.div(double precision, binary); DROP FUNCTION IF EXISTS pg_catalog.div(binary, binary); DROP CAST IF EXISTS (year AS boolean); -DROP FUNCTION IF EXISTS pg_catalog.year_to_bool(year); \ No newline at end of file +DROP FUNCTION IF EXISTS pg_catalog.year_to_bool(year); + +-- not operator +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int1); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int1not(int1); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int2); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int2not(int2); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int4); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int4not(int4); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int8); +DROP FUNCTION IF EXISTS 
dolphin_catalog.dolphin_int8not(int8); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint1); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint1not(uint1); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint2); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint2not(uint2); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint4); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint4not(uint4); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint8); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint8not(uint8); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = float4); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float4not(float4); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = float8); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float8not(float8); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = numeric); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_numericnot(numeric); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = bit); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_bitnot(bit); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = boolean); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_boolnot(boolean); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = date); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_datenot(date); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = time); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timenot(time); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = timestamp); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestampnot(timestamp without time zone); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = timestamp); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestamptznot(timestamp with time zone); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = year); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_yearnot(year); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = char); +DROP FUNCTION IF 
EXISTS dolphin_catalog.dolphin_charnot(char); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = varchar); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_varcharnot(varchar); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = text); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_textnot(text); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyelement); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_varlenanot(anyelement); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyenum); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_enumnot(anyenum); + +DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyset); +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); diff --git a/contrib/dolphin/sql/test_mysql_operator.sql b/contrib/dolphin/sql/test_mysql_operator.sql index 554628038..79d6a5051 100644 --- a/contrib/dolphin/sql/test_mysql_operator.sql +++ b/contrib/dolphin/sql/test_mysql_operator.sql @@ -898,6 +898,86 @@ select !('23:59:59'::time); select !b'1001'; select 10!; +-- test for '~' +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); + +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', 
true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); + +select +~(`int1`), +~(`uint1`), +~(`int2`), +~(`uint2`), +~(`int4`), +~(`uint4`), +~(`int8`), +~(`uint8`), +~(`float4`), +~(`float8`), +~(`numeric`), +~(`bit1`), +~(`bit64`), +~(`boolean`), +~(`date`), +~(`time`), +~(`time(4)`), +~(`datetime`), +~(`datetime(4)`), +~(`timestamp`), +~(`timestamp(4)`), +~(`year`), +~(`char`), +~(`varchar`), +~(`binary`), +~(`varbinary`), +~(`tinyblob`), +~(`blob`), +~(`mediumblob`), +~(`longblob`), +~(`text`), +~(`enum_t`), +~(`set_t`), +~(`json`) +from test_type_table; + +DROP TABLE test_type_table; + set dolphin.b_compatibility_mode = 0; select !10; select !!10; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 22a9431d8..72c359e1b 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -492,4 +492,102 @@ DROP FUNCTION IF EXISTS pg_catalog.year_to_bool(year); CREATE OR REPLACE FUNCTION pg_catalog.year_to_bool(year) RETURNS boolean LANGUAGE SQL IMMUTABLE STRICT as 'select cast(cast($1 as text) as boolean)'; -CREATE CAST (year AS boolean) WITH FUNCTION year_to_bool(year) AS IMPLICIT; \ No newline at end of file +CREATE CAST (year AS boolean) WITH FUNCTION year_to_bool(year) AS IMPLICIT; + +-- not operator +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int1not(int1) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_int1not(int1) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_int1not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = int1, procedure = dolphin_catalog.dolphin_int1not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int2not(int2) CASCADE; +CREATE OR REPLACE 
FUNCTION dolphin_catalog.dolphin_int2not(int2) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_int2not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = int2, procedure = dolphin_catalog.dolphin_int2not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int4not(int4) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_int4not(int4) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_int4not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = int4, procedure = dolphin_catalog.dolphin_int4not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int8not(int8) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_int8not(int8) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_int8not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = int8, procedure = dolphin_catalog.dolphin_int8not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint1not(uint1) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_uint1not(uint1) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_uint1not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = uint1, procedure = dolphin_catalog.dolphin_uint1not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint2not(uint2) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_uint2not(uint2) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_uint2not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = uint2, procedure = dolphin_catalog.dolphin_uint2not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint4not(uint4) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_uint4not(uint4) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_uint4not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = uint4, procedure = dolphin_catalog.dolphin_uint4not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint8not(uint8) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_uint8not(uint8) 
RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_uint8not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = uint8, procedure = dolphin_catalog.dolphin_uint8not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float4not(float4) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_float4not(float4) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_float4not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = float4, procedure = dolphin_catalog.dolphin_float4not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float8not(float8) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_float8not(float8) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_float8not'; +CREATE OPERATOR dolphin_catalog.~(rightarg = float8, procedure = dolphin_catalog.dolphin_float8not); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_numericnot(numeric) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_numericnot(numeric) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_numericnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = numeric, procedure = dolphin_catalog.dolphin_numericnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_bitnot(bit) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_bitnot(bit) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_bitnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = bit, procedure = dolphin_catalog.dolphin_bitnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_boolnot(boolean) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_boolnot(boolean) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_boolnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = boolean, procedure = dolphin_catalog.dolphin_boolnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_datenot(date) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_datenot(date) RETURNS uint8 
LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_datenot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = date, procedure = dolphin_catalog.dolphin_datenot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timenot(time) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_timenot(time) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_timenot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = time, procedure = dolphin_catalog.dolphin_timenot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestampnot(timestamp without time zone) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_timestampnot(timestamp without time zone) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_timestampnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = timestamp without time zone, procedure = dolphin_catalog.dolphin_timestampnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestamptznot(timestamp with time zone) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_timestamptznot(timestamp with time zone) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_timestamptznot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = timestamp with time zone, procedure = dolphin_catalog.dolphin_timestamptznot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_yearnot(year) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_yearnot(year) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_yearnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = year, procedure = dolphin_catalog.dolphin_yearnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_charnot(char) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_charnot(char) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_charnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = char, procedure = dolphin_catalog.dolphin_charnot); + +DROP FUNCTION IF EXISTS 
dolphin_catalog.dolphin_varcharnot(varchar) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_varcharnot(varchar) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_varcharnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = varchar, procedure = dolphin_catalog.dolphin_varcharnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_textnot(text) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_textnot(text) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_textnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = text, procedure = dolphin_catalog.dolphin_textnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_varlenanot(anyelement) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_varlenanot(anyelement) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_varlenanot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = anyelement, procedure = dolphin_catalog.dolphin_varlenanot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_enumnot(anyenum) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_enumnot(anyenum) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_enumnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = anyenum, procedure = dolphin_catalog.dolphin_enumnot); + +DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset) CASCADE; +CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_setnot(anyset) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_setnot'; +CREATE OPERATOR dolphin_catalog.~(rightarg = anyset, procedure = dolphin_catalog.dolphin_setnot); + -- Gitee From e396533848ab61c05945424cd495f31a841da9b3 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Wed, 10 Jan 2024 14:44:41 +0800 Subject: [PATCH 173/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h index c9bc801a3..3ae48480f 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h +++ b/contrib/spq_plugin/src/spq_optimizer/libspqopt/include/spqopt/engine/CHint.h @@ -225,8 +225,8 @@ public: MAX_UPDATE_DOP_NUM, MAX_SELECT_DOP_NUM, MAX_DELETE_DOP_NUM, - true, - true, + true, + true, SKEW_FACTOR /* skew_factor */ ); } -- Gitee From 1b572ba48763c0a3800837c96f22d32e773050ae Mon Sep 17 00:00:00 2001 From: Mijamind Date: Wed, 10 Jan 2024 16:20:24 +0800 Subject: [PATCH 174/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=A3=80=E8=A7=86?= =?UTF-8?q?=E6=84=8F=E8=A7=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/src/spq/spq_mutate.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/spq_plugin/src/spq/spq_mutate.cpp b/contrib/spq_plugin/src/spq/spq_mutate.cpp index 74aa88b56..1a006a55f 100644 --- a/contrib/spq_plugin/src/spq/spq_mutate.cpp +++ b/contrib/spq_plugin/src/spq/spq_mutate.cpp @@ -1034,6 +1034,7 @@ static void InitRemoteNodeDefinition(PlannedStmt* planstmt) planstmt->num_nodes = 0; return; } + errno_t rc; if ((planstmt->commandType == CMD_INSERT || planstmt->commandType == CMD_UPDATE || planstmt->commandType == CMD_DELETE) && planstmt->write_node_index >= 0 && planstmt->write_node_index < t_thrd.spq_ctx.num_nodes && IsA(planstmt->planTree, RemoteQuery) && ((RemoteQuery*)planstmt->planTree)->nodeCount == 1) { @@ -1041,13 +1042,15 @@ static void InitRemoteNodeDefinition(PlannedStmt* planstmt) int nodes_size = sizeof(NodeDefinition); planstmt->num_nodes = 1; planstmt->nodesDefinition = (NodeDefinition *) palloc0(nodes_size); - 
memcpy_s(planstmt->nodesDefinition, nodes_size, &t_thrd.spq_ctx.nodesDefinition[planstmt->write_node_index], + rc = memcpy_s(planstmt->nodesDefinition, nodes_size, &t_thrd.spq_ctx.nodesDefinition[planstmt->write_node_index], nodes_size); + securec_check_c(rc, "\0", "\0"); } else { int nodes_size = sizeof(NodeDefinition) * t_thrd.spq_ctx.num_nodes; planstmt->num_nodes = t_thrd.spq_ctx.num_nodes; planstmt->nodesDefinition = (NodeDefinition *) palloc0(nodes_size); - memcpy_s(planstmt->nodesDefinition, nodes_size, t_thrd.spq_ctx.nodesDefinition, nodes_size); + rc = memcpy_s(planstmt->nodesDefinition, nodes_size, t_thrd.spq_ctx.nodesDefinition, nodes_size); + securec_check_c(rc, "\0", "\0"); } } -- Gitee From c2abc6d263205a5ce335c72ef19d768f455a2d08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=98=99=E9=B8=A3=E5=81=A5?= Date: Wed, 10 Jan 2024 10:19:06 +0000 Subject: [PATCH 175/434] update contrib/spq_plugin/src/guc_spq.cpp. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: é˜™é¸£å¥ --- contrib/spq_plugin/src/guc_spq.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index eb9c75ad0..31c19bcb4 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1203,7 +1203,7 @@ static void InitSpqConfigureNamesBool() "Enable spq direct read without buffer", NULL, &u_sess->attr.attr_spq.spq_enable_direct_read, - false, + false, PGC_USERSET, 0, NULL, -- Gitee From 57c3774f88791e1d1721a76386abb44f2a74d2c9 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 10 Jan 2024 19:09:39 +0800 Subject: [PATCH 176/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Ddegrees=E5=87=BD=E6=95=B0=E4=B8=8D=E6=94=AF?= =?UTF-8?q?=E6=8C=81boolean=E3=80=81year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=EF=BC=8C=E4=BD=86=E6=98=AFmysql=E6=98=AF=E6=94=AF?= 
=?UTF-8?q?=E6=8C=81=E7=9A=84=E9=97=AE=E9=A2=98.=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE=E5=A4=8Ddegre?= =?UTF-8?q?es=E5=87=BD=E6=95=B0=E4=B8=8D=E6=94=AF=E6=8C=81boolean=E3=80=81?= =?UTF-8?q?year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=EF=BC=8C=E4=BD=86?= =?UTF-8?q?=E6=98=AFmysql=E6=98=AF=E6=94=AF=E6=8C=81=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90?= =?UTF-8?q?=E3=80=91:=20=E5=9B=A0=E4=B8=BA=E5=BD=93=E5=89=8Dboolean?= =?UTF-8?q?=E3=80=81year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=E8=BD=ACd?= =?UTF-8?q?ouble=E7=9A=84cast=E9=83=BD=E4=B8=8D=E6=98=AF=E9=9A=90=E5=BC=8F?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2=EF=BC=8C=E5=87=BD=E6=95=B0degrees=E5=8F=88?= =?UTF-8?q?=E6=B2=A1=E5=A3=B0=E6=98=8E=E8=BF=99=E5=87=A0=E4=B8=AA=E7=B1=BB?= =?UTF-8?q?=E5=9E=8B=E7=9A=84=E5=8F=82=E6=95=B0=EF=BC=8C=E5=9B=A0=E6=AD=A4?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E3=80=82=20=E3=80=90=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=A3=B0=E6=98=8Eboolean?= =?UTF-8?q?=E3=80=81year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=E7=9A=84d?= =?UTF-8?q?egrees=E5=8F=82=E6=95=B0=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94?= =?UTF-8?q?=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.co?= =?UTF-8?q?m/opengaussorg/dashboard=3Fissue=3DI8QQR7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/math_func.out | 125 ++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_utils/adt/numeric.cpp | 25 ++-- .../rollback_script/dolphin--3.0--2.0.sql | 4 + .../dolphin/sql/builtin_funcs/math_func.sql | 65 +++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 6 + 6 files changed, 216 insertions(+), 11 deletions(-) create mode 100644 contrib/dolphin/expected/builtin_funcs/math_func.out create mode 100644 contrib/dolphin/sql/builtin_funcs/math_func.sql diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out 
b/contrib/dolphin/expected/builtin_funcs/math_func.out new file mode 100644 index 000000000..0f35a4cd7 --- /dev/null +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -0,0 +1,125 @@ +create schema test_math_func; +set current_schema to 'test_math_func'; +set dolphin.b_compatibility_mode = on; +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, '2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', 'a', 'a,c', json_object('a', 1, 'b', 2)); +-- degrees math function +select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), degrees(`numeric`),degrees(`bit1`), degrees(`bit64`), degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), 
degrees(`datetime`),degrees(`datetime(4)`), degrees(`timestamp`), degrees(`timestamp(4)`), degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`), degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a " +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: degrees +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: degrees + degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees | degrees 
+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+----------------------+----------------------+----------------------+----------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+--------- + 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 401.070456591576 | 57.2957795130823 | 1159105365.18446 | 10946358.6759744 | 10946378.4773958 | 1.15910537613081e+15 | 1.15910537613084e+15 | 1.15910537613081e+15 | 1.15910537613084e+15 | 115909.361954966 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 57.2957795130823 | 286.478897565412 | 0 +(1 row) + +create table test_double_degrees(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double, d27 double, d28 double, d29 double, d30 double, d31 double, d32 double, d33 double, d34 double); +insert ignore into test_double_degrees select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), 
degrees(`numeric`),degrees(`bit1`), degrees(`bit64`),degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), degrees(`datetime`),degrees(`datetime(4)`), degrees(`timestamp`), degrees(`timestamp(4)`), degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`),degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a " +CONTEXT: referenced column: d23 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d24 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d25 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d28 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d29 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d30 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d31 +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: d34 +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into test_double_degrees select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), degrees(`numeric`),degrees(`bit1`), degrees(`bit64`),degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), degrees(`datetime`),degrees(`datetime(4)`), degrees(`timestamp`), degrees(`timestamp(4)`), 
degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`),degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d23 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d24 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d25 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d28 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d29 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d30 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d31 +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: d34 +select * from test_double_degrees order by 1; + d1 | d2 | d3 | d4 | d5 | d6 | d7 | d8 | d9 | d10 | d11 | d12 | d13 | d14 | d15 | d16 | d17 | d18 | d19 | d20 | d21 | d22 | d23 | d24 | d25 | d26 | d27 | d28 | d29 | d30 | d31 | d32 | d33 | d34 
+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+----------------------+----------------------+----------------------+----------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+----- + 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 401.070456591576 | 57.2957795130823 | 1159105365.18446 | 10946358.6759744 | 10946378.4773958 | 1.15910537613081e+15 | 1.15910537613084e+15 | 1.15910537613081e+15 | 1.15910537613084e+15 | 115909.361954966 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 57.2957795130823 | 286.478897565412 | 0 + 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 401.070456591576 | 57.2957795130823 | 1159105365.18446 | 10946358.6759744 | 10946378.4773958 | 1.15910537613081e+15 | 1.15910537613084e+15 | 1.15910537613081e+15 | 1.15910537613084e+15 | 115909.361954966 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 57.2957795130823 | 286.478897565412 | 0 +(2 rows) + +drop table if exists test_double_degrees; 
+drop table if exists test_type_table; +drop schema test_math_func cascade; +reset current_schema; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index adb7b40b4..f10839412 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -36,7 +36,7 @@ test: test_enum_collation test: test_char_tail_blank test_char_tail_blank_2 test_char_tail_blank_3 -test: builtin_funcs/bin builtin_funcs/char builtin_funcs/char_length builtin_funcs/character_length builtin_funcs/conv builtin_funcs/convert builtin_funcs/crc32 builtin_funcs/db_b_format +test: builtin_funcs/bin builtin_funcs/char builtin_funcs/char_length builtin_funcs/character_length builtin_funcs/conv builtin_funcs/convert builtin_funcs/crc32 builtin_funcs/db_b_format builtin_funcs/math_func test: builtin_funcs/db_b_hex builtin_funcs/db_b_if builtin_funcs/elt builtin_funcs/field builtin_funcs/find_in_set builtin_funcs/soundex builtin_funcs/space builtin_funcs/make_set builtin_funcs/between builtin_funcs/not_between diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index 1590816fd..8a66c194c 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -9032,9 +9032,10 @@ Datum text_float4(PG_FUNCTION_ARGS) Datum txt = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(textout, txt)); - result = DirectFunctionCall1(float4in, CStringGetDatum(tmp)); + tmp = DatumGetCString(DirectFunctionCall1Coll(textout, InvalidOid, txt, fcinfo->can_ignore)); + + result = DirectFunctionCall1Coll(float4in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9045,9 +9046,10 @@ Datum text_float8(PG_FUNCTION_ARGS) Datum txt = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(textout, txt)); - result = 
DirectFunctionCall1(float8in, CStringGetDatum(tmp)); + tmp = DatumGetCString(DirectFunctionCall1Coll(textout, InvalidOid, txt, fcinfo->can_ignore)); + + result = DirectFunctionCall1Coll(float8in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9071,9 +9073,9 @@ Datum bpchar_float4(PG_FUNCTION_ARGS) Datum bpcharValue = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(bpcharout, bpcharValue)); + tmp = DatumGetCString(DirectFunctionCall1Coll(bpcharout, InvalidOid, bpcharValue, fcinfo->can_ignore)); - result = DirectFunctionCall1(float4in, CStringGetDatum(tmp)); + result = DirectFunctionCall1Coll(float4in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9084,7 +9086,8 @@ Datum bpchar_float8(PG_FUNCTION_ARGS) Datum bpcharValue = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(bpcharout, bpcharValue)); + + tmp = DatumGetCString(DirectFunctionCall1Coll(bpcharout, InvalidOid, bpcharValue, fcinfo->can_ignore)); result = DirectFunctionCall1Coll(float8in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); @@ -9097,9 +9100,10 @@ Datum varchar_float4(PG_FUNCTION_ARGS) Datum varcharValue = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(varcharout, varcharValue)); - result = DirectFunctionCall1(float4in, CStringGetDatum(tmp)); + tmp = DatumGetCString(DirectFunctionCall1Coll(varcharout, InvalidOid, varcharValue, fcinfo->can_ignore)); + + result = DirectFunctionCall1Coll(float4in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9110,7 +9114,8 @@ Datum varchar_float8(PG_FUNCTION_ARGS) Datum varcharValue = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(varcharout, varcharValue)); + + tmp = 
DatumGetCString(DirectFunctionCall1Coll(varcharout, InvalidOid, varcharValue, fcinfo->can_ignore)); result = DirectFunctionCall1Coll(float8in, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index e949df54d..6fce4f49c 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -353,3 +353,7 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_enumnot(anyenum); DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyset); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); + +DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); +DROP FUNCTION IF EXISTS pg_catalog.degrees(year); +DROP FUNCTION IF EXISTS pg_catalog.degrees(json); diff --git a/contrib/dolphin/sql/builtin_funcs/math_func.sql b/contrib/dolphin/sql/builtin_funcs/math_func.sql new file mode 100644 index 000000000..0ea963d90 --- /dev/null +++ b/contrib/dolphin/sql/builtin_funcs/math_func.sql @@ -0,0 +1,65 @@ +create schema test_math_func; +set current_schema to 'test_math_func'; + +set dolphin.b_compatibility_mode = on; + +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + 
`enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); + +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, '2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', 'a', 'a,c', json_object('a', 1, 'b', 2)); + + +-- degrees math function +select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), degrees(`numeric`),degrees(`bit1`), degrees(`bit64`), degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), degrees(`datetime`),degrees(`datetime(4)`), degrees(`timestamp`), degrees(`timestamp(4)`), degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`), degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; + +create table test_double_degrees(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double, d27 double, d28 double, d29 double, d30 double, d31 double, d32 double, d33 double, d34 double); + +insert ignore into test_double_degrees select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), degrees(`numeric`),degrees(`bit1`), degrees(`bit64`),degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), degrees(`datetime`),degrees(`datetime(4)`), 
degrees(`timestamp`), degrees(`timestamp(4)`), degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`),degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; + +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; + +insert into test_double_degrees select degrees(`int1`), degrees(`uint1`), degrees(`int2`), degrees(`uint2`), degrees(`int4`), degrees(`uint4`), degrees(`int8`), degrees(`uint8`), degrees(`float4`), degrees(`float8`), degrees(`numeric`),degrees(`bit1`), degrees(`bit64`),degrees(`boolean`), degrees(`date`), degrees(`time`), degrees(`time(4)`), degrees(`datetime`),degrees(`datetime(4)`), degrees(`timestamp`), degrees(`timestamp(4)`), degrees(`year`), degrees(`char`), degrees(`varchar`), degrees(`binary`),degrees(`varbinary`), degrees(`tinyblob`), degrees(`blob`), degrees(`mediumblob`), degrees(`longblob`), degrees(`text`), degrees(`enum_t`), degrees(`set_t`), degrees(`json`) from test_type_table; + +select * from test_double_degrees order by 1; + +drop table if exists test_double_degrees; +drop table if exists test_type_table; + +drop schema test_math_func cascade; +reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 72c359e1b..99dec510f 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -591,3 +591,9 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset) CASCADE; CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_setnot(anyset) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_setnot'; CREATE OPERATOR dolphin_catalog.~(rightarg = anyset, procedure = dolphin_catalog.dolphin_setnot); +DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); +DROP 
FUNCTION IF EXISTS pg_catalog.degrees(year); +DROP FUNCTION IF EXISTS pg_catalog.degrees(json); +CREATE OR REPLACE FUNCTION pg_catalog.degrees(boolean) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; -- Gitee From 4cf790a45a4182773aa0a26b794089df772e5797 Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Wed, 10 Jan 2024 19:37:39 +0800 Subject: [PATCH 177/434] =?UTF-8?q?=E4=BF=AE=E6=94=B9concat=E6=8B=BC?= =?UTF-8?q?=E6=8E=A5binary=E7=AD=89=E7=B1=BB=E5=9E=8B=E6=97=B6=E7=BB=93?= =?UTF-8?q?=E6=9E=9C=E4=B8=8D=E5=AF=B9=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/charset_gbk_b_db.out | 173 ++++++------- .../dolphin/expected/charset_utf8mb4_b_db.out | 240 +++++++++--------- contrib/dolphin/expected/test_binary.out | 33 ++- contrib/dolphin/plugin_parser/parse_func.cpp | 11 + contrib/dolphin/plugin_utils/adt/varlena.cpp | 15 ++ contrib/dolphin/sql/charset_gbk_b_db.sql | 44 ++-- contrib/dolphin/sql/charset_utf8mb4_b_db.sql | 60 ++--- contrib/dolphin/sql/test_binary.sql | 11 + 8 files changed, 329 insertions(+), 258 deletions(-) diff --git a/contrib/dolphin/expected/charset_gbk_b_db.out b/contrib/dolphin/expected/charset_gbk_b_db.out index cede06d51..2193adc77 100644 --- a/contrib/dolphin/expected/charset_gbk_b_db.out +++ b/contrib/dolphin/expected/charset_gbk_b_db.out @@ -662,10 +662,10 @@ ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" LINE 1: ...–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -- -- -- diff charset & 
implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -686,10 +686,10 @@ SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); ¸ß˹DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB'); + concat +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -700,22 +700,22 @@ SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" LINE 1: ...30'高斯DB' , '高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB'); + concat +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, 
pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci +SELECT CONCAT( _binary'高斯DB', '高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- explicit & implicit @@ -785,10 +785,10 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result ¸ß˹DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_unicode_ci +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -809,10 +809,10 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co ¸ß˹DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - ¸ß˹DB\xe9ab98e696af4442 | utf8mb4_bin +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -863,29 +863,29 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co 高斯DB高斯DB | gbk_bin (1 row) -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442¸ß˹DB | utf8mb4_unicode_ci +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result; + 
result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------+------------------ - \xe9ab98e696af4442高斯DB | gbk_chinese_ci +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - \xe9ab98e696af4442高斯DB高斯DB | gbk_chinese_ci +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -900,36 +900,36 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk ¸ß˹DB高斯DB高斯DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) 
-SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------------+-------------------- - ¸ß˹DB高斯DB\xe9ab98e696af4442 | gb18030_chinese_ci +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result; + result +------------------------------------------------ + \xb8dfcbb94442e9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" LINE 1: ...–¯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...–¯DB', _binary'高斯DB') result, pg_collation_for(result); - ^ +LINE 1: ...b18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; + ^ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for ------------------------+-------------------- 高斯DB高斯DB¸ß˹DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 高斯DB高斯DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- const compare CONCAT @@ -1203,10 +1203,10 @@ SELECT CONCAT('100', NULL) result, pg_collation_for(result); | gbk_chinese_ci (1 row) -SELECT CONCAT('100', 
NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+------------------ - | gbk_chinese_ci +SELECT CONCAT('100', NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); @@ -1233,10 +1233,10 @@ SELECT CONCAT(100, NULL) result, pg_collation_for(result); | gbk_chinese_ci (1 row) -SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+------------------ - | gbk_chinese_ci +SELECT CONCAT(100, NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); @@ -1245,10 +1245,10 @@ SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); | gbk_chinese_ci (1 row) -SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+------------------ - | gbk_chinese_ci +SELECT CONCAT(NULL, NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); @@ -1263,10 +1263,10 @@ SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); - result | pg_collation_for ---------+------------------ - | gbk_chinese_ci +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result; + result +-------- + (1 row) SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); @@ -1281,10 +1281,10 @@ SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); | gbk_chinese_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); - result | pg_collation_for ---------+------------------ - | gbk_chinese_ci +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result; + result +-------- + (1 row) SELECT CONCAT(CONCAT(100, NULL::text), 100) result, 
pg_collation_for(result); @@ -2188,14 +2188,17 @@ ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci re... ^ -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -ERROR: COLLATION "binary" is not valid for CHARACTER SET "GBK" -LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... - ^ -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -ERROR: COLLATION "utf8mb4_general_ci" is not valid for CHARACTER SET "GBK" +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result FROM t_diff_charset_columns; -- return datatype still text + result +------------------------------------------ + \xe9ab98e696af4442e6a582e6a8bbe69f896462 +(1 row) + +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result FROM t_diff_charset_columns; -- return datatype still text +ERROR: COLLATION "utf8mb4_general_ci" is not valid for binary type LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci re... 
^ +CONTEXT: referenced column: result -- -- test explicit collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR ERROR: COLLATION "utf8mb4_general_ci" is not valid for binary type diff --git a/contrib/dolphin/expected/charset_utf8mb4_b_db.out b/contrib/dolphin/expected/charset_utf8mb4_b_db.out index e601ea26b..52fd22fc1 100644 --- a/contrib/dolphin/expected/charset_utf8mb4_b_db.out +++ b/contrib/dolphin/expected/charset_utf8mb4_b_db.out @@ -1053,10 +1053,10 @@ ERROR: collation mismatch between collations "gbk_bin" and "gb18030_bin" LINE 1: ...æ–¯DB' COLLATE gb18030_bin) result, pg_collation_for(result); ^ -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); @@ -1077,10 +1077,10 @@ SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); 楂樻柉DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR @@ -1093,22 +1093,22 @@ SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); 楂樻柉DB高斯DB | utf8mb4_general_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 
高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci +SELECT CONCAT( _binary'高斯DB', '高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- explicit & implicit @@ -1178,10 +1178,10 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result 高斯DB高斯DB | utf8mb4_unicode_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); @@ -1202,10 +1202,10 @@ SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_co 高斯DB高斯DB | utf8mb4_bin (1 row) -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+------------------ - 高斯DB\xe9ab98e696af4442 | utf8mb4_bin +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result; + result 
+------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); @@ -1256,29 +1256,29 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_co 楂樻柉DB楂樻柉DB | gbk_bin (1 row) -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_unicode_ci +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯DB | utf8mb4_general_ci +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------------+-------------------- - \xe9ab98e696af4442高斯DB高斯DB | utf8mb4_general_ci +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - \xe9ab98e696af4442楂樻柉DB楂樻柉DB | gbk_chinese_ci +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) SELECT 
CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); @@ -1293,25 +1293,25 @@ SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk 高斯DB楂樻柉DB楂樻柉DB | gbk_chinese_ci (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for ---------------------------------+-------------------- - 高斯DB高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -----------------------------------+-------------------- - 高斯DB楂樻柉DB\xe9ab98e696af4442 | gb18030_chinese_ci +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result; + result +------------------------------------------------ + \xb8dfcbb94442e9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" LINE 1: ...高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); ^ -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; -- ERROR ERROR: collation mismatch between collations "gb18030_chinese_ci" and "gbk_chinese_ci" -LINE 1: ...'高斯DB', _binary'高斯DB') result, pg_collation_for(result); +LINE 1: ...CAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; ^ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); result | pg_collation_for @@ -1319,10 +1319,10 @@ SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, 
_gbk'高斯DB', _ut 楂樻柉DB楂樻柉DB高斯DB | gb18030_chinese_ci (1 row) -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); - result | pg_collation_for -------------------------------------+------------------ - 楂樻柉DB楂樻柉DB\xe9ab98e696af4442 | gbk_chinese_ci +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result; + result +---------------------------------------------------- + \xe9ab98e696af4442e9ab98e696af4442e9ab98e696af4442 (1 row) -- -- -- const compare CONCAT @@ -1596,10 +1596,10 @@ SELECT CONCAT('100', NULL) result, pg_collation_for(result); | utf8mb4_general_ci (1 row) -SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+-------------------- - | utf8mb4_general_ci +SELECT CONCAT('100', NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); @@ -1626,10 +1626,10 @@ SELECT CONCAT(100, NULL) result, pg_collation_for(result); | utf8mb4_general_ci (1 row) -SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+-------------------- - | utf8mb4_general_ci +SELECT CONCAT(100, NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); @@ -1638,10 +1638,10 @@ SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); | utf8mb4_general_ci (1 row) -SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); - result | pg_collation_for ---------+-------------------- - | utf8mb4_general_ci +SELECT CONCAT(NULL, NULL::varbinary(16)) result; + result +-------- + (1 row) SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); @@ -1656,10 +1656,10 @@ SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, 
pg_collation_for(result); - result | pg_collation_for ---------+-------------------- - | utf8mb4_general_ci +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result; + result +-------- + (1 row) SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); @@ -1674,10 +1674,10 @@ SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); | utf8mb4_general_ci (1 row) -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); - result | pg_collation_for ---------+-------------------- - | utf8mb4_general_ci +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result; + result +-------- + (1 row) SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); @@ -2196,10 +2196,10 @@ ERROR: collation mismatch between collations "gbk_chinese_ci" and "gb18030_chin LINE 1: ...esult, pg_collation_for(result) FROM t_diff_charset_columns; ^ -- -- -- with binary & implicit collation -SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(futf8_bin, fbin) result FROM t_diff_charset_columns; + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 (1 row) SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2208,10 +2208,10 @@ SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_cha 高斯DBE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------+-------------------- - 高斯db\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(futf8_uni, fbin) result FROM t_diff_charset_columns; + result +------------------------------------ + \xe9ab98e696af6462e9ab98e696af4442 (1 row) SELECT CONCAT(futf8_uni, fblob) 
result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2220,10 +2220,10 @@ SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_cha 高斯dbE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------------------+-------------------- - ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(fgbk_bin, fbin) result FROM t_diff_charset_columns; + result +-------------------------------- + \xb8dfcbb94442e9ab98e696af4442 (1 row) SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2232,10 +2232,10 @@ SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_char ˹DBE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ------------------------+-------------------- - ˹DB\xe9ab98e696af4442 | utf8mb4_general_ci +SELECT CONCAT(fgb18030_bin, fbin) result FROM t_diff_charset_columns; + result +-------------------------------- + \xb8dfcbb94442e9ab98e696af4442 (1 row) SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; @@ -2244,10 +2244,10 @@ SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_ ˹DBE9AB98E696AF | utf8mb4_general_ci (1 row) -SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; - result | pg_collation_for ---------------------------------+-------------------- - \xe9ab98e696af4442E9AB98E696AF | utf8mb4_general_ci +SELECT CONCAT(fbin, fblob) result FROM t_diff_charset_columns; + result +-------------------------------------------- + \xe9ab98e696af4442453941423938453639364146 (1 row) -- -- concat column and @uservar @@ -3160,15 +3160,13 @@ LINE 1: SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci re... 
^ -- -- -- with binary & implicit collation SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_c... +ERROR: collations are not supported by type "varbinary" +CONTEXT: referenced column: pg_collation_for +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result FROM t_diff_charset_columns; -- return datatype still text +ERROR: COLLATION "utf8mb4_general_ci" is not valid for binary type +LINE 1: SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci re... ^ -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text - result | pg_collation_for ---------------------------+-------------------- - \xe9ab98e696af4442高斯db | utf8mb4_general_ci -(1 row) - +CONTEXT: referenced column: result -- -- test explicit collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR ERROR: COLLATION "utf8mb4_general_ci" is not valid for binary type @@ -3730,24 +3728,28 @@ ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" LINE 1: SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci ... ^ -- -- -- for binary argument string function -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR -ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "bi... 
+SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result; -- ERROR + result +------------------------------------ + \xe9ab98e696af4442e9ab98e696af4442 +(1 row) + +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result; +ERROR: COLLATION "utf8mb4_unicode_ci" is not valid for binary type +LINE 1: SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf... ^ -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); - result | pg_collation_for ---------------------------+-------------------- - 高斯DB\xe9ab98e696af4442 | utf8mb4_unicode_ci +CONTEXT: referenced column: result +SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result FROM t_diff_charset_columns; -- ERROR + result +------------------------------------ + \xe9ab98e696af6462e9ab98e696af4442 (1 row) -SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -ERROR: COLLATION "binary" is not valid for CHARACTER SET "UTF8" -LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_c... - ^ -SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -ERROR: COLLATION "gbk_chinese_ci" is not valid for CHARACTER SET "UTF8" +SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result FROM t_diff_charset_columns; -- ERROR +ERROR: COLLATION "gbk_chinese_ci" is not valid for binary type LINE 1: SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result... 
^ +CONTEXT: referenced column: result -- -- -- for convert function SELECT CONVERT(futf8_uni USING 'GBK') result, pg_collation_for(result) FROM t_diff_charset_columns; result | pg_collation_for diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 71b415f52..6aabeb77f 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -881,12 +881,41 @@ select (-2075)::binary(30)::bigint; -2075 (1 row) +-- binary about concat +DROP TABLE IF EXISTS t1; +SET dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; +CREATE TABLE t1 (s1 binary(2), s2 varbinary(2)); +INSERT INTO t1 VALUES (0x4100, 0x4100); +SELECT LENGTH(concat('*', s1, '*', s2, '*')) FROM t1; + length +-------- + 7 +(1 row) + +SELECT HEX(concat('*', s1, '*', s2, '*')) FROM t1; + hex +---------------- + 2a41002a41002a +(1 row) + +SELECT HEX(concat('*', s1, '*', s2, '*')) FROM t1; + hex +---------------- + 2a41002a41002a +(1 row) + +SELECT HEX(s1), HEX(s2), HEX('*') FROM t1; + hex | hex | hex +------+------+----- + 4100 | 4100 | 2a +(1 row) + +DROP TABLE t1; drop table if exists binary_operator; reset dolphin.b_compatibility_mode; drop schema test_binary cascade; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table test_bytea -drop cascades to table t1 drop cascades to table test33 drop cascades to table test34 reset current_schema; diff --git a/contrib/dolphin/plugin_parser/parse_func.cpp b/contrib/dolphin/plugin_parser/parse_func.cpp index 11dcb96b9..1b9574b77 100644 --- a/contrib/dolphin/plugin_parser/parse_func.cpp +++ b/contrib/dolphin/plugin_parser/parse_func.cpp @@ -226,6 +226,17 @@ Node* ParseFuncOrColumn(ParseState* pstate, List* funcname, List* fargs, Node* l &refSynOid, &rettype_orig); name_string = 
NameListToString(funcname); + +#ifdef DOLPHIN + if (funcid == CONCATFUNCOID) { + for (int i = 0; i < nargs; i++) { + if (actual_arg_types[i] == BINARYOID || actual_arg_types[i] == VARBINARYOID) { + rettype = VARBINARYOID; + break; + } + } + } +#endif if (fdresult == FUNCDETAIL_COERCION) { /* * We interpreted it as a type coercion. coerce_type can handle these diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 8479957be..1169fe1f7 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -7185,8 +7185,23 @@ static text* concat_internal(const char* sepstr, int seplen, int argidx, Functio if (!OidIsValid(valtype)) ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_DATATYPE), errmsg("could not determine data type of concat() input"))); +#ifdef DOLPHIN + if (valtype == BINARYOID || valtype == VARBINARYOID || valtype == TEXTOID) { + bytea* bin = PG_GETARG_BYTEA_PP(i); + char* data = VARDATA_ANY(bin); + int len = VARSIZE_ANY_EXHDR(bin); + /* cannot use appendStringInfoString because of the '\0' */ + for (int i = 0; i < len; i++) { + appendStringInfoChar(&str, data[i]); + } + } else { + getTypeOutputInfo(valtype, &typOutput, &typIsVarlena); + appendStringInfoString(&str, OidOutputFunctionCall(typOutput, value)); + } +#else getTypeOutputInfo(valtype, &typOutput, &typIsVarlena); appendStringInfoString(&str, OidOutputFunctionCall(typOutput, value)); +#endif } else if (PG_ARGISNULL(i) && u_sess->attr.attr_sql.sql_compatibility == B_FORMAT && !is_concat_ws) { pfree_ext(str.data); PG_RETURN_NULL(); diff --git a/contrib/dolphin/sql/charset_gbk_b_db.sql b/contrib/dolphin/sql/charset_gbk_b_db.sql index bb259d7ed..61374e9c1 100644 --- a/contrib/dolphin/sql/charset_gbk_b_db.sql +++ b/contrib/dolphin/sql/charset_gbk_b_db.sql @@ -145,16 +145,16 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' 
COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB'); SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB'); +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result; +SELECT CONCAT( _binary'高斯DB', '高斯DB') result; -- -- -- explicit & implicit SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.gbk") result, pg_collation_for(result); @@ -167,11 +167,11 @@ SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); 
+SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); @@ -180,20 +180,20 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_col SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result; +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result; -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result; +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result; SELECT 
CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result; +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result; SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; -- ERROR SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result; -- -- -- const compare CONCAT SELECT _utf8mb4'楂樻柉DB' = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci); @@ -247,20 +247,20 @@ SELECT CONCAT(NULL, CONCAT(_utf8mb4'高斯DB')) result, pg_collation_for(result) SELECT CONCAT('100', 200) result, pg_collation_for(result); SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); SELECT CONCAT('100', NULL) result, pg_collation_for(result); -SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result; SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); SELECT CONCAT(100, 200) result, pg_collation_for(result); SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); SELECT CONCAT(100, NULL) 
result, pg_collation_for(result); -SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(100, NULL::varbinary(16)) result; SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); -SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result; SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result; SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result; SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); -- -- 中文 with column charset @@ -518,8 +518,8 @@ SELECT CONCAT(futf8_bin, futf8_uni) COLLATE gbk_chinese_ci result, pg_collation_ SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation -SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result FROM t_diff_charset_columns; -- return datatype still text -- -- test explicit 
collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR diff --git a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql index ae721ba94..b85ff72c3 100644 --- a/contrib/dolphin/sql/charset_utf8mb4_b_db.sql +++ b/contrib/dolphin/sql/charset_utf8mb4_b_db.sql @@ -274,16 +274,16 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB' COLLATE SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); -- ERROR SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); -- ERROR -- -- -- diff charset & implicit collation -SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gbk'高斯DB' , _binary'高斯DB') result; SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB') result, pg_collation_for(result); -- ERROR SELECT CONCAT(_gb18030'高斯DB' , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result, pg_collation_for(result); -SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -SELECT CONCAT( _binary'高斯DB', '高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB' , _binary'高斯DB') result; +SELECT CONCAT( _binary'高斯DB', _utf8mb4'高斯DB') result; +SELECT CONCAT( _binary'高斯DB', '高斯DB') result; -- -- -- explicit & implicit SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "C") result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , '高斯DB' COLLATE "zh_CN.utf8") result, pg_collation_for(result); @@ -296,11 +296,11 
@@ SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci SELECT CONCAT(_utf8mb4'楂樻柉DB' , _gb18030'高斯db' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_bin) result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , '高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _binary'高斯DB') result; SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB' COLLATE utf8mb4_bin , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' , _gb18030'高斯DB' COLLATE gb18030_chinese_ci) result, pg_collation_for(result); @@ -309,20 +309,20 @@ SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , '高斯DB') result, pg_col SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_chinese_ci , _gb18030'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , '高斯DB') result, pg_collation_for(result); SELECT CONCAT(_gbk'高斯DB' COLLATE gbk_bin , _gb18030'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE utf8mb4_unicode_ci) result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _utf8mb4'高斯DB' COLLATE 
utf8mb4_unicode_ci) result; +SELECT CONCAT(_binary'高斯DB' COLLATE 'binary', _gbk'高斯DB') result; -- -- -- concat 3 args -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result; +SELECT CONCAT(_binary'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result; SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB') result, pg_collation_for(result); SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci) result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB', _binary'高斯DB') result; +SELECT CONCAT(_utf8mb4'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci, _binary'高斯DB') result; SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -- ERROR -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result, pg_collation_for(result); -- ERROR +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB', _binary'高斯DB') result; -- ERROR SELECT CONCAT(_gb18030'高斯DB' COLLATE gb18030_chinese_ci, _gbk'高斯DB', _utf8mb4'高斯DB') result, pg_collation_for(result); -SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result, pg_collation_for(result); +SELECT CONCAT(_gb18030'高斯DB', _gbk'高斯DB' COLLATE gbk_chinese_ci, _binary'高斯DB') result; -- -- -- const compare CONCAT SELECT _utf8mb4'楂樻柉DB' = CONCAT(_gbk'高斯DB'); SELECT _utf8mb4'楂樻柉DB楂樻柉DB' = CONCAT(_gbk'高斯DB', _gb18030'高斯DB' COLLATE gb18030_chinese_ci); @@ -376,20 +376,20 @@ SELECT CONCAT(NULL, CONCAT(_gbk'高斯DB')) result, pg_collation_for(result); SELECT CONCAT('100', 200) result, pg_collation_for(result); 
SELECT CONCAT('100', date'2021-01-01') result, pg_collation_for(result); SELECT CONCAT('100', NULL) result, pg_collation_for(result); -SELECT CONCAT('100', NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT('100', NULL::varbinary(16)) result; SELECT CONCAT('100', NULL::text) result, pg_collation_for(result); SELECT CONCAT(100, 200) result, pg_collation_for(result); SELECT CONCAT(100, date'2021-01-01') result, pg_collation_for(result); SELECT CONCAT(100, NULL) result, pg_collation_for(result); -SELECT CONCAT(100, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(100, NULL::varbinary(16)) result; SELECT CONCAT(100, NULL::text) result, pg_collation_for(result); -SELECT CONCAT(NULL, NULL::varbinary(16)) result, pg_collation_for(result); +SELECT CONCAT(NULL, NULL::varbinary(16)) result; SELECT CONCAT(NULL, NULL::text) result, pg_collation_for(result); SELECT CONCAT(CONCAT(100, NULL), '100') result, pg_collation_for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), '100') result; SELECT CONCAT(CONCAT(100, NULL::text), '100') result, pg_collation_for(result); SELECT CONCAT(CONCAT(100, NULL), 100) result, pg_collation_for(result); -SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result, pg_collation_for(result); +SELECT CONCAT(CONCAT(100, NULL::varbinary(16)), 100) result; SELECT CONCAT(CONCAT(100, NULL::text), 100) result, pg_collation_for(result); -- -- 中文 with column charset @@ -535,15 +535,15 @@ SELECT CONCAT(fgbk_bin, fgb18030_bin) result, pg_collation_for(result) FROM t_di SELECT CONCAT(fgbk_bin, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR SELECT CONCAT(fgbk_chi, fgb18030_chi) result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation -SELECT CONCAT(futf8_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT 
CONCAT(futf8_bin, fbin) result FROM t_diff_charset_columns; SELECT CONCAT(futf8_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(futf8_uni, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(futf8_uni, fbin) result FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgbk_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgbk_bin, fbin) result FROM t_diff_charset_columns; SELECT CONCAT(fgbk_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fgb18030_bin, fbin) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fgb18030_bin, fbin) result FROM t_diff_charset_columns; SELECT CONCAT(fgb18030_bin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; -SELECT CONCAT(fbin, fblob) result, pg_collation_for(result) FROM t_diff_charset_columns; +SELECT CONCAT(fbin, fblob) result FROM t_diff_charset_columns; -- -- concat column and @uservar set enable_set_variable_b_format=on; @@ -880,7 +880,7 @@ SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE utf8mb4_general_ci result, pg_collati SELECT CONCAT(futf8_bin, fgbk_bin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- with binary & implicit collation SELECT CONCAT(fbin, futf8_uni) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text -SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- return datatype still text +SELECT CONCAT(fbin, futf8_uni) COLLATE utf8mb4_general_ci result FROM t_diff_charset_columns; -- return datatype still text -- -- test explicit collate on blob result SELECT CAST('DEADBEEF' AS blob) COLLATE utf8mb4_general_ci result; -- ERROR @@ -1048,10 +1048,10 @@ SELECT 
CONCAT(_utf8mb4'高斯DB' , _gbk'高斯DB') COLLATE "binary" result, pg_c SELECT CONCAT(futf8_uni , futf8_gen) COLLATE utf8mb4_bin result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONCAT(futf8_uni , futf8_gen) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -- -- -- for binary argument string function -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result, pg_collation_for(result); -- ERROR -SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result, pg_collation_for(result); -SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR -SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result, pg_collation_for(result) FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE "binary" result; -- ERROR +SELECT CONCAT(_utf8mb4'高斯DB', _binary'高斯DB') COLLATE utf8mb4_unicode_ci result; +SELECT CONCAT(futf8_uni, fbin) COLLATE "binary" result FROM t_diff_charset_columns; -- ERROR +SELECT CONCAT(futf8_uni, fbin) COLLATE gbk_chinese_ci result FROM t_diff_charset_columns; -- ERROR -- -- -- for convert function SELECT CONVERT(futf8_uni USING 'GBK') result, pg_collation_for(result) FROM t_diff_charset_columns; SELECT CONVERT(futf8_uni USING 'GBK') COLLATE gbk_bin result FROM t_diff_charset_columns; diff --git a/contrib/dolphin/sql/test_binary.sql b/contrib/dolphin/sql/test_binary.sql index 602a0681f..113ffd218 100644 --- a/contrib/dolphin/sql/test_binary.sql +++ b/contrib/dolphin/sql/test_binary.sql @@ -249,6 +249,17 @@ select c1 < c7, c1 > c7, c1 <= c7, c1 >= c7 from binary_operator; select (20220101)::binary(30)::bigint; select (-2075)::binary(30)::bigint; +-- binary about concat +DROP TABLE IF EXISTS t1; +SET dolphin.sql_mode = 
'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; +CREATE TABLE t1 (s1 binary(2), s2 varbinary(2)); +INSERT INTO t1 VALUES (0x4100, 0x4100); +SELECT LENGTH(concat('*', s1, '*', s2, '*')) FROM t1; +SELECT HEX(concat('*', s1, '*', s2, '*')) FROM t1; +SELECT HEX(concat('*', s1, '*', s2, '*')) FROM t1; +SELECT HEX(s1), HEX(s2), HEX('*') FROM t1; +DROP TABLE t1; + drop table if exists binary_operator; reset dolphin.b_compatibility_mode; -- Gitee From 94b7ffc42e0c256d2de6d354955c2df35a77bf39 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 10 Jan 2024 20:02:27 +0800 Subject: [PATCH 178/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dvarbinary=E5=9C=A8cop?= =?UTF-8?q?y=E5=91=BD=E4=BB=A4=E4=B8=AD=E6=8F=92=E5=85=A5=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/input/binary_export_test.source | 4 ++++ .../dolphin/output/binary_export_test.source | 22 ++++++++++++++++--- contrib/dolphin/plugin_utils/adt/varlena.cpp | 12 +++++----- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/contrib/dolphin/input/binary_export_test.source b/contrib/dolphin/input/binary_export_test.source index b23b13a51..62ac1e3b9 100644 --- a/contrib/dolphin/input/binary_export_test.source +++ b/contrib/dolphin/input/binary_export_test.source @@ -5,6 +5,10 @@ CREATE DATABASE b_target dbcompatibility 'B'; set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; create table t1 (a binary(255), b varbinary(255)); insert into t1 values +('123', '123'), +('234', '234'), +('345', '345'), +('456', '456'), 
(0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE, 0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE); select * from t1; diff --git a/contrib/dolphin/output/binary_export_test.source b/contrib/dolphin/output/binary_export_test.source index bc105c384..b1bddded5 100644 --- a/contrib/dolphin/output/binary_export_test.source +++ b/contrib/dolphin/output/binary_export_test.source @@ -5,13 +5,21 @@ CREATE DATABASE b_target dbcompatibility 'B'; set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; create table t1 (a binary(255), b varbinary(255)); insert into t1 values +('123', '123'), +('234', '234'), +('345', '345'), +('456', '456'), 
(0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE, 0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFE); select * from t1; a | b 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x313233000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x313233 + 
\x323334000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x323334 + \x333435000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x333435 + \x343536000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x343536 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe -(1 row) +(5 rows) -- test about copy copy t1 to '@abs_builddir@/data/binary.sql'; @@ -20,8 +28,12 @@ copy t2 from '@abs_builddir@/data/binary.sql'; select * from t2; a | b 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x313233000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x313233 + 
\x323334000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x323334 + \x333435000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x333435 + \x343536000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x343536 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe -(1 row) +(5 rows) -- test about dump \! @abs_bindir@/gs_dump b_source -p @portstring@ -f @abs_bindir@/binary_dump.tar -F t >/dev/null 2>&1; echo $? 
@@ -32,8 +44,12 @@ select * from t2; select * from t1; a | b ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + \x313233000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x313233 + 
\x323334000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x323334 + \x333435000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x333435 + \x343536000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 | \x343536 
\x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe | \x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfe -(1 row) +(5 rows) \c contrib_regression drop database b_source; diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 8479957be..b19c8b6be 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -694,23 +694,23 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) int bc; int cl; bytea* result = NULL; + int binary_length = 0; /* Recognize hex input */ if (inputText[0] == '\\' && inputText[1] == 'x') { size_t len = strlen(inputText); if (atttypmod < VARHDRSZ) { - bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */ - result = (bytea*)palloc(bc); + binary_length = BINARY_LEN(len) + VARHDRSZ; /* maximum possible length */ } else { if (BINARY_LEN(len) > (size_t)(atttypmod - VARHDRSZ)) { ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); } - result = 
(bytea*)palloc0(atttypmod); /* palloc0, pad with zero */ + binary_length = fcinfo->flinfo->fn_rettype == BINARYOID ? atttypmod : BINARY_LEN(len) + VARHDRSZ; } - - bc = hex_decode(inputText + 2, len - 2, VARDATA(result)); - SET_VARSIZE(result, atttypmod < VARHDRSZ ? bc + VARHDRSZ : atttypmod); + result = (bytea*)palloc0(binary_length); + (void)hex_decode(inputText + 2, len - 2, VARDATA(result)); + SET_VARSIZE(result, binary_length); PG_RETURN_BYTEA_P(result); } -- Gitee From 1b6e91f395182fb98303011c9af3470142db4952 Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Thu, 11 Jan 2024 15:55:55 +0800 Subject: [PATCH 179/434] =?UTF-8?q?=E7=A7=BB=E9=99=A4upsert=E5=90=8E?= =?UTF-8?q?=E6=94=AF=E6=8C=81=E5=88=AB=E5=90=8D=E5=AE=9E=E7=8E=B0=EF=BC=8C?= =?UTF-8?q?=E8=A1=A8=E7=8E=B0=E4=B8=BA5.7=E7=9A=84=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E6=96=B9=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/kwlist.out | 7 +- contrib/dolphin/plugin_parser/gram.y | 97 ++++++---------------------- contrib/dolphin/sql/kwlist.sql | 3 +- 3 files changed, 21 insertions(+), 86 deletions(-) diff --git a/contrib/dolphin/expected/kwlist.out b/contrib/dolphin/expected/kwlist.out index 7f9ec1402..6c5293757 100644 --- a/contrib/dolphin/expected/kwlist.out +++ b/contrib/dolphin/expected/kwlist.out @@ -384,12 +384,7 @@ select excluded from excluded; (1 row) insert into excluded values (1) on duplicate key update excluded = excluded.excluded; -insert into excluded values (1) as excluded on duplicate key update excluded = excluded.excluded; -- ERROR -ERROR: table reference "excluded" is ambiguous -LINE 1: ...1) as excluded on duplicate key update excluded = excluded.e... 
- ^ -CONTEXT: referenced column: excluded -insert into excluded values (1) as t on duplicate key update excluded = excluded.excluded; +insert into excluded select 1 on duplicate key update excluded = excluded + 1; -- name test CREATE TABLE x (id int); CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS ' diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 2cf1ce0e7..565c4f296 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -678,7 +678,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul /* */ %type select_no_parens select_no_parens_without_withclause select_with_parens select_clause - simple_select values_clause insert_empty_values insert_mysql_rest_selectStmt + simple_select values_clause insert_empty_values %type alter_column_default opclass_item opclass_drop alter_using AutoIncrementValue %type add_drop opt_asc_desc opt_nulls_order con_asc_desc @@ -742,7 +742,6 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul %type TriggerEvents TriggerOneEvent %type TriggerFuncArg %type TriggerWhen -%type opt_values_reference %type event_trigger_when_list event_trigger_value_list %type event_trigger_when_item %type enable_trigger @@ -891,7 +890,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul %type SeqOptElem /* INSERT */ -%type insert_rest insert_mysql_rest insert_rest_without_select insert_mysql_rest_normal insert_mysql_rest_upsert insert_mysql_rest_ignore +%type insert_rest insert_mysql_rest %type upsert_clause %type merge_insert merge_update @@ -28330,7 +28329,7 @@ update_delete_partition_clause: PARTITION '(' name ')' * *****************************************************************************/ -InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest_ignore returning_clause +InsertStmt: opt_with_clause INSERT hint_string opt_ignore 
into_empty insert_target insert_mysql_rest returning_clause { $7->relation = $6; $7->returningList = $8; @@ -28391,9 +28390,9 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ } } - | opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest_upsert opt_values_reference upsert_clause returning_clause + | opt_with_clause INSERT hint_string opt_ignore into_empty insert_target insert_mysql_rest upsert_clause returning_clause { - if ($10 != NIL) { + if ($9 != NIL) { const char* message = "RETURNING clause is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); ereport(errstate, @@ -28441,17 +28440,17 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ /* for UPSERT, keep the INSERT statement as well */ $7->relation = $6; - $7->returningList = $10; + $7->returningList = $9; $7->isReplace = false; $7->withClause = $1; $7->hasIgnore = $4; #ifdef ENABLE_MULTIPLE_NODES if (t_thrd.proc->workingVersionNum >= UPSERT_ROW_STORE_VERSION_NUM) { UpsertClause *uc = makeNode(UpsertClause); - if ($9 == NULL) + if ($8 == NULL) uc->targetList = NIL; else - uc->targetList = ((MergeWhenClause *)$9)->targetList; + uc->targetList = ((MergeWhenClause *)$8)->targetList; $7->upsertClause = uc; } #endif @@ -28475,8 +28474,8 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ n->values = NULL; m->mergeWhenClauses = list_make1((Node *) n); - if ($9 != NULL) - m->mergeWhenClauses = list_concat(list_make1($9), m->mergeWhenClauses); + if ($8 != NULL) + m->mergeWhenClauses = list_concat(list_make1($8), m->mergeWhenClauses); m->hintState = create_hintstate($3); GetSessionContext()->upSertAliasName = NULL; @@ -28484,9 +28483,9 @@ InsertStmt: opt_with_clause INSERT hint_string opt_ignore into_empty insert_targ $$ = (Node *)m; } else { $7->relation = $6; - $7->returningList = $10; + $7->returningList 
= $9; $7->withClause = $1; - $7->upsertClause = (UpsertClause *)$9; + $7->upsertClause = (UpsertClause *)$8; $7->upsertClause->aliasName = GetSessionContext()->upSertAliasName; $7->isReplace = false; $7->hintState = create_hintstate($3); @@ -28549,37 +28548,10 @@ insert_target: } ; -opt_values_reference: - /* EMPTY */ - { - GetSessionContext()->upSertAliasName = makeAlias("delay", NIL); - $$ =(Node *)NULL; - } - | AS DolphinColId '(' insert_column_list ')' - { - Alias *a2 = makeAlias(GetDolphinObjName($2->str, $2->is_quoted), $4); - GetSessionContext()->upSertAliasName = a2; - $$ = (Node *) a2; - } - | AS DolphinColId - { - Alias *a2 = makeAlias(GetDolphinObjName($2->str, $2->is_quoted), NIL); - GetSessionContext()->upSertAliasName = a2; - $$ = (Node *) a2; - } - -insert_mysql_rest_upsert: - insert_mysql_rest_normal - { $$ = $1; } - | insert_mysql_rest - { $$ = $1; } -insert_mysql_rest_ignore: +insert_mysql_rest: insert_rest { $$ = $1; } - | insert_mysql_rest - { $$ = $1; } -insert_mysql_rest: - SET insert_set_list + | SET insert_set_list { $$ = makeNode(InsertStmt); SelectStmt *n = makeNode(SelectStmt); @@ -28597,35 +28569,6 @@ insert_mysql_rest: } ; -insert_mysql_rest_selectStmt: select_with_parens { $$ = (Node*)$1; } - | values_clause { $$ = $1; } - -insert_mysql_rest_normal: insert_mysql_rest_selectStmt - { - $$ = makeNode(InsertStmt); - $$->cols = NIL; - $$->selectStmt = (Node*)$1; - $$->isRewritten = false; - } - | '(' insert_column_list ')' insert_mysql_rest_selectStmt - { - $$ = makeNode(InsertStmt); - $$->cols = $2; - $$->selectStmt = (Node*)$4; - $$->isRewritten = false; - } - | '(' ')' insert_mysql_rest_selectStmt - { - $$ = makeNode(InsertStmt); - $$->cols = NIL; - $$->selectStmt = (Node*)$3; - $$->isRewritten = false; - } - | insert_rest_without_select - { - $$ = $1; - } - insert_rest: SelectStmt { @@ -28648,12 +28591,7 @@ insert_rest: $$->selectStmt = $3; $$->isRewritten = false; } - | insert_rest_without_select - { - $$ = $1; - } 
-insert_rest_without_select: - DEFAULT VALUES + | DEFAULT VALUES { $$ = makeNode(InsertStmt); $$->cols = NIL; @@ -28800,7 +28738,10 @@ upsert_clause: ; UPSERT: - ON DUPLICATE KEY UPDATE { GetSessionContext()->isUpsert = true; } + ON DUPLICATE KEY UPDATE { + GetSessionContext()->isUpsert = true; + GetSessionContext()->upSertAliasName = GetSessionContext()->upSertAliasName = makeAlias("delay", NIL); + } ; /***************************************************************************** diff --git a/contrib/dolphin/sql/kwlist.sql b/contrib/dolphin/sql/kwlist.sql index ce93f5a59..57ddefabc 100644 --- a/contrib/dolphin/sql/kwlist.sql +++ b/contrib/dolphin/sql/kwlist.sql @@ -185,8 +185,7 @@ create table excluded(excluded int); insert into excluded values(1); select excluded from excluded; insert into excluded values (1) on duplicate key update excluded = excluded.excluded; -insert into excluded values (1) as excluded on duplicate key update excluded = excluded.excluded; -- ERROR -insert into excluded values (1) as t on duplicate key update excluded = excluded.excluded; +insert into excluded select 1 on duplicate key update excluded = excluded + 1; -- name test CREATE TABLE x (id int); -- Gitee From 59206341da5c26d5efbaf7c45a772bd921dbe8c0 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Thu, 11 Jan 2024 16:47:19 +0800 Subject: [PATCH 180/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8Dexp=E5=87=BD=E6=95=B0=E4=B8=8D=E6=94=AF?= =?UTF-8?q?=E6=8C=81year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=E4=BB=A5?= =?UTF-8?q?=E5=8F=8Aexp(-1000)=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E3=80=91:=20=E4=BF=AE=E5=A4=8Dexp=E5=87=BD=E6=95=B0=E4=B8=8D?= =?UTF-8?q?=E6=94=AF=E6=8C=81year=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E4=BB=A5=E5=8F=8Aexp(-1000)=E6=8A=A5=E9=94=99=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98=E3=80=82=20=E3=80=90=E6=A0=B9=E5=9B=A0?= 
=?UTF-8?q?=E5=88=86=E6=9E=90=E3=80=91:=20=E5=9B=A0=E4=B8=BA=E5=BD=93?= =?UTF-8?q?=E5=89=8Dyear=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=E8=BD=ACn?= =?UTF-8?q?umeric=E7=9A=84cast=E9=83=BD=E4=B8=8D=E6=98=AF=E9=9A=90?= =?UTF-8?q?=E5=BC=8F=E8=BD=AC=E6=8D=A2=EF=BC=8C=E5=87=BD=E6=95=B0exp?= =?UTF-8?q?=E5=8F=88=E6=B2=A1=E5=A3=B0=E6=98=8E=E8=BF=99=E5=87=A0=E4=B8=AA?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E7=9A=84=E5=8F=82=E6=95=B0=EF=BC=8C=E5=9B=A0?= =?UTF-8?q?=E6=AD=A4=E6=8A=A5=E9=94=99=E3=80=82=20=E3=80=90=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=A3=B0=E6=98=8Eyear?= =?UTF-8?q?=E4=BB=A5=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=E7=9A=84exp=E5=8F=82?= =?UTF-8?q?=E6=95=B0=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80?= =?UTF-8?q?=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengau?= =?UTF-8?q?ssorg/dashboard=3Fissue=3DI8QRN6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/math_func.out | 98 +++++++++++++++++++ contrib/dolphin/plugin_utils/adt/float.cpp | 9 +- contrib/dolphin/plugin_utils/adt/numeric.cpp | 23 +++-- contrib/dolphin/plugin_utils/adt/varlena.cpp | 3 +- .../rollback_script/dolphin--3.0--2.0.sql | 2 + .../dolphin/sql/builtin_funcs/math_func.sql | 23 +++++ .../upgrade_script/dolphin--2.0--3.0.sql | 4 + 7 files changed, 151 insertions(+), 11 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out b/contrib/dolphin/expected/builtin_funcs/math_func.out index 0f35a4cd7..6627cda06 100644 --- a/contrib/dolphin/expected/builtin_funcs/math_func.out +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -119,6 +119,104 @@ select * from test_double_degrees order by 1; 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 57.2957795130823 | 401.070456591576 | 57.2957795130823 | 1159105365.18446 | 
10946358.6759744 | 10946378.4773958 | 1.15910537613081e+15 | 1.15910537613084e+15 | 1.15910537613081e+15 | 1.15910537613084e+15 | 115909.361954966 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 70.4738088010913 | 57.2957795130823 | 286.478897565412 | 0 (2 rows) +-- exp math function +reset dolphin.sql_mode; +select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`),exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`date`), exp(`time`), exp(`time(4)`), exp(`datetime`),exp(`datetime(4)`), exp(`timestamp`), exp(`timestamp(4)`), exp(`year`), exp(`char`), exp(`varchar`), exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; +ERROR: value overflows numeric format +CONTEXT: referenced column: exp +select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a " +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced 
column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: exp + exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp | exp +------------------+--------------------+------------------+--------------------+------------------+--------------------+------------------+--------------------+------------------+------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+------------------+------------------+--------------------+--------------------+--------------------+-------------------- + 2.71828182845905 | 2.7182818284590452 | 2.71828182845905 | 2.7182818284590452 | 2.71828182845905 | 2.7182818284590452 | 2.71828182845905 | 2.7182818284590452 | 2.71828182845905 | 2.71828182845905 | 2.7182818284590452 | 2.7182818284590452 | 1096.6331584284586 | 2.7182818284590452 | 3.4212295362896736 | 3.4212295362896736 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.4212295362896736 | 2.7182818284590452 | 148.41315910257660 | 1.0000000000000000 +(1 row) + +create table test_double_exp(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double); +insert ignore into test_double_exp select 
exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a " +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: exp +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into test_double_exp select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax 
for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: exp +WARNING: invalid input syntax for type numeric: "{"a": 1, "b": 2}" +CONTEXT: referenced column: exp +select * from test_double_exp order by 1; + d1 | d2 | d3 | d4 | d5 | d6 | d7 | d8 | d9 | d10 | d11 | d12 | d13 | d14 | d15 | d16 | d17 | d18 | d19 | d20 | d21 | d22 | d23 | d24 | d25 | d26 +------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+----- + 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 1096.63315842846 | 2.71828182845905 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 2.71828182845905 | 148.413159102577 | 1 + 
2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 1096.63315842846 | 2.71828182845905 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 2.71828182845905 | 148.413159102577 | 1 +(2 rows) + +select exp(709); + exp +----------------------- + 8.21840746155497e+307 +(1 row) + +select exp(710); +ERROR: value out of range: overflow +CONTEXT: referenced column: exp +select exp(-1000); + exp +----- + 0 +(1 row) + +drop table if exists test_double_exp; drop table if exists test_double_degrees; drop table if exists test_type_table; drop schema test_math_func cascade; diff --git a/contrib/dolphin/plugin_utils/adt/float.cpp b/contrib/dolphin/plugin_utils/adt/float.cpp index 10c64733c..e1a053c19 100644 --- a/contrib/dolphin/plugin_utils/adt/float.cpp +++ b/contrib/dolphin/plugin_utils/adt/float.cpp @@ -1729,8 +1729,13 @@ Datum dexp(PG_FUNCTION_ARGS) result = exp(arg1); if (errno == ERANGE && result != 0 && !isinf(result)) result = get_float8_infinity(); - - CHECKFLOATVAL(result, isinf(arg1), false); +#ifdef DOLPHIN + /* The Zero value is handled as normal data in mysql*/ + if (ENABLE_B_CMPT_MODE) + CHECKFLOATVAL(result, isinf(arg1), true); + else +#endif + CHECKFLOATVAL(result, isinf(arg1), false); PG_RETURN_FLOAT8(result); } diff --git a/contrib/dolphin/plugin_utils/adt/numeric.cpp b/contrib/dolphin/plugin_utils/adt/numeric.cpp index 8a66c194c..ce7899c22 100644 --- a/contrib/dolphin/plugin_utils/adt/numeric.cpp +++ b/contrib/dolphin/plugin_utils/adt/numeric.cpp @@ -8952,8 +8952,9 @@ Datum numeric_text(PG_FUNCTION_ARGS) if (NUMERIC_IS_BI(num)) { num = makeNumericNormal(num); } - tmp = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(num))); - result = 
DirectFunctionCall1(textin, CStringGetDatum(tmp)); + + tmp = DatumGetCString(DirectFunctionCall1Coll(numeric_out, InvalidOid, NumericGetDatum(num), fcinfo->can_ignore)); + result = DirectFunctionCall1Coll(textin, InvalidOid, CStringGetDatum(tmp), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -8964,9 +8965,11 @@ Datum bpchar_numeric(PG_FUNCTION_ARGS) Datum bpcharValue = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(bpcharout, bpcharValue)); - result = DirectFunctionCall3(numeric_in, CStringGetDatum(tmp), ObjectIdGetDatum(0), Int32GetDatum(-1)); + tmp = DatumGetCString(DirectFunctionCall1Coll(bpcharout, InvalidOid, bpcharValue, fcinfo->can_ignore)); + + result = DirectFunctionCall3Coll(numeric_in, InvalidOid, CStringGetDatum(tmp), ObjectIdGetDatum(0), + Int32GetDatum(-1), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -8977,9 +8980,11 @@ Datum varchar_numeric(PG_FUNCTION_ARGS) Datum txt = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(varcharout, txt)); - result = DirectFunctionCall3(numeric_in, CStringGetDatum(tmp), ObjectIdGetDatum(0), Int32GetDatum(-1)); + tmp = DatumGetCString(DirectFunctionCall1Coll(varcharout, InvalidOid, txt, fcinfo->can_ignore)); + + result = DirectFunctionCall3Coll(numeric_in, InvalidOid, CStringGetDatum(tmp), ObjectIdGetDatum(0), + Int32GetDatum(-1), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); @@ -9060,9 +9065,11 @@ Datum text_numeric(PG_FUNCTION_ARGS) Datum txt = PG_GETARG_DATUM(0); char* tmp = NULL; Datum result; - tmp = DatumGetCString(DirectFunctionCall1(textout, txt)); - result = DirectFunctionCall3(numeric_in, CStringGetDatum(tmp), ObjectIdGetDatum(0), Int32GetDatum(-1)); + tmp = DatumGetCString(DirectFunctionCall1Coll(textout, InvalidOid, txt, fcinfo->can_ignore)); + + result = DirectFunctionCall3Coll(numeric_in, InvalidOid, CStringGetDatum(tmp), ObjectIdGetDatum(0), + 
Int32GetDatum(-1), fcinfo->can_ignore); pfree_ext(tmp); PG_RETURN_DATUM(result); diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index b7f52a99f..15498e48c 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10779,7 +10779,8 @@ Datum Varlena2Numeric(PG_FUNCTION_ARGS) data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); Datum result; - result = DirectFunctionCall3(numeric_in, CStringGetDatum(data), ObjectIdGetDatum(0), Int32GetDatum(-1)); + result = DirectFunctionCall3Coll(numeric_in, InvalidOid, CStringGetDatum(data), ObjectIdGetDatum(0), + Int32GetDatum(-1), fcinfo->can_ignore); pfree_ext(data); PG_RETURN_DATUM(result); diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 6fce4f49c..b724c2b48 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -357,3 +357,5 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); +DROP FUNCTION IF EXISTS pg_catalog.exp(year); +DROP FUNCTION IF EXISTS pg_catalog.exp(json); diff --git a/contrib/dolphin/sql/builtin_funcs/math_func.sql b/contrib/dolphin/sql/builtin_funcs/math_func.sql index 0ea963d90..6ac8dd6e2 100644 --- a/contrib/dolphin/sql/builtin_funcs/math_func.sql +++ b/contrib/dolphin/sql/builtin_funcs/math_func.sql @@ -58,6 +58,29 @@ insert into test_double_degrees select degrees(`int1`), degrees(`uint1`), degre select * from test_double_degrees order by 1; + +-- exp math function +reset dolphin.sql_mode; +select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`),exp(`bit1`), 
exp(`bit64`), exp(`boolean`), exp(`date`), exp(`time`), exp(`time(4)`), exp(`datetime`),exp(`datetime(4)`), exp(`timestamp`), exp(`timestamp(4)`), exp(`year`), exp(`char`), exp(`varchar`), exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; + + +select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; + +create table test_double_exp(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double); + +insert ignore into test_double_exp select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; + +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; + +insert into test_double_exp select exp(`int1`), exp(`uint1`), exp(`int2`), exp(`uint2`), exp(`int4`), exp(`uint4`), exp(`int8`), exp(`uint8`), exp(`float4`), exp(`float8`), exp(`numeric`), exp(`bit1`), exp(`bit64`), exp(`boolean`), exp(`char`), exp(`varchar`),exp(`binary`), exp(`varbinary`), exp(`tinyblob`), exp(`blob`), exp(`mediumblob`), exp(`longblob`), 
exp(`text`), exp(`enum_t`), exp(`set_t`), exp(`json`) from test_type_table; + +select * from test_double_exp order by 1; + +select exp(709); +select exp(710); +select exp(-1000); + +drop table if exists test_double_exp; drop table if exists test_double_degrees; drop table if exists test_type_table; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 99dec510f..471e157d0 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -594,6 +594,10 @@ CREATE OPERATOR dolphin_catalog.~(rightarg = anyset, procedure = dolphin_catalog DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); +DROP FUNCTION IF EXISTS pg_catalog.exp(year); +DROP FUNCTION IF EXISTS pg_catalog.exp(json); CREATE OR REPLACE FUNCTION pg_catalog.degrees(boolean) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.exp(year) RETURNS numeric LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.exp(cast($1 as numeric))'; +CREATE OR REPLACE FUNCTION pg_catalog.exp(json) RETURNS numeric LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.exp(cast($1 as numeric))'; -- Gitee From d1dd67bc2ff664d4049f62a3d142dca6f3098d7c Mon Sep 17 00:00:00 2001 From: yuchao Date: Thu, 11 Jan 2024 15:07:06 +0800 Subject: [PATCH 181/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dselect=E8=AF=AD?= =?UTF-8?q?=E5=8F=A5from=20list=E4=B8=8D=E6=94=AF=E6=8C=81=E5=8A=A0?= 
=?UTF-8?q?=E6=8B=AC=E5=8F=B7=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/select_from_parens.out | 28 +++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_parser/gram.y | 23 +++++++++++++-- contrib/dolphin/sql/select_from_parens.sql | 13 +++++++++ 4 files changed, 62 insertions(+), 4 deletions(-) create mode 100644 contrib/dolphin/expected/select_from_parens.out create mode 100644 contrib/dolphin/sql/select_from_parens.sql diff --git a/contrib/dolphin/expected/select_from_parens.out b/contrib/dolphin/expected/select_from_parens.out new file mode 100644 index 000000000..44a84b5b1 --- /dev/null +++ b/contrib/dolphin/expected/select_from_parens.out @@ -0,0 +1,28 @@ +create schema select_from_parens; +set current_schema = select_from_parens; +create table t1(a int); +create table t2(b int); +insert into t1 values(1); +insert into t2 values(2); +select * from (t1); + a +--- + 1 +(1 row) + +select * from (t1,t2); + a | b +---+--- + 1 | 2 +(1 row) + +select * from ((t1),(t2)); + a | b +---+--- + 1 | 2 +(1 row) + +drop table t1; +drop table t2; +reset current_schema; +drop schema select_from_parens; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index adb7b40b4..642c2d2b1 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -5,7 +5,7 @@ test: string_func_test/db_b_ascii_test string_func_test/db_b_left_right_test str test: ast b_compatibility_time_type db_b_new_gram_test group_concat_test test_condition vec_engine test_uncommon_table_option add_unique_index -test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond test_set_password_for_user test_timestamp_overflow +test: db_b_parser1 db_b_parser2 db_b_parser3 db_b_parser4 second_microsecond test_set_password_for_user test_timestamp_overflow select_from_parens test: 
db_b_plpgsql_test default_guc describe explain_desc kill set_password network test_dayofweek test_timestampn kwlist empty_enum_value diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 2cf1ce0e7..c98710496 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -798,7 +798,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul opt_column_list columnList opt_name_list opt_analyze_column_define opt_multi_name_list opt_include_without_empty opt_c_include index_including_params sort_clause opt_sort_clause sortby_list index_params fulltext_index_params table_index_elems constraint_params - name_list UserIdList from_clause from_list opt_array_bounds dolphin_schema_name_list + name_list UserIdList from_clause from_list from_list_parens opt_array_bounds dolphin_schema_name_list qualified_name_list any_name type_name_list collate_name any_name_or_sconst any_name_list dolphin_qualified_name_list dolphin_any_name dolphin_any_name_list any_operator expr_list attrs callfunc_args callfunc_args_or_empty dolphin_attrs rename_user_clause rename_list target_list insert_column_list set_target_list rename_clause_list rename_clause @@ -921,7 +921,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul %type alias_clause opt_alias_clause dolphin_alias_clause opt_dolphin_alias_clause %type sortby %type index_elem table_index_elem constraint_elem fulltext_index_elem -%type table_ref +%type table_ref single_table %type joined_table %type relation_expr %type relation_expr_opt_alias delete_relation_expr_opt_alias @@ -30415,6 +30415,7 @@ values_clause: from_clause: FROM from_list { $$ = $2; } + | FROM from_list_parens { $$ = $2; } | FROM DUAL_P { $$ = NIL; } | /*EMPTY*/ %prec EMPTY_FROM_CLAUSE { $$ = NIL; } @@ -30432,7 +30433,13 @@ from_list: * and joined_table := '(' joined_table ')'. So, we must have the * redundant-looking productions here instead. 
*/ -table_ref: relation_expr %prec UMINUS + +from_list_parens: + '(' from_list_parens ')' { $$ = $2; } + | '(' from_list ',' table_ref ')' { $$ = lappend($2, $4); } + ; + +single_table: relation_expr %prec UMINUS { #ifndef ENABLE_MULTIPLE_NODES StringInfoData detailInfo; @@ -30578,6 +30585,16 @@ table_ref: relation_expr %prec UMINUS $1->indexhints = $7; $$ = (Node *)$1; } + | '(' single_table ')' + { + $$ = $2; + } + ; + +table_ref: single_table + { + $$ = $1; + } | func_table %prec UMINUS { RangeFunction *n = makeNode(RangeFunction); diff --git a/contrib/dolphin/sql/select_from_parens.sql b/contrib/dolphin/sql/select_from_parens.sql new file mode 100644 index 000000000..fa1c3f4c0 --- /dev/null +++ b/contrib/dolphin/sql/select_from_parens.sql @@ -0,0 +1,13 @@ +create schema select_from_parens; +set current_schema = select_from_parens; +create table t1(a int); +create table t2(b int); +insert into t1 values(1); +insert into t2 values(2); +select * from (t1); +select * from (t1,t2); +select * from ((t1),(t2)); +drop table t1; +drop table t2; +reset current_schema; +drop schema select_from_parens; -- Gitee From de4938b94937462e977abb515428526de20ef045 Mon Sep 17 00:00:00 2001 From: totaj Date: Thu, 11 Jan 2024 16:45:00 +0800 Subject: [PATCH 182/434] Fix binary/varbinary can't use index bug. 
--- contrib/dolphin/expected/test_binary.out | 106 ++++++++++++++++++ contrib/dolphin/expected/test_mysql_enum.out | 8 +- .../rollback_script/dolphin--3.0--2.0.sql | 59 ++++++---- contrib/dolphin/sql/test_binary.sql | 29 +++++ .../upgrade_script/dolphin--2.0--3.0.sql | 39 +++++++ 5 files changed, 213 insertions(+), 28 deletions(-) diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 6aabeb77f..c3fc1629a 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -881,6 +881,112 @@ select (-2075)::binary(30)::bigint; -2075 (1 row) +--binary/varbinary index test +create table t_index_test(a binary(100), b varbinary(100)); +insert into t_index_test select i,i from generate_series(1,10000) i; +create index i_b on t_index_test(a); +create index i_vb on t_index_test(b); +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (a = '\x31'::"binary") +(3 rows) + +explain (costs off) select * from t_index_test where a>='a1'; + QUERY PLAN +----------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (a >= '\x6131'::"binary") +(3 rows) + +explain (costs off) select * from t_index_test where a>'a1'; + QUERY PLAN +---------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (a > '\x6131'::"binary") +(3 rows) + +explain (costs off) select * from t_index_test where a<='1'; + QUERY PLAN +--------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (a <= '\x31'::"binary") +(3 rows) + +explain (costs off) select * from t_index_test where a<'1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (a < '\x31'::"binary") +(3 rows) + +explain (costs off) 
select * from t_index_test where b='1'; + QUERY PLAN +--------------------------------------- + [Bypass] + Index Scan using i_vb on t_index_test + Index Cond: (b = '\x31'::varbinary) +(3 rows) + +explain (costs off) select * from t_index_test where b>='a1'; + QUERY PLAN +------------------------------------------ + [Bypass] + Index Scan using i_vb on t_index_test + Index Cond: (b >= '\x6131'::varbinary) +(3 rows) + +explain (costs off) select * from t_index_test where b>'a1'; + QUERY PLAN +----------------------------------------- + [Bypass] + Index Scan using i_vb on t_index_test + Index Cond: (b > '\x6131'::varbinary) +(3 rows) + +explain (costs off) select * from t_index_test where b<='1'; + QUERY PLAN +---------------------------------------- + [Bypass] + Index Scan using i_vb on t_index_test + Index Cond: (b <= '\x31'::varbinary) +(3 rows) + +explain (costs off) select * from t_index_test where b<'1'; + QUERY PLAN +--------------------------------------- + [Bypass] + Index Scan using i_vb on t_index_test + Index Cond: (b < '\x31'::varbinary) +(3 rows) + +drop index i_b; +drop index i_vb; +create index i_b on t_index_test(a) using hash; +create index i_vb on t_index_test(b) using hash; +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; + QUERY PLAN +-------------------------------------- + Index Scan using i_b on t_index_test + Index Cond: (a = '\x31'::"binary") +(2 rows) + +explain (costs off) select * from t_index_test where b='1'; + QUERY PLAN +--------------------------------------- + Index Scan using i_vb on t_index_test + Index Cond: (b = '\x31'::varbinary) +(2 rows) + +drop table t_index_test; -- binary about concat DROP TABLE IF EXISTS t1; SET dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; diff --git a/contrib/dolphin/expected/test_mysql_enum.out 
b/contrib/dolphin/expected/test_mysql_enum.out index ebad8a8c3..2b69446c6 100644 --- a/contrib/dolphin/expected/test_mysql_enum.out +++ b/contrib/dolphin/expected/test_mysql_enum.out @@ -443,15 +443,15 @@ select a / 1 from enum_test_table; create table test_enum_d(ssl_type enum('','any','X509','SPECIFIED') not null default ''); \d test_enum_d - Table "test_enum.test_enum_d" - Column | Type | Modifiers +--?.* +--?.* --?.* --?.* \d+ test_enum_d - Table "test_enum.test_enum_d" - Column | Type | Modifiers | Storage | Stats target | Description +--?.* +--?.* --?.* --?.* Has OIDs: no diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 6fce4f49c..866645f28 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -282,78 +282,89 @@ DROP CAST IF EXISTS (year AS boolean); DROP FUNCTION IF EXISTS pg_catalog.year_to_bool(year); -- not operator -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int1); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, int1); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int1not(int1); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int2); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, int2); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int2not(int2); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int4); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, int4); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int4not(int4); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = int8); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, int8); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_int8not(int8); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint1); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, uint1); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint1not(uint1); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint2); +DROP OPERATOR IF EXISTS 
dolphin_catalog.~(NONE, uint2); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint2not(uint2); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint4); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, uint4); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint4not(uint4); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = uint8); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, uint8); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_uint8not(uint8); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = float4); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, float4); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float4not(float4); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = float8); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, float8); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_float8not(float8); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = numeric); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, numeric); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_numericnot(numeric); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = bit); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, bit); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_bitnot(bit); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = boolean); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, boolean); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_boolnot(boolean); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = date); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, date); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_datenot(date); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = time); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, time); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timenot(time); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = timestamp); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, timestamp without time zone); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestampnot(timestamp without time zone); -DROP 
OPERATOR IF EXISTS dolphin_catalog.~(rightarg = timestamp); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, timestamp with time zone); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_timestamptznot(timestamp with time zone); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = year); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, year); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_yearnot(year); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = char); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, char); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_charnot(char); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = varchar); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, varchar); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_varcharnot(varchar); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = text); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, text); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_textnot(text); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyelement); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, anyelement); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_varlenanot(anyelement); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyenum); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, anyenum); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_enumnot(anyenum); -DROP OPERATOR IF EXISTS dolphin_catalog.~(rightarg = anyset); +DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, anyset); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); +DROP OPERATOR CLASS IF EXISTS pg_catalog.varbinary_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.varbinary_ops USING HASH; +DROP OPERATOR CLASS IF EXISTS pg_catalog.binary_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.binary_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.varbinary_ops USING BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.varbinary_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.binary_ops USING 
BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.binary_ops USING HASH; +DROP FUNCTION IF EXISTS pg_catalog.varbinary_cmp(varbinary, varbinary); +DROP FUNCTION IF EXISTS pg_catalog.binary_cmp(binary, binary); + DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); diff --git a/contrib/dolphin/sql/test_binary.sql b/contrib/dolphin/sql/test_binary.sql index 113ffd218..e0f48e69d 100644 --- a/contrib/dolphin/sql/test_binary.sql +++ b/contrib/dolphin/sql/test_binary.sql @@ -249,6 +249,35 @@ select c1 < c7, c1 > c7, c1 <= c7, c1 >= c7 from binary_operator; select (20220101)::binary(30)::bigint; select (-2075)::binary(30)::bigint; +--binary/varbinary index test +create table t_index_test(a binary(100), b varbinary(100)); +insert into t_index_test select i,i from generate_series(1,10000) i; +create index i_b on t_index_test(a); +create index i_vb on t_index_test(b); +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; +explain (costs off) select * from t_index_test where a>='a1'; +explain (costs off) select * from t_index_test where a>'a1'; +explain (costs off) select * from t_index_test where a<='1'; +explain (costs off) select * from t_index_test where a<'1'; + +explain (costs off) select * from t_index_test where b='1'; +explain (costs off) select * from t_index_test where b>='a1'; +explain (costs off) select * from t_index_test where b>'a1'; +explain (costs off) select * from t_index_test where b<='1'; +explain (costs off) select * from t_index_test where b<'1'; + +drop index i_b; +drop index i_vb; +create index i_b on t_index_test(a) using hash; +create index i_vb on t_index_test(b) using hash; +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; + +explain (costs off) select * from t_index_test where b='1'; + +drop table t_index_test; + -- binary about concat DROP TABLE IF EXISTS t1; SET dolphin.sql_mode = 
'sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero,treat_bxconst_as_binary'; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 99dec510f..0eef1398a 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -591,6 +591,45 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset) CASCADE; CREATE OR REPLACE FUNCTION dolphin_catalog.dolphin_setnot(anyset) RETURNS uint8 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'dolphin_setnot'; CREATE OPERATOR dolphin_catalog.~(rightarg = anyset, procedure = dolphin_catalog.dolphin_setnot); +CREATE OR REPLACE FUNCTION pg_catalog.binary_cmp(binary, binary) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; + +CREATE OPERATOR FAMILY pg_catalog.binary_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.binary_ops USING HASH; + +CREATE OPERATOR CLASS pg_catalog.binary_ops DEFAULT + FOR TYPE binary USING BTREE FAMILY pg_catalog.binary_ops as + OPERATOR 1 pg_catalog.<(binary, binary), + OPERATOR 2 pg_catalog.<=(binary, binary), + OPERATOR 3 pg_catalog.=(binary, binary), + OPERATOR 4 pg_catalog.>=(binary, binary), + OPERATOR 5 pg_catalog.>(binary, binary), + FUNCTION 1 pg_catalog.binary_cmp(binary, binary), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.binary_ops DEFAULT + FOR TYPE binary USING HASH FAMILY binary_ops as + OPERATOR 1 pg_catalog.=(binary, binary), + FUNCTION 1 (binary, binary) pg_catalog.hashvarlena(internal); + +CREATE OR REPLACE FUNCTION pg_catalog.varbinary_cmp(varbinary, varbinary) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR FAMILY pg_catalog.varbinary_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.varbinary_ops USING HASH; + +CREATE OPERATOR CLASS 
pg_catalog.varbinary_ops DEFAULT + FOR TYPE varbinary USING BTREE FAMILY pg_catalog.varbinary_ops as + OPERATOR 1 pg_catalog.<(varbinary, varbinary), + OPERATOR 2 pg_catalog.<=(varbinary, varbinary), + OPERATOR 3 pg_catalog.=(varbinary, varbinary), + OPERATOR 4 pg_catalog.>=(varbinary, varbinary), + OPERATOR 5 pg_catalog.>(varbinary, varbinary), + FUNCTION 1 pg_catalog.varbinary_cmp(varbinary, varbinary), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.varbinary_ops DEFAULT + FOR TYPE varbinary USING HASH FAMILY pg_catalog.varbinary_ops as + OPERATOR 1 pg_catalog.=(varbinary, varbinary), + FUNCTION 1 (varbinary, varbinary) pg_catalog.hashvarlena(internal); + DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); -- Gitee From b33bacef4919b3f351a0039f9c31fa75c5b9a887 Mon Sep 17 00:00:00 2001 From: yuchao Date: Fri, 12 Jan 2024 14:23:57 +0800 Subject: [PATCH 183/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=85=BC=E5=AE=B9B?= =?UTF-8?q?=E5=BA=93=E8=B0=83=E7=94=A8=E5=87=BD=E6=95=B0str=5Fto=5Fdate?= =?UTF-8?q?=E3=80=81inet=5Fntoa=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/inet_ntoa_nvarchar2.out | 416 ++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- .../rollback_script/dolphin--3.0--2.0.sql | 6 + contrib/dolphin/sql/inet_ntoa_nvarchar2.sql | 131 ++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 10 + 5 files changed, 564 insertions(+), 1 deletion(-) create mode 100644 contrib/dolphin/expected/inet_ntoa_nvarchar2.out create mode 100644 contrib/dolphin/sql/inet_ntoa_nvarchar2.sql diff --git a/contrib/dolphin/expected/inet_ntoa_nvarchar2.out b/contrib/dolphin/expected/inet_ntoa_nvarchar2.out new file mode 100644 index 000000000..bd5b0d291 --- /dev/null +++ 
b/contrib/dolphin/expected/inet_ntoa_nvarchar2.out @@ -0,0 +1,416 @@ +create schema inet_ntoa_nvarchar2; +set current_schema = inet_ntoa_nvarchar2; +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +set dolphin.b_compatibility_mode=on; +set bytea_output=escape; +set extra_float_digits=0; +set b_format_behavior_compat_options='enable_set_variables'; +create table t_text0001( + c1 national varchar(1), + c2 national varchar(10), + c3 national varchar(255), + c4 text, + c5 text(1), + c6 text(10), + c7 text(255), + c8 tinytext, + c9 mediumtext, + c10 longtext, + c11 varchar(1), + c12 varchar(255), + c13 varchar(20000)); +set @val = 3.14159265; +insert into t_text0001 values +(substr(@val, 1, 1), @val, @val, @val, substr(@val, 1, 1), +@val, @val, @val, @val, @val, substr(@val, 1, 1), @val, @val); +set @val = 0; +insert into t_text0001 values +(@val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val); +set @val = -1; +insert into t_text0001 values +(substr(@val, 1, 1), @val, @val, @val, substr(@val, 1, 1), +@val, @val, @val, @val, @val, substr(@val, 1, 1), @val, @val); +set @val = 'abcdefghigklmnopqrstuvw'; +set @val1 = substr(@val, 1, 1); +set @val2 = substr(@val, 1, 2); +set @val3 = repeat(@val, 3); +insert into t_text0001 values +(@val1, @val1, @val1, @val1, @val1, @val1, +@val1, @val1, @val1, @val1, @val1, @val1, @val1); +insert into t_text0001 values +(substr(@val2, 1, 1), @val2, @val2, @val2, substr(@val2, 1, 1), @val2, +@val2, @val2, @val2, @val2, substr(@val2, 1, 1), @val2, @val2); +insert into t_text0001 values +(substr(@val3, 1, 1),substr(@val3, 1, 10), substr(@val3, 1, 255), +@val3, substr(@val3, 1, 1), substr(@val3, 1, 10), substr(@val3, 1, 255), +substr(@val3, 1, 133), @val3, @val3, +substr(@val3, 1, 1), @val3, repeat(@val3, 10)); +select str_to_date(c1, "%Y-%m-%d"), str_to_date(c2, "%Y-%m-%d"), str_to_date(c3, 
+"%Y-%m-%d"), str_to_date(c4, "%Y-%m-%d"), str_to_date(c5, "%Y-%m-%d"), +str_to_date(c6, "%Y-%m-%d"), str_to_date(c7, "%Y-%m-%d"), str_to_date(c8, +"%Y-%m-%d"), str_to_date(c9, "%Y-%m-%d"), str_to_date(c10, "%Y-%m-%d"), +str_to_date(c11, "%Y-%m-%d"), str_to_date(c12, "%Y-%m-%d"), str_to_date(c13, +"%Y-%m-%d") from t_text0001 order by 1,2,3,4,5,6,7,8,9,10,11,12,13; +WARNING: Incorrect date value: '3' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '3.14159265' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: 
Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '0' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date 
+CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '-1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: 
Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'ab' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghig' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghig' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for 
function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: 'abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw' for function str_to_date +CONTEXT: referenced column: str_to_date + str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+------------- 
+ | | | | | | | | | | | | + | | | | | | | | | | | | + | | | | | | | | | | | | + | | | | | | | | | | | | + | | | | | | | | | | | | + | | | | | | | | | | | | +(6 rows) + +select inet_ntoa(c1), inet_ntoa(c2), inet_ntoa(c3), inet_ntoa(c4), +inet_ntoa(c5), inet_ntoa(c6), inet_ntoa(c7), inet_ntoa(c8), inet_ntoa(c9), +inet_ntoa(c10), inet_ntoa(c11), inet_ntoa(c12), inet_ntoa(c13) from t_text0001 +order by 1,2,3,4,5,6,7,8,9,10,11,12,13; +WARNING: invalid input syntax for type bigint: "-" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "-" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "-" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid 
input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "ab" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghig" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghig" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: 
"abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "abcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvwabcdefghigklmnopqrstuvw" +CONTEXT: referenced column: inet_ntoa + inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa +-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- + 0.0.0.0 | | | | 0.0.0.0 | | | | | | 0.0.0.0 | | + 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 + 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 
0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 + 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 + 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 + 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 | 0.0.0.3 +(6 rows) + +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); +select +inet_ntoa(`int1`), +inet_ntoa(`uint1`), +inet_ntoa(`int2`), +inet_ntoa(`uint2`), +inet_ntoa(`int4`), +inet_ntoa(`uint4`), +inet_ntoa(`int8`), +inet_ntoa(`uint8`), 
+inet_ntoa(`float4`), +inet_ntoa(`float8`), +inet_ntoa(`numeric`), +inet_ntoa(`bit1`), +inet_ntoa(`bit64`), +inet_ntoa(`boolean`), +inet_ntoa(`char`), +inet_ntoa(`varchar`), +inet_ntoa(`binary`), +inet_ntoa(`varbinary`), +inet_ntoa(`tinyblob`), +inet_ntoa(`blob`), +inet_ntoa(`mediumblob`), +inet_ntoa(`longblob`), +inet_ntoa(`text`), +inet_ntoa(`enum_t`), +inet_ntoa(`set_t`) +from test_type_table; +WARNING: invalid input syntax for type bigint: "1.23a " +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "1.23a" +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type int16: "1.23a" +DETAIL: text contain invalid character +CONTEXT: referenced column: inet_ntoa +WARNING: invalid input syntax for type bigint: "1.23a" +CONTEXT: referenced column: inet_ntoa + inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa | inet_ntoa 
+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+-----------+----------- + 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.7 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.1 | 0.0.0.5 +(1 row) + +drop table test_type_table; +drop table t_text0001; +reset current_schema; +drop schema inet_ntoa_nvarchar2; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index f10839412..224827731 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -11,7 +11,7 @@ test: db_b_plpgsql_test default_guc describe explain_desc kill set_password netw test: empty_value_list empty_value_lists empty_value_support_value create_index test_guc_select_and_set test_copy_year2 test_default convert_truncated_warning -test: greatest_least join_without_on mysqlmode_fullgroup mysqlmode_strict mysqlmode_strict2 none_strict_warning test_alter_table test_dolphin_catalog +test: greatest_least join_without_on mysqlmode_fullgroup mysqlmode_strict mysqlmode_strict2 none_strict_warning test_alter_table test_dolphin_catalog inet_ntoa_nvarchar2 #import and export test: test_table_ddl_import_and_export test_table_partition_ddl_import_and_export test_view_ddl_import_and_export test_replace_ddl_import_and_export test_index_ddl_import_and_export test_function_import_and_export diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 6fce4f49c..e009c43e9 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ 
-357,3 +357,9 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); + +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(bit); +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(binary); +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(nvarchar2); + diff --git a/contrib/dolphin/sql/inet_ntoa_nvarchar2.sql b/contrib/dolphin/sql/inet_ntoa_nvarchar2.sql new file mode 100644 index 000000000..40e896775 --- /dev/null +++ b/contrib/dolphin/sql/inet_ntoa_nvarchar2.sql @@ -0,0 +1,131 @@ +create schema inet_ntoa_nvarchar2; +set current_schema = inet_ntoa_nvarchar2; +set dolphin.sql_mode = 'sql_mode_strict,sql_mode_full_group,pipes_as_concat,no_zero_date,pad_char_to_full_length,auto_recompile_function,error_for_division_by_zero'; +set dolphin.b_compatibility_mode=on; +set bytea_output=escape; +set extra_float_digits=0; +set b_format_behavior_compat_options='enable_set_variables'; +create table t_text0001( + c1 national varchar(1), + c2 national varchar(10), + c3 national varchar(255), + c4 text, + c5 text(1), + c6 text(10), + c7 text(255), + c8 tinytext, + c9 mediumtext, + c10 longtext, + c11 varchar(1), + c12 varchar(255), + c13 varchar(20000)); +set @val = 3.14159265; +insert into t_text0001 values +(substr(@val, 1, 1), @val, @val, @val, substr(@val, 1, 1), +@val, @val, @val, @val, @val, substr(@val, 1, 1), @val, @val); +set @val = 0; +insert into t_text0001 values +(@val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val, @val); +set @val = -1; +insert into t_text0001 values +(substr(@val, 1, 1), @val, @val, @val, substr(@val, 1, 1), +@val, @val, @val, @val, @val, substr(@val, 1, 1), @val, @val); +set @val = 'abcdefghigklmnopqrstuvw'; +set @val1 = substr(@val, 1, 1); +set @val2 = substr(@val, 1, 2); +set @val3 = repeat(@val, 3); +insert into t_text0001 
values +(@val1, @val1, @val1, @val1, @val1, @val1, +@val1, @val1, @val1, @val1, @val1, @val1, @val1); +insert into t_text0001 values +(substr(@val2, 1, 1), @val2, @val2, @val2, substr(@val2, 1, 1), @val2, +@val2, @val2, @val2, @val2, substr(@val2, 1, 1), @val2, @val2); +insert into t_text0001 values +(substr(@val3, 1, 1),substr(@val3, 1, 10), substr(@val3, 1, 255), +@val3, substr(@val3, 1, 1), substr(@val3, 1, 10), substr(@val3, 1, 255), +substr(@val3, 1, 133), @val3, @val3, +substr(@val3, 1, 1), @val3, repeat(@val3, 10)); +select str_to_date(c1, "%Y-%m-%d"), str_to_date(c2, "%Y-%m-%d"), str_to_date(c3, +"%Y-%m-%d"), str_to_date(c4, "%Y-%m-%d"), str_to_date(c5, "%Y-%m-%d"), +str_to_date(c6, "%Y-%m-%d"), str_to_date(c7, "%Y-%m-%d"), str_to_date(c8, +"%Y-%m-%d"), str_to_date(c9, "%Y-%m-%d"), str_to_date(c10, "%Y-%m-%d"), +str_to_date(c11, "%Y-%m-%d"), str_to_date(c12, "%Y-%m-%d"), str_to_date(c13, +"%Y-%m-%d") from t_text0001 order by 1,2,3,4,5,6,7,8,9,10,11,12,13; + +select inet_ntoa(c1), inet_ntoa(c2), inet_ntoa(c3), inet_ntoa(c4), +inet_ntoa(c5), inet_ntoa(c6), inet_ntoa(c7), inet_ntoa(c8), inet_ntoa(c9), +inet_ntoa(c10), inet_ntoa(c11), inet_ntoa(c12), inet_ntoa(c13) from t_text0001 +order by 1,2,3,4,5,6,7,8,9,10,11,12,13; + +CREATE TABLE test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` 
longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); + +insert into test_type_table values(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,b'1', b'111', true,'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023','1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a','a', 'a,c',json_object('a', 1, 'b', 2)); + +select +inet_ntoa(`int1`), +inet_ntoa(`uint1`), +inet_ntoa(`int2`), +inet_ntoa(`uint2`), +inet_ntoa(`int4`), +inet_ntoa(`uint4`), +inet_ntoa(`int8`), +inet_ntoa(`uint8`), +inet_ntoa(`float4`), +inet_ntoa(`float8`), +inet_ntoa(`numeric`), +inet_ntoa(`bit1`), +inet_ntoa(`bit64`), +inet_ntoa(`boolean`), +inet_ntoa(`char`), +inet_ntoa(`varchar`), +inet_ntoa(`binary`), +inet_ntoa(`varbinary`), +inet_ntoa(`tinyblob`), +inet_ntoa(`blob`), +inet_ntoa(`mediumblob`), +inet_ntoa(`longblob`), +inet_ntoa(`text`), +inet_ntoa(`enum_t`), +inet_ntoa(`set_t`) +from test_type_table; + +drop table test_type_table; +drop table t_text0001; +reset current_schema; +drop schema inet_ntoa_nvarchar2; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 99dec510f..7787c710e 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -597,3 +597,13 @@ DROP FUNCTION IF EXISTS pg_catalog.degrees(json); CREATE OR REPLACE FUNCTION pg_catalog.degrees(boolean) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double 
precision))'; + +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(bit) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(binary) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(tinyblob) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(nvarchar2) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.inet_ntoa (bit) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.inet_ntoa(cast($1 as int8))'; +CREATE OR REPLACE FUNCTION pg_catalog.inet_ntoa (binary) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.inet_ntoa(cast($1 as int8))'; +CREATE OR REPLACE FUNCTION pg_catalog.inet_ntoa (tinyblob) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.inet_ntoa(cast($1 as int8))'; +CREATE OR REPLACE FUNCTION pg_catalog.inet_ntoa (nvarchar2) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.inet_ntoa(cast($1 as varchar))'; + -- Gitee From 23027fe73e9439d132a8bcb1f15cd96350d4b638 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 15 Jan 2024 11:04:08 +0800 Subject: [PATCH 184/434] Fix dolphin_types func bug. 
--- contrib/dolphin/expected/test_datatype.out | 7 +++++++ contrib/dolphin/plugin_postgres.cpp | 5 ++--- contrib/dolphin/sql/test_datatype.sql | 3 +++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/test_datatype.out b/contrib/dolphin/expected/test_datatype.out index 119f5125e..5b9d88477 100644 --- a/contrib/dolphin/expected/test_datatype.out +++ b/contrib/dolphin/expected/test_datatype.out @@ -418,5 +418,12 @@ select * from t1 where b = '2000-01-01'::date; (1 row) drop table t1; +--- +select pg_catalog.delete(cast('test=>NULL' as hstore), cast(pg_catalog.dolphin_types() as _text)); + delete +-------------- + "test"=>NULL +(1 row) + drop schema b_datatype_test cascade; reset current_schema; diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index c36275182..e291c5268 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -1345,11 +1345,10 @@ Datum dolphin_types() Datum* datums = NULL; ArrayType* dolphinTypesArray = NULL; int dimension = 2; - int cstringLength = -2; datums = (Datum*)palloc(DOLPHIN_TYPES_NUM * TYPE_ATTRIBUTES_NUM * sizeof(Datum)); for (int row = 0; row < DOLPHIN_TYPES_NUM; row++) { for (int col = 0; col < TYPE_ATTRIBUTES_NUM; col++) { - datums[row * TYPE_ATTRIBUTES_NUM + col] = CStringGetDatum(dolphinTypes[row][col]); + datums[row * TYPE_ATTRIBUTES_NUM + col] = CStringGetTextDatum(dolphinTypes[row][col]); } } int dims[dimension]; @@ -1359,7 +1358,7 @@ Datum dolphin_types() lbs[0] = 1; lbs[1] = 1; - dolphinTypesArray = construct_md_array(datums, NULL, dimension, dims, lbs, CSTRINGOID, cstringLength, false, 'c'); + dolphinTypesArray = construct_md_array(datums, NULL, dimension, dims, lbs, TEXTOID, -1, false, 'i'); pfree_ext(datums); PG_RETURN_ARRAYTYPE_P(dolphinTypesArray); } diff --git a/contrib/dolphin/sql/test_datatype.sql b/contrib/dolphin/sql/test_datatype.sql index 08b5e4203..517217d78 100644 --- 
a/contrib/dolphin/sql/test_datatype.sql +++ b/contrib/dolphin/sql/test_datatype.sql @@ -147,5 +147,8 @@ select * from t1 where b = '2000-01-01'::date; drop table t1; +--- +select pg_catalog.delete(cast('test=>NULL' as hstore), cast(pg_catalog.dolphin_types() as _text)); + drop schema b_datatype_test cascade; reset current_schema; \ No newline at end of file -- Gitee From 95ffd43ebf3898c23b0a1f1af1fc823072d1d5ca Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Mon, 15 Jan 2024 15:33:31 +0800 Subject: [PATCH 185/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8V4P5=E6=89=80=E7=A4=BA=E7=9A=84cast?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2=E5=B8=83=E5=B0=94=E5=80=BC=E4=B8=BAdate?= =?UTF-8?q?=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98.=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8DI8V4P5=E6=89=80=E7=A4=BA=E7=9A=84cast=E8=BD=AC?= =?UTF-8?q?=E6=8D=A2=E5=B8=83=E5=B0=94=E5=80=BC=E4=B8=BAdate=E6=8A=A5?= =?UTF-8?q?=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90=E6=A0=B9?= =?UTF-8?q?=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20=E5=BD=93=E5=89=8D?= =?UTF-8?q?=E6=9C=AA=E5=AE=9E=E7=8E=B0bool=20to=20date=EF=BC=8C=E5=8F=A6?= =?UTF-8?q?=E5=A4=96=E5=BD=93=E5=89=8D=E5=A4=84=E7=90=86Number=20to=20Date?= =?UTF-8?q?=E4=B9=9F=E6=9C=89=E9=97=AE=E9=A2=98=EF=BC=8Cmysql=E6=98=BE?= =?UTF-8?q?=E7=A4=BANULL=EF=BC=8COpenGuass=E6=98=BE=E7=A4=BA0=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0bool=20to=20date=E7=9A=84Cast=E5=92=8C=E5=8F=82?= =?UTF-8?q?=E7=85=A7!1179=20=E5=AE=9E=E7=8E=B0number=20to=20date=E4=B8=BAN?= =?UTF-8?q?ULL=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90=E5=85=B3=E8=81=94?= =?UTF-8?q?=E9=9C=80=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.co?= =?UTF-8?q?m/opengaussorg/dashboard=3Fissue=3DI8V4P5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 135 ++++++++- 
contrib/dolphin/expected/conv_cast_test.out | 28 +- contrib/dolphin/expected/uint_cast3.out | 48 ++-- contrib/dolphin/include/plugin_utils/date.h | 11 +- .../dolphin/include/plugin_utils/timestamp.h | 2 + .../dolphin/plugin_parser/parse_coerce.cpp | 20 +- contrib/dolphin/plugin_utils/adt/date.cpp | 260 ++++++++++++++---- .../rollback_script/dolphin--3.0--2.0.sql | 15 + .../dolphin/sql/b_compatibility_time_type.sql | 15 + .../upgrade_script/dolphin--2.0--3.0.sql | 28 ++ 10 files changed, 445 insertions(+), 117 deletions(-) diff --git a/contrib/dolphin/expected/b_compatibility_time_type.out b/contrib/dolphin/expected/b_compatibility_time_type.out index 710af5768..aeee9f520 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -233,9 +233,9 @@ SELECT 990101::date; SELECT 100::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) SELECT 101::date; @@ -253,9 +253,9 @@ SELECT 10228::date; SELECT 10229::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) SELECT 991231::date; @@ -279,25 +279,25 @@ SELECT 21000228::date; SELECT 21000229::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) SELECT 100000000::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) SELECT 100101231::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) -- test time @@ -2314,6 +2314,111 @@ SELECT * FROM test_timestamp; drop table if exists t_date; drop table if exists t_datetime; drop table if exists test_timestamp; +set dolphin.b_compatibility_mode = true; +select cast(true as date) as result; +WARNING: Out of range 
value for date +CONTEXT: referenced column: date +SQL function "bool_date" statement 1 +referenced column: result + result +-------- + +(1 row) + +select cast(false as date) as result; + result +------------ + 0000-00-00 +(1 row) + +select cast(100::int1 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::uint1 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::int2 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::uint2 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::int4 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::uint4 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::int4 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::uint8 as date) as result; +WARNING: Out of range value for date +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::float4 as date) as result; +WARNING: date/time field value out of range: "000100" +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::float8 as date) as result; +WARNING: date/time field value out of range: "000100" +CONTEXT: referenced column: result + result +-------- + +(1 row) + +select cast(100::numeric as date) as result; +WARNING: date/time field value out of range: "000100" +CONTEXT: referenced column: result + result +-------- + +(1 row) + \c postgres DROP DATABASE b_time_type; DROP TABLESPACE 
b_time_type_example; diff --git a/contrib/dolphin/expected/conv_cast_test.out b/contrib/dolphin/expected/conv_cast_test.out index 90b62f7a6..51b7d25d3 100644 --- a/contrib/dolphin/expected/conv_cast_test.out +++ b/contrib/dolphin/expected/conv_cast_test.out @@ -784,8 +784,6 @@ select 'false'::bool::float8; create table test_date(a date); set dolphin.sql_mode = sql_mode_strict,sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; select 0::date; -WARNING: Out of range value for date -CONTEXT: referenced column: date date ------------ 0000-00-00 @@ -797,9 +795,9 @@ CONTEXT: referenced column: a select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) insert into test_date values(1); @@ -816,9 +814,9 @@ insert into test_date values(0); select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) insert into test_date values(1); @@ -826,8 +824,6 @@ ERROR: Out of range value for date CONTEXT: referenced column: a set dolphin.sql_mode = sql_mode_full_group,pipes_as_concat,ansi_quotes,no_zero_date,pad_char_to_full_length,auto_recompile_function; select 0::date; -WARNING: Out of range value for date -CONTEXT: referenced column: date date ------------ 0000-00-00 @@ -839,9 +835,9 @@ CONTEXT: referenced column: a select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) insert into test_date values(1); @@ -858,9 +854,9 @@ insert into test_date values(0); select 1::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) insert into test_date values(1); diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index 630f4d73e..6d983cf4d 100644 --- 
a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -383,9 +383,9 @@ WARNING: Cast to tinyint unsigned converted negative integer to it's positive c CONTEXT: referenced column: date WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select '256'::uint1::date; @@ -393,17 +393,17 @@ WARNING: tinyint unsigned out of range CONTEXT: referenced column: date WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select 2004::uint2::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select '65536'::uint2::date; @@ -411,25 +411,25 @@ WARNING: smallint unsigned out of range CONTEXT: referenced column: date WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select '65535'::uint2::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select 2067::uint8::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select '-1'::uint8::date; @@ -437,17 +437,17 @@ WARNING: Cast to bigint unsigned converted negative integer to it's positive co CONTEXT: referenced column: date WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select '18446744073709551615'::uint8::date; WARNING: Out of range value for date CONTEXT: referenced column: date - date ------------- - 0000-00-00 + date +------ + (1 row) select 1999::uint4::year; diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 2f834b67d..f42a4877c 100644 --- a/contrib/dolphin/include/plugin_utils/date.h 
+++ b/contrib/dolphin/include/plugin_utils/date.h @@ -122,8 +122,17 @@ typedef enum extern Datum time_internal(PG_FUNCTION_ARGS, char* str, int is_time_sconst, TimeErrorType* time_error_type); -extern Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull); +Datum int64_number_cast_time(PG_FUNCTION_ARGS, int64 number); +Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull); +Datum uint64_number_cast_time(PG_FUNCTION_ARGS, uint64 number); + char* parser_function_input(Datum txt, Oid oid); +Datum int64_number_cast_date(PG_FUNCTION_ARGS, int64 number); +Datum int_cast_date_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull); +Datum uint64_number_cast_date(PG_FUNCTION_ARGS, uint64 number); + +extern "C" DLL_PUBLIC Datum float8_b_format_datetime(PG_FUNCTION_ARGS); +extern "C" DLL_PUBLIC Datum numeric_b_format_datetime(PG_FUNCTION_ARGS); #endif diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index dc1859c45..9d0edf31e 100644 --- a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -64,6 +64,8 @@ do { #define ENABLE_ZERO_MONTH 2 #define ENABLE_ZERO_DATE 0 /* enable date like 0000-00-00, work in cstring_to_datetime */ #define NO_ZERO_DATE_SET() (SQL_MODE_NO_ZERO_DATE() ? 
TIME_NO_ZERO_DATE : ENABLE_ZERO_DATE) +#define EANBLE_ERROR_ON_DATE_LESS_THAN_MIN 128 /* not allow value less than 101 when enable */ +#define ENABLE_ZERO_DATE_BYPASSED 256 /* bypass the zero date check directly if set */ #define DTK_DATE_TIME 5 diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index ba4407ae2..40ca303c7 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -128,6 +128,12 @@ static const char* numCastTimeFunction[NUM_CAST_TIME_IDX] = {"int8_cast_time", " "uint32_cast_time", "uint64_cast_time", "float4_cast_time", "float8_cast_time", "numeric_cast_time"}; +static const char* numCastDateFunction[NUM_CAST_TIME_IDX] = {"int8_cast_date", "int16_cast_date", "int32_cast_date", + "int64_cast_date", "uint8_cast_date", "uint16_cast_date", + "uint32_cast_date", "uint64_cast_date", "float4_cast_date", + "float8_cast_date", "numeric_cast_date"}; + + typedef enum { INVALID_COLUMN = -1, UINT1, @@ -3384,6 +3390,15 @@ Oid findNumTimeExplicitCastFunction(Oid sourceTypeId, Oid funcid) return (cast_oid != InvalidOid) ? cast_oid : funcid; } +Oid findNumDateExplicitCastFunction(Oid sourceTypeId, Oid funcid) +{ + int idx = findNumTimeFunctionIdx(sourceTypeId); + Oid cast_oid = (idx == INVALID_IDX) ? InvalidOid : + get_func_oid(numCastDateFunction[idx], PG_CATALOG_NAMESPACE, NULL); + return (cast_oid != InvalidOid) ? 
cast_oid : funcid; +} + + int findEnumFunctionIdx(Oid typeId) { switch (typeId) { @@ -3554,7 +3569,10 @@ void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId *funcId = findBitCastTimeFunction(targetTypeId, defaultFuncId); } else if (targetTypeId == TIMEOID) { *funcId = findNumTimeExplicitCastFunction(sourceTypeId, defaultFuncId); - } else { + } else if (targetTypeId == DATEOID) { + *funcId = findNumDateExplicitCastFunction(sourceTypeId, defaultFuncId); + } + else { *funcId = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, defaultFuncId); } } diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index 1a2678950..dde936d42 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -115,6 +115,32 @@ extern "C" DLL_PUBLIC Datum float8_cast_time(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(numeric_cast_time); extern "C" DLL_PUBLIC Datum numeric_cast_time(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(int8_cast_date); +extern "C" DLL_PUBLIC Datum int8_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int16_cast_date); +extern "C" DLL_PUBLIC Datum int16_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int32_cast_date); +extern "C" DLL_PUBLIC Datum int32_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int64_cast_date); +extern "C" DLL_PUBLIC Datum int64_cast_date(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(uint8_cast_date); +extern "C" DLL_PUBLIC Datum uint8_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint16_cast_date); +extern "C" DLL_PUBLIC Datum uint16_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint32_cast_date); +extern "C" DLL_PUBLIC Datum uint32_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint64_cast_date); +extern "C" DLL_PUBLIC Datum uint64_cast_date(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(float4_cast_date); +extern "C" DLL_PUBLIC Datum 
float4_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(float8_cast_date); +extern "C" DLL_PUBLIC Datum float8_cast_date(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(numeric_cast_date); +extern "C" DLL_PUBLIC Datum numeric_cast_date(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1_PUBLIC(int8_b_format_date); extern "C" DLL_PUBLIC Datum int8_b_format_date(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(int16_b_format_date); @@ -730,10 +756,25 @@ int NumberDate(char *str, pg_tm *tm, unsigned int date_flag) return int32_b_format_date_internal(tm, date, strlen(adjusted) != DATE_YYYYMMDD_LEN, date_flag); } +inline bool is_flag_enable(unsigned int date_flag, unsigned int flag) +{ + return (date_flag & flag) != 0; +} + +inline bool is_enable_zero_date_bypassed(unsigned int date_flag) +{ + return is_flag_enable(date_flag, ENABLE_ZERO_DATE_BYPASSED); +} + +inline bool is_enable_error_on_date_less_than_min_value(unsigned int date_flag) +{ + return !date_flag || is_flag_enable(date_flag, EANBLE_ERROR_ON_DATE_LESS_THAN_MIN); +} + + int int32_b_format_date_internal(struct pg_tm *tm, int4 date, bool mayBe2Digit, unsigned int date_flag) { int dterr; - int errlevel = SQL_MODE_STRICT() ? 
ERROR : WARNING; /* YYYYMMDD or YYMMDD*/ tm->tm_mday = date % 100; /* DD */ tm->tm_mon = date / 100 % 100; /* MM */ @@ -742,16 +783,16 @@ int int32_b_format_date_internal(struct pg_tm *tm, int4 date, bool mayBe2Digit, /* validate b format date */ if (tm->tm_year > B_FORMAT_MAX_YEAR_OF_DATE) { dterr = DTERR_FIELD_OVERFLOW; + } else if (is2digits && is_enable_zero_date_bypassed(date_flag) && date == 0) { + return 0; } else if (is2digits && !date_flag && date == 0 && !(SQL_MODE_NO_ZERO_DATE() && SQL_MODE_STRICT())) { return 0; - } else if (is2digits && !date_flag && date > 0 && date < B_FORMAT_DATE_INT_MIN) { - ereport(errlevel, - (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), - errmsg("Out of range value for date"))); + } else if (is2digits && is_enable_error_on_date_less_than_min_value(date_flag) && + date > 0 && date < B_FORMAT_DATE_INT_MIN) { tm->tm_year = 0; tm->tm_mon = 0; tm->tm_mday = 0; - return 0; + return DTERR_FIELD_OVERFLOW; } else { dterr = ValidateDateForBDatabase(is2digits, tm, date_flag); } @@ -2072,6 +2113,39 @@ Datum float4_cast_time(PG_FUNCTION_ARGS) return datum_internal; } +Datum numeric_cast_date(PG_FUNCTION_ARGS) +{ + Numeric n = PG_GETARG_NUMERIC(0); + bool isRetNull = false; + Datum datetime = DirectCall1(&isRetNull, numeric_b_format_datetime, InvalidOid, NumericGetDatum(n)); + if (isRetNull) { + PG_RETURN_NULL(); + } + return DirectFunctionCall1(timestamp_date, datetime); +} + +Datum float8_cast_date(PG_FUNCTION_ARGS) +{ + float8 n = PG_GETARG_FLOAT8(0); + bool isRetNull = false; + Datum datetime = DirectCall1(&isRetNull, float8_b_format_datetime, InvalidOid, Float8GetDatum(n)); + if (isRetNull) { + PG_RETURN_NULL(); + } + return DirectFunctionCall1(timestamp_date, datetime); +} + +Datum float4_cast_date(PG_FUNCTION_ARGS) +{ + float8 n = (float8)PG_GETARG_FLOAT4(0); + bool isRetNull = false; + Datum datetime = DirectCall1(&isRetNull, float8_b_format_datetime, InvalidOid, Float8GetDatum(n)); + if (isRetNull) { + PG_RETURN_NULL(); + } + return 
DirectFunctionCall1(timestamp_date, datetime); +} + Datum uint8_b_format_time(PG_FUNCTION_ARGS) { return DirectFunctionCall1Coll(uint64_b_format_time, InvalidOid, UInt64GetDatum((uint64)PG_GETARG_UINT8(0)), @@ -2100,31 +2174,26 @@ Datum uint64_b_format_time(PG_FUNCTION_ARGS) Datum uint8_cast_time(PG_FUNCTION_ARGS) { - uint64 number = (uint64)PG_GETARG_UINT8(0); - bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); - if (isnull) { - PG_RETURN_NULL(); - } else { - return result; - } + return uint64_number_cast_time(fcinfo, (uint64)PG_GETARG_UINT8(0)); } Datum uint16_cast_time(PG_FUNCTION_ARGS) { - uint64 number = (uint64)PG_GETARG_UINT16(0); - bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); - if (isnull) { - PG_RETURN_NULL(); - } else { - return result; - } + return uint64_number_cast_time(fcinfo, (uint64)PG_GETARG_UINT16(0)); } Datum uint32_cast_time(PG_FUNCTION_ARGS) { - uint64 number = (uint64)PG_GETARG_UINT32(0); + return uint64_number_cast_time(fcinfo, (uint64)PG_GETARG_UINT32(0)); +} + +Datum uint64_cast_time(PG_FUNCTION_ARGS) +{ + return uint64_number_cast_time(fcinfo, (uint64)PG_GETARG_UINT64(0)); +} + +Datum uint64_number_cast_time(PG_FUNCTION_ARGS, uint64 number) +{ bool isnull = false; Datum result = int_cast_time_internal(fcinfo, number, &isnull); if (isnull) { @@ -2134,11 +2203,30 @@ Datum uint32_cast_time(PG_FUNCTION_ARGS) } } -Datum uint64_cast_time(PG_FUNCTION_ARGS) +Datum uint8_cast_date(PG_FUNCTION_ARGS) +{ + return uint64_number_cast_date(fcinfo, (uint64)PG_GETARG_UINT8(0)); +} + +Datum uint16_cast_date(PG_FUNCTION_ARGS) +{ + return uint64_number_cast_date(fcinfo, (uint64)PG_GETARG_UINT16(0)); +} + +Datum uint32_cast_date(PG_FUNCTION_ARGS) +{ + return uint64_number_cast_date(fcinfo, (uint64)PG_GETARG_UINT32(0)); +} + +Datum uint64_cast_date(PG_FUNCTION_ARGS) +{ + return uint64_number_cast_date(fcinfo, (uint64)PG_GETARG_UINT64(0)); +} + +Datum 
uint64_number_cast_date(PG_FUNCTION_ARGS, uint64 number) { - uint64 number = (uint64)PG_GETARG_UINT64(0); bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); + Datum result = int_cast_date_internal(fcinfo, number, &isnull); if (isnull) { PG_RETURN_NULL(); } else { @@ -2146,16 +2234,6 @@ Datum uint64_cast_time(PG_FUNCTION_ARGS) } } -Datum uint_cast_time_internal(PG_FUNCTION_ARGS, uint64 number, bool* isnull) -{ - char *str = DatumGetCString(DirectFunctionCall1(uint8out, UInt64GetDatum(number))); - TimeErrorType time_error_type = TIME_CORRECT; - Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); - if (time_error_type == TIME_INCORRECT) { - *isnull = true; - } - return datum_internal; -} Datum int8_b_format_time(PG_FUNCTION_ARGS) { @@ -2207,31 +2285,26 @@ Datum int64_b_format_time(PG_FUNCTION_ARGS) Datum int8_cast_time(PG_FUNCTION_ARGS) { - int64 number = (int64)PG_GETARG_INT8(0); - bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); - if (isnull) { - PG_RETURN_NULL(); - } else { - return result; - } + return int64_number_cast_time(fcinfo, (int64)PG_GETARG_INT8(0)); } Datum int16_cast_time(PG_FUNCTION_ARGS) { - int64 number = (int64)PG_GETARG_INT16(0); - bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); - if (isnull) { - PG_RETURN_NULL(); - } else { - return result; - } + return int64_number_cast_time(fcinfo, (int64)PG_GETARG_INT16(0)); } Datum int32_cast_time(PG_FUNCTION_ARGS) { - int64 number = (int64)PG_GETARG_INT32(0); + return int64_number_cast_time(fcinfo, (int64)PG_GETARG_INT32(0)); +} + +Datum int64_cast_time(PG_FUNCTION_ARGS) +{ + return int64_number_cast_time(fcinfo, PG_GETARG_INT64(0)); +} + +Datum int64_number_cast_time(PG_FUNCTION_ARGS, int64 number) +{ bool isnull = false; Datum result = int_cast_time_internal(fcinfo, number, &isnull); if (isnull) { @@ -2241,11 +2314,47 @@ Datum int32_cast_time(PG_FUNCTION_ARGS) 
} } -Datum int64_cast_time(PG_FUNCTION_ARGS) + +Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull) +{ + if (number >= (int64)pow_of_10[10]) { /* datetime: 0001-00-00 00-00-00 */ + Datum datetime = DirectFunctionCall1(int64_b_format_datetime, Int64GetDatum(number)); + return DirectFunctionCall1(timestamp_time, datetime); + } + char *str = DatumGetCString(DirectFunctionCall1(int8out, Int64GetDatum(number))); + TimeErrorType time_error_type = TIME_CORRECT; + Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + *isnull = true; + } + return datum_internal; +} + + +Datum int8_cast_date(PG_FUNCTION_ARGS) +{ + return int64_number_cast_date(fcinfo, (int64)PG_GETARG_INT8(0)); +} + +Datum int16_cast_date(PG_FUNCTION_ARGS) +{ + return int64_number_cast_date(fcinfo, (int64)PG_GETARG_INT16(0)); +} + +Datum int32_cast_date(PG_FUNCTION_ARGS) +{ + return int64_number_cast_date(fcinfo, (int64)PG_GETARG_INT32(0)); +} + +Datum int64_cast_date(PG_FUNCTION_ARGS) +{ + return int64_number_cast_date(fcinfo, PG_GETARG_INT64(0)); +} + +Datum int64_number_cast_date(PG_FUNCTION_ARGS, int64 number) { - int64 number = PG_GETARG_INT64(0); bool isnull = false; - Datum result = int_cast_time_internal(fcinfo, number, &isnull); + Datum result = int_cast_date_internal(fcinfo, number, &isnull); if (isnull) { PG_RETURN_NULL(); } else { @@ -2253,21 +2362,52 @@ Datum int64_cast_time(PG_FUNCTION_ARGS) } } -Datum int_cast_time_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull) +/* int4 to b format date type conversion */ +Datum int32_b_format_date(int64 number, bool can_ignore, TimeErrorType* time_error_type) +{ + int4 date = (int4)number; + DateADT result; + struct pg_tm tt, *tm = &tt; + int errlevel = can_ignore && SQL_MODE_STRICT() ? 
ERROR : WARNING; + if (int32_b_format_date_internal(tm, date, true, + (EANBLE_ERROR_ON_DATE_LESS_THAN_MIN | ENABLE_ZERO_DATE_BYPASSED))) { + ereport(errlevel, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("Out of range value for date"))); + tm->tm_year = 0; + tm->tm_mon = 0; + tm->tm_mday = 0; + *time_error_type = TIME_INCORRECT; + } + if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday)) { + ereport(errlevel, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("date out of range: \"%d\"", date))); + tm->tm_year = 0; + tm->tm_mon = 0; + tm->tm_mday = 0; + *time_error_type = TIME_INCORRECT; + } + result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; + PG_RETURN_DATEADT(result); +} + + +Datum int_cast_date_internal(PG_FUNCTION_ARGS, int64 number, bool* isnull) { if (number >= (int64)pow_of_10[10]) { /* datetime: 0001-00-00 00-00-00 */ Datum datetime = DirectFunctionCall1(int64_b_format_datetime, Int64GetDatum(number)); - return DirectFunctionCall1(timestamp_time, datetime); + return DirectFunctionCall1(timestamp_date, datetime); } - char *str = DatumGetCString(DirectFunctionCall1(int8out, Int64GetDatum(number))); TimeErrorType time_error_type = TIME_CORRECT; - Datum datum_internal = time_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + Datum datum_internal = int32_b_format_date(number, fcinfo->can_ignore, &time_error_type); if (time_error_type == TIME_INCORRECT) { *isnull = true; } return datum_internal; } + static char* adjust_b_format_time(char *str, int *timeSign, int *D, bool *hasD) { *timeSign = 1; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index b724c2b48..7b9d8768a 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -359,3 +359,18 @@ DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); DROP 
FUNCTION IF EXISTS pg_catalog.exp(year); DROP FUNCTION IF EXISTS pg_catalog.exp(json); + +DROP CAST IF EXISTS (boolean AS date); +DROP FUNCTION IF EXISTS pg_catalog.bool_date(boolean); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int8_cast_date(int1); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int16_cast_date(int2); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(int4); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(int8); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint1); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint2); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint4); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint8); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float4); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float8); +DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(numeric); + diff --git a/contrib/dolphin/sql/b_compatibility_time_type.sql b/contrib/dolphin/sql/b_compatibility_time_type.sql index 17a64cce8..2eacd2718 100644 --- a/contrib/dolphin/sql/b_compatibility_time_type.sql +++ b/contrib/dolphin/sql/b_compatibility_time_type.sql @@ -665,6 +665,21 @@ drop table if exists t_date; drop table if exists t_datetime; drop table if exists test_timestamp; +set dolphin.b_compatibility_mode = true; +select cast(true as date) as result; +select cast(false as date) as result; +select cast(100::int1 as date) as result; +select cast(100::uint1 as date) as result; +select cast(100::int2 as date) as result; +select cast(100::uint2 as date) as result; +select cast(100::int4 as date) as result; +select cast(100::uint4 as date) as result; +select cast(100::int4 as date) as result; +select cast(100::uint8 as date) as result; +select cast(100::float4 as date) as result; +select cast(100::float8 as date) as result; +select cast(100::numeric as date) as result; + \c postgres DROP DATABASE b_time_type; DROP TABLESPACE 
b_time_type_example; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 471e157d0..df389bd21 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -601,3 +601,31 @@ CREATE OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LAN CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.exp(year) RETURNS numeric LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.exp(cast($1 as numeric))'; CREATE OR REPLACE FUNCTION pg_catalog.exp(json) RETURNS numeric LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.exp(cast($1 as numeric))'; + +CREATE OR REPLACE FUNCTION pg_catalog.int8_cast_date(int1) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int8_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.int16_cast_date(int2) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int16_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.int32_cast_date(int4) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int32_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.int64_cast_date(int8) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int64_cast_date'; + +CREATE OR REPLACE FUNCTION pg_catalog.uint8_cast_date(uint1) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint8_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.uint16_cast_date(uint2) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint16_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.uint32_cast_date(uint4) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint32_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.uint64_cast_date(uint8) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint64_cast_date'; + 
+CREATE OR REPLACE FUNCTION pg_catalog.float4_cast_date(float4) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float4_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.float8_cast_date(float8) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float8_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.numeric_cast_date(numeric) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_cast_date'; + +CREATE OR REPLACE FUNCTION pg_catalog.bool_date(boolean) RETURNS date LANGUAGE SQL IMMUTABLE STRICT as 'select $1::integer::date'; +CREATE CAST (boolean AS date) WITH FUNCTION bool_date(boolean) AS ASSIGNMENT; -- Gitee From 40b583365efd529be3c574a6e34f45b5ebdd66e5 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 15 Jan 2024 15:47:33 +0800 Subject: [PATCH 186/434] Fix generate always grammar. --- contrib/dolphin/expected/test_default.out | 15 +++++++++++++++ contrib/dolphin/plugin_parser/gram.y | 11 ++++++++--- contrib/dolphin/sql/test_default.sql | 5 +++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/test_default.out b/contrib/dolphin/expected/test_default.out index bb0d7b88d..9f8fa32ad 100644 --- a/contrib/dolphin/expected/test_default.out +++ b/contrib/dolphin/expected/test_default.out @@ -124,5 +124,20 @@ drop table t4; drop table t5; drop table t6; drop table t7; +create table t1(a int, b int as (a) stored); +alter table t1 add gc bigint as (a) stored; +show create table t1; + Table | Create Table +-------+----------------------------------------------- + t1 | SET search_path = test_default; + + | CREATE TABLE t1 ( + + | a integer, + + | b integer GENERATED ALWAYS AS (a) STORED,+ + | gc bigint GENERATED ALWAYS AS (a) STORED + + | ) + + | WITH (orientation=row, compression=no); +(1 row) + +drop table t1; reset current_schema; drop schema test_default cascade; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e5485985c..01bf7461b 100644 --- 
a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -10448,7 +10448,7 @@ ColConstraintElem: } #endif } - | GENERATED ALWAYS AS '(' a_expr ')' generated_column_option + | opt_generated_always AS '(' a_expr ')' generated_column_option { #ifdef ENABLE_MULTIPLE_NODES const char* message = "Generated column is not yet supported"; @@ -10459,7 +10459,7 @@ ColConstraintElem: Constraint *n = makeNode(Constraint); n->contype = CONSTR_GENERATED; n->generated_when = ATTRIBUTE_IDENTITY_ALWAYS; - n->raw_expr = $5; + n->raw_expr = $4; n->cooked_expr = NULL; n->location = @1; $$ = (Node *)n; @@ -10507,7 +10507,12 @@ ColConstraintElem: $$ = (Node *)n; } ; - + +opt_generated_always: + GENERATED ALWAYS + | /* EMPTY */ + ; + opt_unique_key: UNIQUE { $$ = NULL; } | UNIQUE KEY diff --git a/contrib/dolphin/sql/test_default.sql b/contrib/dolphin/sql/test_default.sql index 232923a3d..1c45863cb 100644 --- a/contrib/dolphin/sql/test_default.sql +++ b/contrib/dolphin/sql/test_default.sql @@ -39,5 +39,10 @@ drop table t5; drop table t6; drop table t7; +create table t1(a int, b int as (a) stored); +alter table t1 add gc bigint as (a) stored; +show create table t1; +drop table t1; + reset current_schema; drop schema test_default cascade; -- Gitee From 32f3f1115c0810f6f1d2de06711c3002815dcdeb Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Mon, 15 Jan 2024 17:17:13 +0800 Subject: [PATCH 187/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dln=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E4=B8=8D=E6=94=AF=E6=8C=81year=E5=92=8Cjson?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/math_func.out | 62 +++++++++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 2 + .../dolphin/sql/builtin_funcs/math_func.sql | 37 +++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 10 +++ 4 files changed, 111 insertions(+) diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out 
b/contrib/dolphin/expected/builtin_funcs/math_func.out index 6627cda06..799cdbe84 100644 --- a/contrib/dolphin/expected/builtin_funcs/math_func.out +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -201,6 +201,68 @@ select * from test_double_exp order by 1; 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 1096.63315842846 | 2.71828182845905 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 2.71828182845905 | 148.413159102577 | 1 (2 rows) +-- ln math function +select +ln(`int1`), +ln(`uint1`), +ln(`int2`), +ln(`uint2`), +ln(`int4`), +ln(`uint4`), +ln(`int8`), +ln(`uint8`), +ln(`float4`), +ln(`float8`), +ln(`numeric`), +ln(`bit1`), +ln(`bit64`), +ln(`boolean`), +ln(`date`), +ln(`time`), +ln(`time(4)`), +ln(`datetime`), +ln(`datetime(4)`), +ln(`timestamp`), +ln(`timestamp(4)`), +ln(`year`), +ln(`char`), +ln(`varchar`), +ln(`binary`), +ln(`varbinary`), +ln(`tinyblob`), +ln(`blob`), +ln(`mediumblob`), +ln(`longblob`), +ln(`text`), +ln(`enum_t`), +ln(`set_t`), +ln(`json`) +from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: 
invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: ln + ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln | ln +----+--------------------+----+--------------------+----+--------------------+----+--------------------+----+----+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+------------------+--------------------+--------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+--------------------+--------------------+--------------------+---- + 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0 | 0.0000000000000000 | 0.0000000000000000 | 1.9459101490553133 | 0.0000000000000000 | 16.822687342557736 | 12.160290452875046 | 12.160292261823946 | 30.638197909965809 | 30.638197909965832 | 30.638197909965809 | 30.638197909965832 | 7.61233683716775 | 0.2070141693843261 | 0.2070141693843261 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.2070141693843261 | 0.0000000000000000 | 1.6094379124341004 | +(1 row) + select exp(709); exp ----------------------- diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index e260cd555..68a7efd5b 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -391,3 +391,5 @@ DROP 
FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float4); DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float8); DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(numeric); +DROP FUNCTION IF EXISTS pg_catalog.ln(year); +DROP FUNCTION IF EXISTS pg_catalog.ln(json); diff --git a/contrib/dolphin/sql/builtin_funcs/math_func.sql b/contrib/dolphin/sql/builtin_funcs/math_func.sql index 6ac8dd6e2..e83a91a51 100644 --- a/contrib/dolphin/sql/builtin_funcs/math_func.sql +++ b/contrib/dolphin/sql/builtin_funcs/math_func.sql @@ -76,6 +76,43 @@ insert into test_double_exp select exp(`int1`), exp(`uint1`), exp(`int2`), exp(` select * from test_double_exp order by 1; +-- ln math function +select +ln(`int1`), +ln(`uint1`), +ln(`int2`), +ln(`uint2`), +ln(`int4`), +ln(`uint4`), +ln(`int8`), +ln(`uint8`), +ln(`float4`), +ln(`float8`), +ln(`numeric`), +ln(`bit1`), +ln(`bit64`), +ln(`boolean`), +ln(`date`), +ln(`time`), +ln(`time(4)`), +ln(`datetime`), +ln(`datetime(4)`), +ln(`timestamp`), +ln(`timestamp(4)`), +ln(`year`), +ln(`char`), +ln(`varchar`), +ln(`binary`), +ln(`varbinary`), +ln(`tinyblob`), +ln(`blob`), +ln(`mediumblob`), +ln(`longblob`), +ln(`text`), +ln(`enum_t`), +ln(`set_t`), +ln(`json`) +from test_type_table; select exp(709); select exp(710); select exp(-1000); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index b7ef81fa0..ecb4efe80 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -678,3 +678,13 @@ RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_cast_date'; CREATE OR REPLACE FUNCTION pg_catalog.bool_date(boolean) RETURNS date LANGUAGE SQL IMMUTABLE STRICT as 'select $1::integer::date'; CREATE CAST (boolean AS date) WITH FUNCTION bool_date(boolean) AS ASSIGNMENT; + +DROP FUNCTION IF EXISTS pg_catalog.ln(year); +CREATE OR REPLACE FUNCTION pg_catalog.ln(year) +RETURNS 
double precision LANGUAGE SQL IMMUTABLE STRICT as +'select pg_catalog.ln(cast($1 as double precision))'; + +DROP FUNCTION IF EXISTS pg_catalog.ln(json); +CREATE OR REPLACE FUNCTION pg_catalog.ln(json) +RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as +'select pg_catalog.ln(cast($1 as double precision))'; -- Gitee From 0a4dc5a60d080665eeaab9bbc584cdcd651dce04 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Mon, 15 Jan 2024 18:05:36 +0800 Subject: [PATCH 188/434] =?UTF-8?q?=E4=B8=BAbinary=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2=E5=9C=A8=E9=95=BF=E5=BA=A6=E8=B6=85=E8=BF=87?= =?UTF-8?q?typmod=E6=97=B6=E6=B7=BB=E5=8A=A0=E4=B8=A5=E6=A0=BC=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E5=88=A4=E6=96=AD=EF=BC=8C=E5=85=81=E8=AE=B8=E8=BF=9B?= =?UTF-8?q?=E8=A1=8C=E6=88=AA=E6=96=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/test_binary.out | 194 ++++++++++++++++++- contrib/dolphin/plugin_utils/adt/varlena.cpp | 12 +- contrib/dolphin/sql/test_binary.sql | 38 ++++ 3 files changed, 238 insertions(+), 6 deletions(-) diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 6aabeb77f..1cb4f3e6b 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -357,8 +357,13 @@ LINE 1: select 'abc'::binary(-1); ^ CONTEXT: referenced column: binary select 'abc'::binary(1); -ERROR: The input length:3 exceeds the maximum length:1. +WARNING: The input length:3 exceeds the maximum length:1. CONTEXT: referenced column: binary + binary +-------- + \x61 +(1 row) + --binary in like and escape select 'abcd' like binary 'abc%'; ?column? 
@@ -911,6 +916,193 @@ SELECT HEX(s1), HEX(s2), HEX('*') FROM t1; (1 row) DROP TABLE t1; +-- test about set to binary +set dolphin.b_compatibility_mode=on; +set bytea_output=escape; +drop table if exists t_set0004; +NOTICE: table "t_set0004" does not exist, skipping +create table t_set0004( +c1 int not null auto_increment primary key, +c2 set('2011-11-11', '2023-02-28 11:23:00', '2024-01', '2025/01/01') +default null, +c3 set('red', 'yellow', 'blue') not null, +c4 set('0', '1', '1.01314')); +NOTICE: CREATE TABLE will create implicit sequence "t_set0004_c1_seq" for serial column "t_set0004.c1" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c2_set" for column "t_set0004.c2" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c3_set" for column "t_set0004.c3" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c4_set" for column "t_set0004.c4" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_set0004_pkey" for table "t_set0004" +insert into t_set0004(c2, c3, c4) values ('2025/01/01', 'blue', '0'); +insert into t_set0004(c2, c3, c4) values ( +'2011-11-11,2023-02-28 11:23:00', 'red,yellow', '0,1'); +insert into t_set0004(c2, c3, c4) values ( +'2024-01,2011-11-11,2025/01/01', 'red,blue', '0,1.01314'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00', 'red'); +insert into t_set0004(c2, c3) values ( +'2023-02-28 11:23:00,2025/01/01,2025/01/01', 'blue,blue,yellow'); +insert into t_set0004(c3) values ('yellow'); +insert into t_set0004(c3) values ('yellow,yellow,yellow,yellow'); +insert into t_set0004(c3) values ('blue,yellow,red,red'); +insert into t_set0004(c3) values ('blue,red'); +insert into t_set0004(c3, c4) values ('red', '1'); +insert into t_set0004(c3, c4) values ('red,red', '1.01314,1.01314'); +insert into t_set0004(c3, c4) values ('red,blue', '0,1,1.01314'); +select cast(c1 as binary(1)), cast(c2 as binary(1)), cast(c3 as binary(1)), +cast(c4 as binary(1)) from t_set0004 order by 1,2,3,4; +WARNING: The input 
length:10 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:4 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:30 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:10 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:29 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:19 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:30 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:11 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:6 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:6 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:15 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:2 exceeds the maximum length:1. +CONTEXT: referenced column: c1 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:2 exceeds the maximum length:1. +CONTEXT: referenced column: c1 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:7 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:2 exceeds the maximum length:1. 
+CONTEXT: referenced column: c1 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:11 exceeds the maximum length:1. +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 +----+----+----+---- + 1 | | r | 0 + 1 | | r | 1 + 1 | | r | 1 + 1 | 2 | b | 0 + 2 | 2 | r | 0 + 3 | 2 | r | 0 + 4 | 2 | r | + 5 | 2 | y | + 6 | | y | + 7 | | y | + 8 | | r | + 9 | | r | +(12 rows) + +select convert(c1, binary(1)), convert(c2, binary(1)), convert(c3, binary(1)), +convert(c4, binary(1)) from t_set0004 order by 1,2,3,4; +WARNING: The input length:10 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:4 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:30 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:10 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:29 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:19 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:30 exceeds the maximum length:1. +CONTEXT: referenced column: c2 +WARNING: The input length:11 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:6 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:6 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:15 exceeds the maximum length:1. 
+CONTEXT: referenced column: c3 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:2 exceeds the maximum length:1. +CONTEXT: referenced column: c1 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:2 exceeds the maximum length:1. +CONTEXT: referenced column: c1 +WARNING: The input length:3 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:7 exceeds the maximum length:1. +CONTEXT: referenced column: c4 +WARNING: The input length:2 exceeds the maximum length:1. +CONTEXT: referenced column: c1 +WARNING: The input length:8 exceeds the maximum length:1. +CONTEXT: referenced column: c3 +WARNING: The input length:11 exceeds the maximum length:1. +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 +----+----+----+---- + 1 | | r | 0 + 1 | | r | 1 + 1 | | r | 1 + 1 | 2 | b | 0 + 2 | 2 | r | 0 + 3 | 2 | r | 0 + 4 | 2 | r | + 5 | 2 | y | + 6 | | y | + 7 | | y | + 8 | | r | + 9 | | r | +(12 rows) + +select cast('2023-1-12' as binary(1)); +WARNING: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: binary + binary +-------- + 2 +(1 row) + +drop table t_set0004; +drop table if exists test_ignore; +NOTICE: table "test_ignore" does not exist, skipping +create table test_ignore (a binary(1), b varbinary(1)); +insert into test_ignore(a) values(cast('2023-1-12' as binary(1))); +ERROR: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: a +insert into test_ignore(b) values(cast('2023-1-12' as binary(1))); +ERROR: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: b +insert ignore into test_ignore values(cast('2023-1-12' as binary(1)), cast('2023-1-12' as varbinary(1))); +WARNING: The input length:9 exceeds the maximum length:1. +CONTEXT: referenced column: a +WARNING: The input length:9 exceeds the maximum length:1. 
+CONTEXT: referenced column: b +drop table test_ignore; drop table if exists binary_operator; reset dolphin.b_compatibility_mode; drop schema test_binary cascade; diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 15498e48c..1332f45ac 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -192,7 +192,7 @@ static text* get_result_of_concat(text* result, FunctionCallInfo fcinfo); static void check_blob_size(Datum blob, int64 max_size); static int32 anybinary_typmodin(ArrayType* ta, const char* typname, uint32 max); static char* anybinary_typmodout(int32 typmod); -static Datum copy_binary(Datum source, int typmod, bool target_is_var); +static Datum copy_binary(Datum source, int typmod, bool target_is_var, bool can_ignore); static bytea* copy_blob(bytea* source, int64 max_size); static CmpType get_cmp_type(CmpType a, CmpType b); static bool is_unsigned_intType(Oid oid); @@ -8502,13 +8502,15 @@ Datum text_interval(PG_FUNCTION_ARGS) } #ifdef DOLPHIN -static Datum copy_binary(Datum source, int typmod, bool target_is_var) +static Datum copy_binary(Datum source, int typmod, bool target_is_var, bool can_ignore) { int maxlen = typmod - (int32)VARHDRSZ; int length = VARSIZE(source) - VARHDRSZ; if (maxlen > 0 && length > maxlen) { - ereport(ERROR, (errmsg("The input length:%d exceeds the maximum length:%d.", length, maxlen))); + int elevel = (SQL_MODE_STRICT() && !can_ignore) ? 
ERROR : WARNING; + ereport(elevel, (errmsg("The input length:%d exceeds the maximum length:%d.", length, maxlen))); + length = maxlen; } char* data = NULL; @@ -8634,14 +8636,14 @@ Datum bytea2binary(PG_FUNCTION_ARGS) ereport(ERROR, (errmsg("The input length:%d exceeds the maximum length:0.", length))); } - return copy_binary(PointerGetDatum(source), maxlen, false); + return copy_binary(PointerGetDatum(source), maxlen, false, fcinfo->can_ignore); } Datum bytea2var(PG_FUNCTION_ARGS) { bytea* source = PG_GETARG_BYTEA_P(0); int32 maxlen = PG_GETARG_INT32(1); - return copy_binary(PointerGetDatum(source), maxlen, true); + return copy_binary(PointerGetDatum(source), maxlen, true, fcinfo->can_ignore); } Datum tinyblob_rawin(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/sql/test_binary.sql b/contrib/dolphin/sql/test_binary.sql index 113ffd218..587c71cfa 100644 --- a/contrib/dolphin/sql/test_binary.sql +++ b/contrib/dolphin/sql/test_binary.sql @@ -260,6 +260,44 @@ SELECT HEX(concat('*', s1, '*', s2, '*')) FROM t1; SELECT HEX(s1), HEX(s2), HEX('*') FROM t1; DROP TABLE t1; +-- test about set to binary +set dolphin.b_compatibility_mode=on; +set bytea_output=escape; +drop table if exists t_set0004; +create table t_set0004( +c1 int not null auto_increment primary key, +c2 set('2011-11-11', '2023-02-28 11:23:00', '2024-01', '2025/01/01') +default null, +c3 set('red', 'yellow', 'blue') not null, +c4 set('0', '1', '1.01314')); +insert into t_set0004(c2, c3, c4) values ('2025/01/01', 'blue', '0'); +insert into t_set0004(c2, c3, c4) values ( +'2011-11-11,2023-02-28 11:23:00', 'red,yellow', '0,1'); +insert into t_set0004(c2, c3, c4) values ( +'2024-01,2011-11-11,2025/01/01', 'red,blue', '0,1.01314'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00', 'red'); +insert into t_set0004(c2, c3) values ( +'2023-02-28 11:23:00,2025/01/01,2025/01/01', 'blue,blue,yellow'); +insert into t_set0004(c3) values ('yellow'); +insert into t_set0004(c3) values ('yellow,yellow,yellow,yellow'); 
+insert into t_set0004(c3) values ('blue,yellow,red,red'); +insert into t_set0004(c3) values ('blue,red'); +insert into t_set0004(c3, c4) values ('red', '1'); +insert into t_set0004(c3, c4) values ('red,red', '1.01314,1.01314'); +insert into t_set0004(c3, c4) values ('red,blue', '0,1,1.01314'); +select cast(c1 as binary(1)), cast(c2 as binary(1)), cast(c3 as binary(1)), +cast(c4 as binary(1)) from t_set0004 order by 1,2,3,4; +select convert(c1, binary(1)), convert(c2, binary(1)), convert(c3, binary(1)), +convert(c4, binary(1)) from t_set0004 order by 1,2,3,4; +select cast('2023-1-12' as binary(1)); +drop table t_set0004; +drop table if exists test_ignore; +create table test_ignore (a binary(1), b varbinary(1)); +insert into test_ignore(a) values(cast('2023-1-12' as binary(1))); +insert into test_ignore(b) values(cast('2023-1-12' as binary(1))); +insert ignore into test_ignore values(cast('2023-1-12' as binary(1)), cast('2023-1-12' as varbinary(1))); +drop table test_ignore; + drop table if exists binary_operator; reset dolphin.b_compatibility_mode; -- Gitee From 41bf92050455b4689e38494d9a60a69d60b25c61 Mon Sep 17 00:00:00 2001 From: totaj Date: Mon, 15 Jan 2024 18:10:58 +0800 Subject: [PATCH 189/434] Fix convert bug. 
--- .../expected/builtin_funcs/convert.out | 25 ++++++++++++++ .../case_sensitive_test/create_view5.out | 2 ++ .../dolphin/expected/column_quote_alias.out | 10 ++++++ .../expected/convert_truncated_warning.out | 8 ++--- contrib/dolphin/expected/mysqlmode_strict.out | 34 +++++++++++++++++++ .../dolphin/expected/mysqlmode_strict2.out | 22 ++++++++++++ .../string_func_test/db_b_compress_test.out | 6 ++++ .../string_func_test/db_b_nameconst_test.out | 2 ++ contrib/dolphin/plugin_parser/gram.y | 3 +- contrib/dolphin/plugin_utils/adt/varchar.cpp | 18 +++------- contrib/dolphin/sql/builtin_funcs/convert.sql | 5 +++ 11 files changed, 117 insertions(+), 18 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/convert.out b/contrib/dolphin/expected/builtin_funcs/convert.out index 48bd0e9bf..33925827e 100644 --- a/contrib/dolphin/expected/builtin_funcs/convert.out +++ b/contrib/dolphin/expected/builtin_funcs/convert.out @@ -88,5 +88,30 @@ select convert(1 using decimal(10,3)); 1.000 (1 row) +select pg_typeof(convert('1', char)); + pg_typeof +----------- + character +(1 row) + +select pg_typeof(convert('1', char(10))); + pg_typeof +----------- + character +(1 row) + +set dolphin.b_compatibility_mode = on; +select pg_typeof(convert('1', char)); + pg_typeof +------------------- + character varying +(1 row) + +select pg_typeof(convert('1', char(10))); + pg_typeof +------------------- + character varying +(1 row) + drop schema db_convert cascade; reset current_schema; diff --git a/contrib/dolphin/expected/case_sensitive_test/create_view5.out b/contrib/dolphin/expected/case_sensitive_test/create_view5.out index 2a4fc4a22..8d1d2e34f 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_view5.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_view5.out @@ -151,6 +151,8 @@ SELECT * FROM tt1; (2 rows) SELECT a::varchar(3) FROM tt1; +WARNING: value too long for type character varying(3) +CONTEXT: referenced column: a a ----- abc diff --git 
a/contrib/dolphin/expected/column_quote_alias.out b/contrib/dolphin/expected/column_quote_alias.out index 6460eded2..53ad61bdd 100644 --- a/contrib/dolphin/expected/column_quote_alias.out +++ b/contrib/dolphin/expected/column_quote_alias.out @@ -5,6 +5,8 @@ NOTICE: table "t_alias_case0001_1" does not exist, skipping CREATE TABLE t_alias_case0001_1 (a int); INSERT INTO t_alias_case0001_1 SET a=1; select character varying 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character varying(1) +CONTEXT: referenced column: varchar varchar --------- k @@ -37,12 +39,16 @@ select national varchar 'keyword_test' from t_alias_case0001_1; (1 row) select VARCHAR 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character varying(1) +CONTEXT: referenced column: varchar varchar --------- k (1 row) select VARCHAR2 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character varying(1) +CONTEXT: referenced column: varchar varchar --------- k @@ -73,6 +79,8 @@ CONTEXT: referenced column: bpchar (1 row) select NCHAR VARYING 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character varying(1) +CONTEXT: referenced column: varchar varchar --------- k @@ -121,6 +129,8 @@ CONTEXT: referenced column: bpchar (1 row) select char varying(1) 'keyword_test' from t_alias_case0001_1; +WARNING: value too long for type character varying(1) +CONTEXT: referenced column: varchar varchar --------- k diff --git a/contrib/dolphin/expected/convert_truncated_warning.out b/contrib/dolphin/expected/convert_truncated_warning.out index e86f6fcc0..465f9d585 100644 --- a/contrib/dolphin/expected/convert_truncated_warning.out +++ b/contrib/dolphin/expected/convert_truncated_warning.out @@ -3,10 +3,10 @@ set current_schema = convert_truncated_warning; set dolphin.sql_mode = default; set dolphin.b_compatibility_mode = on; SELECT CONVERT(TIMESTAMP '2004-01-22 21:45:33',CHAR(4)); -WARNING: value too long for type 
character(4) -CONTEXT: referenced column: bpchar - bpchar --------- +WARNING: value too long for type character varying(4) +CONTEXT: referenced column: varchar + varchar +--------- 2004 (1 row) diff --git a/contrib/dolphin/expected/mysqlmode_strict.out b/contrib/dolphin/expected/mysqlmode_strict.out index 083eb967f..0dc0344a0 100644 --- a/contrib/dolphin/expected/mysqlmode_strict.out +++ b/contrib/dolphin/expected/mysqlmode_strict.out @@ -262,8 +262,14 @@ select * from test_char; (4 rows) insert into test_varchar7(a) values('adsfsdaf'); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values('-1234998.55'); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values(13434.77); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values(13434.8); select * from test_varchar7; a @@ -956,6 +962,20 @@ select * from test_varchar7_1; delete from test_varchar7_1; insert into test_varchar7_1 select * from test_int; +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a select * from test_varchar7_1; a --------- @@ -976,6 +996,20 @@ select * from test_varchar7_1; delete from test_varchar7_1; insert into test_varchar7_1 select * from test_bint; +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long 
for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a select * from test_varchar7_1; a --------- diff --git a/contrib/dolphin/expected/mysqlmode_strict2.out b/contrib/dolphin/expected/mysqlmode_strict2.out index 125bdf01a..7ad1cc501 100644 --- a/contrib/dolphin/expected/mysqlmode_strict2.out +++ b/contrib/dolphin/expected/mysqlmode_strict2.out @@ -278,8 +278,14 @@ select * from test_char; (4 rows) insert into test_varchar7(a) values('adsfsdaf'); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values('-1234998.55'); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values(13434.77); +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a insert into test_varchar7(a) values(13434.8); select * from test_varchar7; a @@ -940,6 +946,14 @@ select * from test_varchar7_1; delete from test_varchar7_1; insert into test_varchar7_1 select * from test_int; +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a select * from test_varchar7_1; a --------- @@ -960,6 +974,14 @@ select * from test_varchar7_1; delete from test_varchar7_1; insert into test_varchar7_1 select * from test_bint; +WARNING: value too long 
for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a +WARNING: value too long for type character varying(7) +CONTEXT: referenced column: a select * from test_varchar7_1; a --------- diff --git a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out index 91561a1a4..ebabbf64a 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_compress_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_compress_test.out @@ -131,6 +131,8 @@ CONTEXT: referenced column: hex (1 row) SELECT HEX(COMPRESS('string for test compress function '::varchar(10))); +WARNING: value too long for type character varying(10) +CONTEXT: referenced column: hex hex ---------------------------------------------- 0a000000789c2b2e29cacc4b5748cb2f020016a403ff @@ -414,6 +416,8 @@ CONTEXT: referenced column: uncompress (1 row) SELECT UNCOMPRESS(COMPRESS('string for test compress function '::varchar(10))); +WARNING: value too long for type character varying(10) +CONTEXT: referenced column: uncompress uncompress ------------ string for @@ -751,6 +755,8 @@ CONTEXT: referenced column: uncompressed_length (1 row) SELECT UNCOMPRESSED_LENGTH(COMPRESS('string for test compress function '::varchar(10))); +WARNING: value too long for type character varying(10) +CONTEXT: referenced column: uncompressed_length uncompressed_length --------------------- 10 diff --git a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out index 15aab5342..a413edb3b 100644 --- a/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out +++ b/contrib/dolphin/expected/string_func_test/db_b_nameconst_test.out @@ -261,6 +261,8 @@ CONTEXT: referenced column: name_const (1 row) SELECT 
NAME_CONST('2023-06-01 00:23:59'::varchar(10), 'test'); +WARNING: value too long for type character varying(10) +CONTEXT: referenced column: name_const 2023-06-01 ------------ test diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e5485985c..5d200e0bb 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -34295,7 +34295,8 @@ func_expr_common_subexpr: } | CONVERT '(' a_expr ',' Typename ')' { - $$ = makeTypeCast($3, $5, @1); + ChangeBpcharCastType($5); + $$ = makeTypeCast($3, $5, @1); } | CONVERT '(' a_expr ',' UNSIGNED INTEGER ')' { diff --git a/contrib/dolphin/plugin_utils/adt/varchar.cpp b/contrib/dolphin/plugin_utils/adt/varchar.cpp index b9d06669a..0102a6b26 100644 --- a/contrib/dolphin/plugin_utils/adt/varchar.cpp +++ b/contrib/dolphin/plugin_utils/adt/varchar.cpp @@ -680,12 +680,10 @@ Datum varchar_launch(bool can_ignore, VarChar* source, int32 &typmod, bool isExp ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type character varying(%d)", maxlen))); - /* truncate multibyte string preserving multibyte boundary */ maxmblen = pg_mbcharcliplen(s_data, len, maxlen); - - if (!isExplicit) { #ifndef DOLPHIN + if (!isExplicit) { for (i = maxmblen; i < len; i++) if (s_data[i] != ' ') { ereport(can_ignore ? WARNING : ERROR, @@ -702,22 +700,16 @@ Datum varchar_launch(bool can_ignore, VarChar* source, int32 &typmod, bool isExp * 2. With can_ignore == false && SQL_MODE_STRICT() == false, do nothing but break in order to keep functionality integrity of SQL MODE * 3. With can_ignore == false && SQL_MODE_STRICT() == true, we raise ERROR. */ - if (can_ignore) { - ereport(WARNING, - (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), - errmsg("value too long for type character varying(%d)", maxlen))); - break; - } else if (!SQL_MODE_STRICT()) { - break; - } else { - ereport(ERROR, + ereport(can_ignore || !SQL_MODE_STRICT() ? 
WARNING : ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type character varying(%d)", maxlen))); - } + break; } } #endif +#ifndef DOLPHIN } +#endif PG_RETURN_VARCHAR_P((VarChar*)cstring_to_text_with_len(s_data, maxmblen)); } diff --git a/contrib/dolphin/sql/builtin_funcs/convert.sql b/contrib/dolphin/sql/builtin_funcs/convert.sql index 2f3ef1225..516e10cb0 100644 --- a/contrib/dolphin/sql/builtin_funcs/convert.sql +++ b/contrib/dolphin/sql/builtin_funcs/convert.sql @@ -15,5 +15,10 @@ select convert('测试' using gbk); select convert('测试' using latin1); select convert(11.1, decimal(10,3)); select convert(1 using decimal(10,3)); +select pg_typeof(convert('1', char)); +select pg_typeof(convert('1', char(10))); +set dolphin.b_compatibility_mode = on; +select pg_typeof(convert('1', char)); +select pg_typeof(convert('1', char(10))); drop schema db_convert cascade; reset current_schema; -- Gitee From 9adfc572ca0976594f607a05dbd9f8d5bd036cd1 Mon Sep 17 00:00:00 2001 From: li-qinlang Date: Tue, 16 Jan 2024 09:48:38 +0800 Subject: [PATCH 190/434] =?UTF-8?q?=E6=96=B0=E5=A2=9EMySQL=E5=8D=8F?= =?UTF-8?q?=E8=AE=AEhot=5Fstandby=E6=A8=A1=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_protocol/auth.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/contrib/dolphin/plugin_protocol/auth.cpp b/contrib/dolphin/plugin_protocol/auth.cpp index 303035dd5..1d5a92bbf 100644 --- a/contrib/dolphin/plugin_protocol/auth.cpp +++ b/contrib/dolphin/plugin_protocol/auth.cpp @@ -42,6 +42,12 @@ static bool exec_native_password_auth(Port *port); int dophin_conn_handshake(Port* port) { + if (t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && + (!g_instance.attr.attr_network.dolphin_hot_standby || !g_instance.attr.attr_storage.EnableHotStandby)) { + ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW), + errmsg("can not accept connection if dolphin hot standby off"))); + 
} + StringInfo buf = makeStringInfo(); next_seqid = 0; -- Gitee From 9e5f64345b6e37da02afc6f45ab77ad5be6227e3 Mon Sep 17 00:00:00 2001 From: Julong-Li <584147810@qq.com> Date: Tue, 16 Jan 2024 11:47:31 +0800 Subject: [PATCH 191/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8DB=E5=BA=93char?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E8=A1=A5=E9=9B=B6=E9=80=BB=E8=BE=91=E5=92=8C?= =?UTF-8?q?length=E5=87=BD=E6=95=B0=E9=80=BB=E8=BE=91=E4=B8=8Emysql?= =?UTF-8?q?=E4=B8=80=E8=87=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/db_b_parser2.out | 2 +- .../expected/pad_char_to_full_length.out | 20 ++++++- contrib/dolphin/expected/prefixkey_index.out | 56 +++++++++---------- .../dolphin/expected/test_enum_collation.out | 8 +-- contrib/dolphin/plugin_utils/adt/varchar.cpp | 20 +++++-- .../dolphin/sql/pad_char_to_full_length.sql | 8 +++ 6 files changed, 76 insertions(+), 38 deletions(-) diff --git a/contrib/dolphin/expected/db_b_parser2.out b/contrib/dolphin/expected/db_b_parser2.out index de0c19c29..cf09b17fb 100644 --- a/contrib/dolphin/expected/db_b_parser2.out +++ b/contrib/dolphin/expected/db_b_parser2.out @@ -767,7 +767,7 @@ CONTEXT: referenced column: fchar select fchar,length(fchar) from fchar_test order by 1,2; fchar | length ----------------------+-------- - é›¶ä¸€äºŒä¸‰å››äº”å…­ä¸ƒå…«ä¹ | 10 + é›¶ä¸€äºŒä¸‰å››äº”å…­ä¸ƒå…«ä¹ | 30 (1 row) drop table fchar_test; diff --git a/contrib/dolphin/expected/pad_char_to_full_length.out b/contrib/dolphin/expected/pad_char_to_full_length.out index c37e9a122..b0e7eabf1 100644 --- a/contrib/dolphin/expected/pad_char_to_full_length.out +++ b/contrib/dolphin/expected/pad_char_to_full_length.out @@ -596,6 +596,23 @@ select * from t_col where name not like 'Test %'; test (3 rows) +--To test the length of a multi-byte character string. 
+set dolphin.sql_mode = 'pad_char_to_full_length'; +create table test_mb_char(a char(10)); +insert into test_mb_char values('谢谢你'); +select a, length(a), char_length(a), length(trim(a)) from test_mb_char; + a | length | char_length | length +---------------+--------+-------------+-------- + 谢谢你 | 16 | 10 | 9 +(1 row) + +set dolphin.sql_mode = ''; +select a, length(a), char_length(a), length(trim(a)) from test_mb_char; + a | length | char_length | length +--------+--------+-------------+-------- + 谢谢你 | 9 | 3 | 9 +(1 row) + set try_vector_engine_strategy='force'; select char_length(name), length(name) from t_col; char_length | length @@ -617,7 +634,8 @@ select char_length(name), length(name) from t_col; (3 rows) drop schema pad_char_to_full_length cascade; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table t drop cascades to table t_col +drop cascades to table test_mb_char reset current_schema; diff --git a/contrib/dolphin/expected/prefixkey_index.out b/contrib/dolphin/expected/prefixkey_index.out index 76c9ec9ed..bc52a9ab2 100644 --- a/contrib/dolphin/expected/prefixkey_index.out +++ b/contrib/dolphin/expected/prefixkey_index.out @@ -400,13 +400,13 @@ insert into test_prefix_key_len select length(fchar),length(ftext),length(fbytea) from test_prefix_key_len; length | length | length --------+--------+-------- - 3000 | 9000 | 9000 + 10096 | 9000 | 9000 (1 row) select lengthb(fchar),lengthb(ftext),octet_length(fbytea) from test_prefix_key_len; lengthb | lengthb | octet_length ---------+---------+-------------- - 9000 | 9000 | 9000 + 10096 | 9000 | 9000 (1 row) CREATE INDEX idx_prefix_len_test_fchar ON test_prefix_key_len (fchar(2677)); @@ -1420,9 +1420,9 @@ SELECT fchar FROM test_prefix_table where fchar ~ '^å¼€' ORDER BY 1; (5 rows) SELECT fchar FROM test_prefix_table where fchar ~ '^å¼€' ORDER BY 1; - fchar -------------------------------------------------------------- - å¼€æºæ•°æ®åº“-210 + fchar 
+----------------------------------------------------------------------- + å¼€æºæ•°æ®åº“-210 (1 row) EXPLAIN (costs false) @@ -1888,8 +1888,8 @@ SELECT t1.ftext,t2.fchar FROM test_prefix_table t1 join test_prefix_table t2 on (7 rows) SELECT t1.ftext,t2.fchar FROM test_prefix_table t1 join test_prefix_table t2 on t1.ftext = t2.fchar ORDER BY 1,2; - ftext | fchar -----------------+------------------------------------------------------------------ + ftext | fchar +----------------+----------------------------------------------------------------------- AAAAA-000 | AAAAA-000 AAAAA-111 | AAAAA-111 AAAAA-210 | AAAAA-210 @@ -1907,9 +1907,9 @@ SELECT t1.ftext,t2.fchar FROM test_prefix_table t1 join test_prefix_table t2 on ZZZZZ-210 | ZZZZZ-210 ZZZZZ-211 | ZZZZZ-211 ZZZZZ-300 | ZZZZZ-300 - å¼€æºæ•°æ®åº“-210 | å¼€æºæ•°æ®åº“-210 - 高 | 高 - 高斯数æ®åº“-210 | 高斯数æ®åº“-210 + å¼€æºæ•°æ®åº“-210 | å¼€æºæ•°æ®åº“-210 + 高 | 高 + 高斯数æ®åº“-210 | 高斯数æ®åº“-210 (20 rows) EXPLAIN (costs false) @@ -2124,11 +2124,11 @@ SELECT fbpchar FROM test_prefix_tb where fbpchar > 'å¼€æº' ORDER BY 1; (5 rows) SELECT fbpchar FROM test_prefix_tb where fbpchar > 'å¼€æº' ORDER BY 1; - fbpchar ------------------------------------------------------------------ - å¼€æºæ•°æ®åº“-210 - 高 - 高斯数æ®åº“-210 + fbpchar +----------------------------------------------------------------------- + å¼€æºæ•°æ®åº“-210 + 高 + 高斯数æ®åº“-210 (3 rows) ALTER INDEX prefix_index_fchar_fbytea UNUSABLE; @@ -2536,9 +2536,9 @@ SELECT fchar FROM test_prefix_ustore t where fchar ~ '^å¼€' ORDER BY 1; (5 rows) SELECT fchar FROM test_prefix_ustore t where fchar ~ '^å¼€' ORDER BY 1; - fchar -------------------------------------------------------------- - å¼€æºæ•°æ®åº“-210 + fchar +----------------------------------------------------------------------- + å¼€æºæ•°æ®åº“-210 (1 row) EXPLAIN (costs false) @@ -2997,8 +2997,8 @@ SELECT t1.ftext,t2.fchar FROM test_prefix_ustore t1 join test_prefix_ustore t2 o (7 rows) SELECT t1.ftext,t2.fchar FROM 
test_prefix_ustore t1 join test_prefix_ustore t2 on t1.ftext = t2.fchar ORDER BY 1,2; - ftext | fchar -----------------+------------------------------------------------------------------ + ftext | fchar +----------------+----------------------------------------------------------------------- AAAAA-000 | AAAAA-000 AAAAA-111 | AAAAA-111 AAAAA-210 | AAAAA-210 @@ -3016,9 +3016,9 @@ SELECT t1.ftext,t2.fchar FROM test_prefix_ustore t1 join test_prefix_ustore t2 o ZZZZZ-210 | ZZZZZ-210 ZZZZZ-211 | ZZZZZ-211 ZZZZZ-300 | ZZZZZ-300 - å¼€æºæ•°æ®åº“-210 | å¼€æºæ•°æ®åº“-210 - 高 | 高 - 高斯数æ®åº“-210 | 高斯数æ®åº“-210 + å¼€æºæ•°æ®åº“-210 | å¼€æºæ•°æ®åº“-210 + 高 | 高 + 高斯数æ®åº“-210 | 高斯数æ®åº“-210 (20 rows) EXPLAIN (costs false) @@ -3223,11 +3223,11 @@ SELECT fbpchar FROM test_prefix_utb where fbpchar > 'å¼€æº' ORDER BY 1; (5 rows) SELECT fbpchar FROM test_prefix_utb where fbpchar > 'å¼€æº' ORDER BY 1; - fbpchar ------------------------------------------------------------------ - å¼€æºæ•°æ®åº“-210 - 高 - 高斯数æ®åº“-210 + fbpchar +----------------------------------------------------------------------- + å¼€æºæ•°æ®åº“-210 + 高 + 高斯数æ®åº“-210 (3 rows) ALTER INDEX prefix_index_fchar_fbytea UNUSABLE; diff --git a/contrib/dolphin/expected/test_enum_collation.out b/contrib/dolphin/expected/test_enum_collation.out index e26825ea6..8ece61621 100644 --- a/contrib/dolphin/expected/test_enum_collation.out +++ b/contrib/dolphin/expected/test_enum_collation.out @@ -598,10 +598,10 @@ select a from test_collation2 where a = '高斯ss'; (0 rows) select a from test_collation2 where a = '高斯sS' collate utf8_bin; - a ----------- - 高斯sS - 高斯sS + a +-------------- + 高斯sS + 高斯sS (2 rows) alter table test_collation2 modify a enum('高斯sS', '汉字sS', 'aaa', 'bbb'); diff --git a/contrib/dolphin/plugin_utils/adt/varchar.cpp b/contrib/dolphin/plugin_utils/adt/varchar.cpp index b9d06669a..c31d5c717 100644 --- a/contrib/dolphin/plugin_utils/adt/varchar.cpp +++ b/contrib/dolphin/plugin_utils/adt/varchar.cpp @@ -40,6 +40,9 @@ } \ } 
while (0) +#define CHAR_LENGTH_FUNC_OID 1372 +#define CHARACTER_LENGTH_FUNC_OID 1367 + int bpcharcase(PG_FUNCTION_ARGS); /* common code for bpchartypmodin and varchartypmodin */ @@ -302,7 +305,9 @@ Datum bpchar_launch(bool can_ignore, BpChar* source, int32 &maxlen, bool isExpli PG_RETURN_BPCHAR_P(source); maxlen -= VARHDRSZ; - +#ifdef DOLPHIN + int maxCharLen = maxlen; +#endif len = VARSIZE_ANY_EXHDR(source); s = VARDATA_ANY(source); @@ -375,7 +380,12 @@ Datum bpchar_launch(bool can_ignore, BpChar* source, int32 &maxlen, bool isExpli } Assert(maxlen >= len); - +#ifdef DOLPHIN + int padLen = maxCharLen - pg_mbstrlen_with_len(VARDATA_ANY(source), len); + if (padLen > 0) { + maxlen = len + padLen; + } +#endif result = (BpChar*)palloc(maxlen + VARHDRSZ); SET_VARSIZE(result, maxlen + VARHDRSZ); r = VARDATA(result); @@ -801,7 +811,7 @@ int bpchartruelen(const char* s, int len) return i + 1; } -// return number of char in a char(n) type string. +// return number of char or byte in a char(n) type string. // when calculating the length,we do not ignoring the trailing // spaces, which is different with pg9.2 Datum bpcharlen(PG_FUNCTION_ARGS) @@ -818,12 +828,14 @@ Datum bpcharlen(PG_FUNCTION_ARGS) len = bcTruelen(arg); /* in multibyte encoding, convert to number of characters */ - if (pg_database_encoding_max_length() != 1) + if (pg_database_encoding_max_length() != 1 && (fcinfo->flinfo->fn_oid == CHAR_LENGTH_FUNC_OID || + fcinfo->flinfo->fn_oid == CHARACTER_LENGTH_FUNC_OID)) len = pg_mbstrlen_with_len(VARDATA_ANY(arg), len); PG_RETURN_INT32(len); } + // return number of byte in a char(n) type string. // when calculating the length,we do not ignoring the trailing spaces. 
Datum bpcharlenb(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/sql/pad_char_to_full_length.sql b/contrib/dolphin/sql/pad_char_to_full_length.sql index 269acb0e2..2ad60a4db 100644 --- a/contrib/dolphin/sql/pad_char_to_full_length.sql +++ b/contrib/dolphin/sql/pad_char_to_full_length.sql @@ -134,6 +134,14 @@ select * from t_col where name not like 'test %'; select * from t_col where name like 'Test %'; select * from t_col where name not like 'Test %'; +--To test the length of a multi-byte character string. +set dolphin.sql_mode = 'pad_char_to_full_length'; +create table test_mb_char(a char(10)); +insert into test_mb_char values('谢谢你'); +select a, length(a), char_length(a), length(trim(a)) from test_mb_char; +set dolphin.sql_mode = ''; +select a, length(a), char_length(a), length(trim(a)) from test_mb_char; + set try_vector_engine_strategy='force'; select char_length(name), length(name) from t_col; set try_vector_engine_strategy='off'; -- Gitee From bc0aa3247b6802d139d93756b17acb2ddf8a13a2 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Tue, 16 Jan 2024 19:02:38 +0800 Subject: [PATCH 192/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8V51J=E6=89=80=E7=A4=BA=E7=9A=84str=5Fto=5Fd?= =?UTF-8?q?ate=E5=85=A5=E5=8F=82=E4=B8=BAbool=E7=9A=84=E9=97=AE=E9=A2=98.?= =?UTF-8?q?=20=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8V51J=E6=89=80=E7=A4=BA=E7=9A=84str=5Fto=5Fd?= =?UTF-8?q?ate=E5=85=A5=E5=8F=82=E4=B8=BAbool=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=20st?= =?UTF-8?q?r=5Fto=5Fdate=E5=85=A5=E5=8F=82=E4=B8=BAtext=E5=92=8Ctext,?= =?UTF-8?q?=E6=9C=AA=E6=94=AF=E6=8C=81bool=E7=B1=BB=E5=9E=8B=E5=85=A5?= =?UTF-8?q?=E5=8F=82=EF=BC=8C=E5=9B=A0=E6=AD=A4bool=E5=85=A5=E5=8F=82?= =?UTF-8?q?=E6=97=B6=E5=80=99=E6=8A=9B=E5=BC=82=E5=B8=B8=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=20=E5=AE=9E?= 
=?UTF-8?q?=E7=8E=B0bool=E7=B1=BB=E5=9E=8B=E7=9A=84str=5Fto=5Fdate?= =?UTF-8?q?=EF=BC=8C=E5=8F=A6=E5=A4=96=E4=B9=9F=E4=BF=AE=E5=A4=8Dstr=5Fto?= =?UTF-8?q?=5Fdate=E7=9A=84=E5=85=B6=E4=BB=96=E7=B1=BB=E5=9E=8B=E9=97=AE?= =?UTF-8?q?=E9=A2=98=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82?= =?UTF-8?q?=E6=88=96issue=E3=80=91:=20=E5=9B=A0=E4=B8=BAbool=20=E8=BD=ACte?= =?UTF-8?q?xt=20=E4=B8=8D=E6=94=AF=E6=8C=81=E5=88=A0=E9=99=A4=EF=BC=8C?= =?UTF-8?q?=E5=9B=A0=E6=AD=A4=E5=8F=AA=E8=83=BD=E6=96=B0=E5=BB=BAbool?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E7=9A=84=E5=85=A5=E5=8F=82=E7=9A=84str=5Fto?= =?UTF-8?q?=5Fdate=E5=87=BD=E6=95=B0=E8=A7=A3=E5=86=B3=20=E3=80=90?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E7=B1=BB=E5=9E=8B=EF=BC=9ASQL=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=E3=80=91:https://e.gitee.com/opengaussorg/dashboard?= =?UTF-8?q?=3Fissue=3DI8V51J?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../b_compatibility_time_funcs3.out | 283 ++++++++++++++++++ .../dolphin/plugin_utils/adt/timestamp.cpp | 2 +- .../rollback_script/dolphin--3.0--2.0.sql | 27 +- .../b_compatibility_time_funcs3.sql | 74 +++++ .../upgrade_script/dolphin--2.0--3.0.sql | 5 + 5 files changed, 379 insertions(+), 12 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out index eba2785b0..9edfa641d 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs3.out @@ -1484,6 +1484,289 @@ SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); 0000-00-00 (1 row) +select str_to_date(true, '%Y-%m-%d') as result; + result +------------ + 2001-00-00 +(1 row) + +reset dolphin.sql_mode; +select str_to_date(true, '%Y-%m-%d') as result; +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: result + 
result +-------- + +(1 row) + +select str_to_date('2023-10-01'::tinyblob, '%Y-%m-%d'); + str_to_date +------------- + 2023-10-01 +(1 row) + +select str_to_date('2023-10-01'::blob, '%Y-%m-%d'); + str_to_date +------------- + 2023-10-01 +(1 row) + +select str_to_date('2023-10-01'::mediumblob, '%Y-%m-%d'); + str_to_date +------------- + 2023-10-01 +(1 row) + +select str_to_date('2023-10-01'::longblob, '%Y-%m-%d'); + str_to_date +------------- + 2023-10-01 +(1 row) + +select str_to_date(json_object('2023-10-01', '2023-10-01'), '%Y-%m-%d'); +WARNING: Incorrect date value: '{"2023-10-01": "2023-10-01"}' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date + str_to_date +------------- + +(1 row) + +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('2023-01-01', '2024-01-01', '2025-01-01'), + `set_t` set('2023-01-01', '2024-01-01', '2025-01-01'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, +'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', +'2023-02-05 19:10:50.456', 
'2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', +'2023-01-01', '2023-01-01', json_object('a', 1, 'b', 2)); +select str_to_date(`int1`, '%Y-%m-%d'), str_to_date(`uint1`, '%Y-%m-%d'), str_to_date(`int2`, '%Y-%m-%d'), str_to_date(`uint2`, '%Y-%m-%d'), str_to_date(`int4`, '%Y-%m-%d'), +str_to_date(`uint4`, '%Y-%m-%d'), str_to_date(`int8`, '%Y-%m-%d'), str_to_date(`uint8`, '%Y-%m-%d'), str_to_date(`float4`, '%Y-%m-%d'), str_to_date(`float8`, '%Y-%m-%d'), +str_to_date(`numeric`, '%Y-%m-%d'), str_to_date(`bit1`, '%Y-%m-%d'), str_to_date(`bit64`, '%Y-%m-%d'), str_to_date(`boolean`, '%Y-%m-%d'), str_to_date(`date`, '%Y-%m-%d'), +str_to_date(`time`, '%Y-%m-%d'), str_to_date(`time(4)`, '%Y-%m-%d'), str_to_date(`datetime`, '%Y-%m-%d'), str_to_date(`datetime(4)`, '%Y-%m-%d'), str_to_date(`timestamp`, '%Y-%m-%d'), +str_to_date(`timestamp(4)`, '%Y-%m-%d'), str_to_date(`year`, '%Y-%m-%d'), str_to_date(`char`, '%Y-%m-%d'), str_to_date(`varchar`, '%Y-%m-%d'), str_to_date(`binary`, '%Y-%m-%d'), +str_to_date(`varbinary`, '%Y-%m-%d'), str_to_date(`tinyblob`, '%Y-%m-%d'), str_to_date(`blob`, '%Y-%m-%d'), str_to_date(`mediumblob`, '%Y-%m-%d'), str_to_date(`longblob`, '%Y-%m-%d'), +str_to_date(`text`, '%Y-%m-%d'), str_to_date(`enum_t`, '%Y-%m-%d'), str_to_date(`set_t`, '%Y-%m-%d'), str_to_date(`json`, '%Y-%m-%d') from test_type_table; +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date 
+CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1.000000' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '19:10:50' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '19:10:50.3456' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50' +CONTEXT: referenced column: str_to_date +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50.456' +CONTEXT: referenced column: str_to_date +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50-08' +CONTEXT: referenced column: str_to_date +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50.456-08' +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1.23a ' for function str_to_date +CONTEXT: referenced 
column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +WARNING: Incorrect date value: '{"a": 1, "b": 2}' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: str_to_date + str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date | str_to_date 
+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+------------- + | | | | | | | | | | | | | | 2023-02-05 | | | 2023-02-05 | 2023-02-05 | 2023-02-05 | 2023-02-05 | | | | | | | | | | | 2023-01-01 | 2023-01-01 | +(1 row) + +create table test_str_to_date(col1 text, col2 text, col3 text, col4 text, col5 text, col6 text, col7 text, col8 text, col9 text, col10 text, col11 text, col12 text, col13 text, col14 text, col15 text, col16 text, col17 text, col18 text, col19 text, col20 text, col21 text, col22 text, col23 text, col24 text, col25 text, col26 text, col27 text, col28 text, col29 text, col30 text, col31 text, col32 text, +col33 text, col34 text); +insert ignore into test_str_to_date select str_to_date(`int1`, '%Y-%m-%d'), str_to_date(`uint1`, '%Y-%m-%d'), str_to_date(`int2`, '%Y-%m-%d'), str_to_date(`uint2`, '%Y-%m-%d'), str_to_date(`int4`, '%Y-%m-%d'), +str_to_date(`uint4`, '%Y-%m-%d'), str_to_date(`int8`, '%Y-%m-%d'), str_to_date(`uint8`, '%Y-%m-%d'), str_to_date(`float4`, '%Y-%m-%d'), str_to_date(`float8`, '%Y-%m-%d'), +str_to_date(`numeric`, '%Y-%m-%d'), str_to_date(`bit1`, '%Y-%m-%d'), str_to_date(`bit64`, '%Y-%m-%d'), str_to_date(`boolean`, '%Y-%m-%d'), str_to_date(`date`, '%Y-%m-%d'), +str_to_date(`time`, '%Y-%m-%d'), str_to_date(`time(4)`, '%Y-%m-%d'), str_to_date(`datetime`, '%Y-%m-%d'), str_to_date(`datetime(4)`, '%Y-%m-%d'), str_to_date(`timestamp`, '%Y-%m-%d'), +str_to_date(`timestamp(4)`, '%Y-%m-%d'), str_to_date(`year`, '%Y-%m-%d'), str_to_date(`char`, '%Y-%m-%d'), str_to_date(`varchar`, '%Y-%m-%d'), str_to_date(`binary`, '%Y-%m-%d'), 
+str_to_date(`varbinary`, '%Y-%m-%d'), str_to_date(`tinyblob`, '%Y-%m-%d'), str_to_date(`blob`, '%Y-%m-%d'), str_to_date(`mediumblob`, '%Y-%m-%d'), str_to_date(`longblob`, '%Y-%m-%d'), +str_to_date(`text`, '%Y-%m-%d'), str_to_date(`enum_t`, '%Y-%m-%d'), str_to_date(`set_t`, '%Y-%m-%d'), str_to_date(`json`, '%Y-%m-%d') from test_type_table; +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col1 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col2 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col3 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col4 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col5 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col6 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col7 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col8 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col9 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: col10 +WARNING: Incorrect date value: '1.000000' for function str_to_date +CONTEXT: referenced column: col11 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col12 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col13 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col14 +WARNING: Incorrect date value: '19:10:50' for function str_to_date +CONTEXT: referenced column: 
col16 +WARNING: Incorrect date value: '19:10:50.3456' for function str_to_date +CONTEXT: referenced column: col17 +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50' +CONTEXT: referenced column: col18 +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50.456' +CONTEXT: referenced column: col19 +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50-08' +CONTEXT: referenced column: col20 +WARNING: Truncated incorrect date value: '2023-02-05 19:10:50.456-08' +CONTEXT: referenced column: col21 +WARNING: Incorrect date value: '1' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col22 +WARNING: Incorrect date value: '1.23a ' for function str_to_date +CONTEXT: referenced column: col23 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: col24 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: col25 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: col26 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col27 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col28 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col29 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: str_to_date +SQL function "str_to_date" statement 1 +referenced column: col30 +WARNING: Incorrect date value: '1.23a' for function str_to_date +CONTEXT: referenced column: col31 +WARNING: Incorrect date value: '{"a": 1, "b": 2}' for function str_to_date +CONTEXT: referenced column: str_to_date 
+SQL function "str_to_date" statement 1 +referenced column: col34 +select * from test_str_to_date; + col1 | col2 | col3 | col4 | col5 | col6 | col7 | col8 | col9 | col10 | col11 | col12 | col13 | col14 | col15 | col16 | col17 | col18 | col19 | col20 | col21 | col22 | col23 | col24 | col25 | col26 | col27 | col28 | col29 | col30 | col31 | col32 | col33 | col34 +------+------+------+------+------+------+------+------+------+-------+-------+-------+-------+-------+------------+-------+-------+------------+------------+------------+------------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+------------+------------+------- + | | | | | | | | | | | | | | 2023-02-05 | | | 2023-02-05 | 2023-02-05 | 2023-02-05 | 2023-02-05 | | | | | | | | | | | 2023-01-01 | 2023-01-01 | +(1 row) + +drop table if exists test_str_to_date; +drop table if exists test_type_table; set dolphin.b_compatibility_mode = false; select time('23:65:66'); WARNING: date/time field value out of range: "23:65:66" diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index 689a3341e..c550a60ad 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -10323,7 +10323,7 @@ static bool date_should_be_null(int target_type, const pg_tm* time, time_flags f */ Datum str_to_date(PG_FUNCTION_ARGS) { - int errlevel = (SQL_MODE_STRICT() ? ERROR : WARNING); + int errlevel = !fcinfo->can_ignore && SQL_MODE_STRICT() ? 
ERROR : WARNING; if (PG_ARGISNULL(0)) { PG_RETURN_NULL(); } diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 68a7efd5b..a7013e9a2 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -379,17 +379,22 @@ DROP FUNCTION IF EXISTS pg_catalog.exp(json); DROP CAST IF EXISTS (boolean AS date); DROP FUNCTION IF EXISTS pg_catalog.bool_date(boolean); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int8_cast_date(int1); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int16_cast_date(int2); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(int4); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(int8); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint1); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint2); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint4); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(uint8); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float4); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(float8); -DROP FUNCTION IF EXISTS FUNCTION pg_catalog.int32_cast_date(numeric); +DROP FUNCTION IF EXISTS pg_catalog.int8_cast_date(int1); +DROP FUNCTION IF EXISTS pg_catalog.int16_cast_date(int2); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(int4); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(int8); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint1); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint2); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint4); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint8); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(float4); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(float8); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(numeric); DROP FUNCTION IF EXISTS pg_catalog.ln(year); DROP FUNCTION IF EXISTS 
pg_catalog.ln(json); + +DROP FUNCTION IF EXISTS pg_catalog.str_to_date(boolean, TEXT); +DROP FUNCTION IF EXISTS pg_catalog.str_to_date(longblob, TEXT); +DROP FUNCTION IF EXISTS pg_catalog.str_to_date(anyenum, TEXT); +DROP FUNCTION IF EXISTS pg_catalog.str_to_date(json, TEXT); diff --git a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql index f24b8649c..3a5c94781 100644 --- a/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql +++ b/contrib/dolphin/sql/builtin_funcs/b_compatibility_time_funcs3.sql @@ -391,7 +391,81 @@ SELECT STR_TO_DATE('31/11/22', '%d/%m/%Y'); SELECT STR_TO_DATE('2023-02-31','%Y-%m-%d'); SELECT STR_TO_DATE('2004.09.12 10:61:59','%Y.%m.%d %T'); SELECT STR_TO_DATE('0000-00-00','%Y-%m-%d'); +select str_to_date(true, '%Y-%m-%d') as result; +reset dolphin.sql_mode; +select str_to_date(true, '%Y-%m-%d') as result; +select str_to_date('2023-10-01'::tinyblob, '%Y-%m-%d'); +select str_to_date('2023-10-01'::blob, '%Y-%m-%d'); +select str_to_date('2023-10-01'::mediumblob, '%Y-%m-%d'); +select str_to_date('2023-10-01'::longblob, '%Y-%m-%d'); +select str_to_date(json_object('2023-10-01', '2023-10-01'), '%Y-%m-%d'); + +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` 
longblob, + `text` text, + `enum_t` enum('2023-01-01', '2024-01-01', '2025-01-01'), + `set_t` set('2023-01-01', '2024-01-01', '2025-01-01'), + `json` json +); + +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, +'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', +'2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', +'2023-01-01', '2023-01-01', json_object('a', 1, 'b', 2)); + +select str_to_date(`int1`, '%Y-%m-%d'), str_to_date(`uint1`, '%Y-%m-%d'), str_to_date(`int2`, '%Y-%m-%d'), str_to_date(`uint2`, '%Y-%m-%d'), str_to_date(`int4`, '%Y-%m-%d'), +str_to_date(`uint4`, '%Y-%m-%d'), str_to_date(`int8`, '%Y-%m-%d'), str_to_date(`uint8`, '%Y-%m-%d'), str_to_date(`float4`, '%Y-%m-%d'), str_to_date(`float8`, '%Y-%m-%d'), +str_to_date(`numeric`, '%Y-%m-%d'), str_to_date(`bit1`, '%Y-%m-%d'), str_to_date(`bit64`, '%Y-%m-%d'), str_to_date(`boolean`, '%Y-%m-%d'), str_to_date(`date`, '%Y-%m-%d'), +str_to_date(`time`, '%Y-%m-%d'), str_to_date(`time(4)`, '%Y-%m-%d'), str_to_date(`datetime`, '%Y-%m-%d'), str_to_date(`datetime(4)`, '%Y-%m-%d'), str_to_date(`timestamp`, '%Y-%m-%d'), +str_to_date(`timestamp(4)`, '%Y-%m-%d'), str_to_date(`year`, '%Y-%m-%d'), str_to_date(`char`, '%Y-%m-%d'), str_to_date(`varchar`, '%Y-%m-%d'), str_to_date(`binary`, '%Y-%m-%d'), +str_to_date(`varbinary`, '%Y-%m-%d'), str_to_date(`tinyblob`, '%Y-%m-%d'), str_to_date(`blob`, '%Y-%m-%d'), str_to_date(`mediumblob`, '%Y-%m-%d'), str_to_date(`longblob`, '%Y-%m-%d'), +str_to_date(`text`, '%Y-%m-%d'), str_to_date(`enum_t`, '%Y-%m-%d'), str_to_date(`set_t`, '%Y-%m-%d'), str_to_date(`json`, '%Y-%m-%d') from test_type_table; + +create table test_str_to_date(col1 text, col2 text, col3 text, col4 text, col5 text, col6 text, col7 text, col8 text, col9 text, col10 text, col11 text, col12 text, col13 text, col14 text, col15 text, col16 text, col17 
text, col18 text, col19 text, col20 text, col21 text, col22 text, col23 text, col24 text, col25 text, col26 text, col27 text, col28 text, col29 text, col30 text, col31 text, col32 text, +col33 text, col34 text); + +insert ignore into test_str_to_date select str_to_date(`int1`, '%Y-%m-%d'), str_to_date(`uint1`, '%Y-%m-%d'), str_to_date(`int2`, '%Y-%m-%d'), str_to_date(`uint2`, '%Y-%m-%d'), str_to_date(`int4`, '%Y-%m-%d'), +str_to_date(`uint4`, '%Y-%m-%d'), str_to_date(`int8`, '%Y-%m-%d'), str_to_date(`uint8`, '%Y-%m-%d'), str_to_date(`float4`, '%Y-%m-%d'), str_to_date(`float8`, '%Y-%m-%d'), +str_to_date(`numeric`, '%Y-%m-%d'), str_to_date(`bit1`, '%Y-%m-%d'), str_to_date(`bit64`, '%Y-%m-%d'), str_to_date(`boolean`, '%Y-%m-%d'), str_to_date(`date`, '%Y-%m-%d'), +str_to_date(`time`, '%Y-%m-%d'), str_to_date(`time(4)`, '%Y-%m-%d'), str_to_date(`datetime`, '%Y-%m-%d'), str_to_date(`datetime(4)`, '%Y-%m-%d'), str_to_date(`timestamp`, '%Y-%m-%d'), +str_to_date(`timestamp(4)`, '%Y-%m-%d'), str_to_date(`year`, '%Y-%m-%d'), str_to_date(`char`, '%Y-%m-%d'), str_to_date(`varchar`, '%Y-%m-%d'), str_to_date(`binary`, '%Y-%m-%d'), +str_to_date(`varbinary`, '%Y-%m-%d'), str_to_date(`tinyblob`, '%Y-%m-%d'), str_to_date(`blob`, '%Y-%m-%d'), str_to_date(`mediumblob`, '%Y-%m-%d'), str_to_date(`longblob`, '%Y-%m-%d'), +str_to_date(`text`, '%Y-%m-%d'), str_to_date(`enum_t`, '%Y-%m-%d'), str_to_date(`set_t`, '%Y-%m-%d'), str_to_date(`json`, '%Y-%m-%d') from test_type_table; +select * from test_str_to_date; +drop table if exists test_str_to_date; +drop table if exists test_type_table; set dolphin.b_compatibility_mode = false; select time('23:65:66'); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index ecb4efe80..4e398c63b 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -688,3 +688,8 @@ DROP FUNCTION IF EXISTS pg_catalog.ln(json); CREATE 
OR REPLACE FUNCTION pg_catalog.ln(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.ln(cast($1 as double precision))'; + +CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(boolean, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(longblob, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(anyenum, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(json, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; -- Gitee From f47a655a05ccfeaf6415304781836670c0fff933 Mon Sep 17 00:00:00 2001 From: Mijamind Date: Tue, 16 Jan 2024 19:37:09 +0800 Subject: [PATCH 193/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E4=BF=AE=E5=A4=8Dnodename=E9=85=8D?= =?UTF-8?q?=E7=BD=AE=E9=94=99=E8=AF=AF=E6=95=B0=E6=8D=AE=E9=87=8D=E5=A4=8D?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/src/guc_spq.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/contrib/spq_plugin/src/guc_spq.cpp b/contrib/spq_plugin/src/guc_spq.cpp index 31c19bcb4..14b871a31 100644 --- a/contrib/spq_plugin/src/guc_spq.cpp +++ b/contrib/spq_plugin/src/guc_spq.cpp @@ -1730,6 +1730,16 @@ static bool spq_verify_gauss_cluster_map_syntax(const char *liststring) if (t_thrd.spq_ctx.nodesDefinition != nullptr) { pfree(t_thrd.spq_ctx.nodesDefinition); } + for (int i = 0; i < array_size; ++i) { + for (int j = i + 1; j < array_size; ++j) { + if (strcmp(nodesDefinition[i].nodename.data, nodesDefinition[j].nodename.data) == 0) { + GUC_check_errdetail("spq cluster map is invalid, have 
duplicate node name."); + pfree(nodesDefinition); + MemoryContextSwitchTo(oldContext); + return false; + } + } + } t_thrd.spq_ctx.num_nodes = array_size; t_thrd.spq_ctx.nodesDefinition = nodesDefinition; MemoryContextSwitchTo(oldContext); -- Gitee From 0fc0145291f3d525d71ada6a24c35950805eeb1a Mon Sep 17 00:00:00 2001 From: luozihao <1165977584@qq.com> Date: Tue, 16 Jan 2024 20:59:12 +0800 Subject: [PATCH 194/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dblob=E7=AD=89?= =?UTF-8?q?=E7=B1=BB=E5=9E=8B=E4=B8=8D=E8=B5=B0=E7=B4=A2=E5=BC=95=E6=89=AB?= =?UTF-8?q?=E6=8F=8F=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../json_cmp_operator_test.out | 6 +- contrib/dolphin/expected/test_blob.out | 207 ++++++++++ contrib/dolphin/plugin_parser/parse_oper.cpp | 7 - .../rollback_script/dolphin--3.0--2.0.sql | 332 ++++++++++++++++ contrib/dolphin/sql/test_blob.sql | 48 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 370 ++++++++++++++++++ 6 files changed, 960 insertions(+), 10 deletions(-) diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out index e3289233e..208084ed8 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_cmp_operator_test.out @@ -579,7 +579,7 @@ select `tinyblob`, `json`, `tinyblob` < `json` as `tinyblob `json` as `tinyblob<>json`, `tinyblob` = `json` as `tinyblob=json`, `tinyblob` <=> `json` as `tinyblob<=>json` from test_json_table; -ERROR: operator does not exist: blob > json +ERROR: operator does not exist: tinyblob > json LINE 2: `tinyblob` > `json` as `tinyblob>json`, `tinyblob` >=... ^ HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. 
@@ -619,7 +619,7 @@ select `mediumblob`, `json`, `mediumblob` < `json` as `mediumblob `json` as `mediumblob<>json`, `mediumblob` = `json` as `mediumblob=json`, `mediumblob` <=> `json` as `mediumblob<=>json` from test_json_table; -ERROR: operator does not exist: blob > json +ERROR: operator does not exist: mediumblob > json LINE 2: `mediumblob` > `json` as `mediumblob>json`, `mediumbl... ^ HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. @@ -639,7 +639,7 @@ select `longblob`, `json`, `longblob` < `json` as `longblob `json` as `longblob<>json`, `longblob` = `json` as `longblob=json`, `longblob` <=> `json` as `longblob<=>json` from test_json_table; -ERROR: operator does not exist: blob > json +ERROR: operator does not exist: longblob > json LINE 2: `longblob` > `json` as `longblob>json`, `longblob` >=... ^ HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. diff --git a/contrib/dolphin/expected/test_blob.out b/contrib/dolphin/expected/test_blob.out index 0344c0b04..bb3da408b 100644 --- a/contrib/dolphin/expected/test_blob.out +++ b/contrib/dolphin/expected/test_blob.out @@ -1252,6 +1252,213 @@ select * from t_dezebium_0007_02; update t_dezebium_0007_02 set c2='101',c5='FG'; delete from t_dezebium_0007_02 where hex(c23)=6162; +DROP TABLE IF EXISTS t_index_test; +NOTICE: table "t_index_test" does not exist, skipping +create table t_index_test(a blob, b tinyblob, c mediumblob, d longblob); +insert into t_index_test select i,i,i,i from generate_series(1,10000) i; +create index i_a on t_index_test(a); +create index i_b on t_index_test(b); +create index i_c on t_index_test(c); +create index i_d on t_index_test(d); +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_a on t_index_test + Index Cond: (a = '1'::blob) +(3 rows) + +explain (costs off) 
select * from t_index_test where a>='a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_a on t_index_test + Index Cond: (a >= 'a1'::blob) +(3 rows) + +explain (costs off) select * from t_index_test where a>'a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_a on t_index_test + Index Cond: (a > 'a1'::blob) +(3 rows) + +explain (costs off) select * from t_index_test where a<='0'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_a on t_index_test + Index Cond: (a <= '0'::blob) +(3 rows) + +explain (costs off) select * from t_index_test where a<'1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_a on t_index_test + Index Cond: (a < '1'::blob) +(3 rows) + +explain (costs off) select * from t_index_test where b='1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (b = '1'::tinyblob) +(3 rows) + +explain (costs off) select * from t_index_test where b>='a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (b >= 'a1'::tinyblob) +(3 rows) + +explain (costs off) select * from t_index_test where b>'a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (b > 'a1'::tinyblob) +(3 rows) + +explain (costs off) select * from t_index_test where b<='0'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (b <= '0'::tinyblob) +(3 rows) + +explain (costs off) select * from t_index_test where b<'1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_b on t_index_test + Index Cond: (b < '1'::tinyblob) +(3 rows) + +explain (costs off) select * from t_index_test where c='1'; + QUERY PLAN +-------------------------------------- + [Bypass] + 
Index Scan using i_c on t_index_test + Index Cond: (c = '1'::mediumblob) +(3 rows) + +explain (costs off) select * from t_index_test where c>='a1'; + QUERY PLAN +--------------------------------------- + [Bypass] + Index Scan using i_c on t_index_test + Index Cond: (c >= 'a1'::mediumblob) +(3 rows) + +explain (costs off) select * from t_index_test where c>'a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_c on t_index_test + Index Cond: (c > 'a1'::mediumblob) +(3 rows) + +explain (costs off) select * from t_index_test where c<='0'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_c on t_index_test + Index Cond: (c <= '0'::mediumblob) +(3 rows) + +explain (costs off) select * from t_index_test where c<'1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_c on t_index_test + Index Cond: (c < '1'::mediumblob) +(3 rows) + +explain (costs off) select * from t_index_test where d='1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_d on t_index_test + Index Cond: (d = '1'::longblob) +(3 rows) + +explain (costs off) select * from t_index_test where d>='a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_d on t_index_test + Index Cond: (d >= 'a1'::longblob) +(3 rows) + +explain (costs off) select * from t_index_test where d>'a1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_d on t_index_test + Index Cond: (d > 'a1'::longblob) +(3 rows) + +explain (costs off) select * from t_index_test where d<='0'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_d on t_index_test + Index Cond: (d <= '0'::longblob) +(3 rows) + +explain (costs off) select * from t_index_test where d<'1'; + QUERY PLAN +-------------------------------------- + [Bypass] + Index Scan using i_d on t_index_test + Index Cond: (d < '1'::longblob) +(3 rows) + 
+drop index i_a; +drop index i_b; +drop index i_c; +drop index i_d; +create index i_a on t_index_test(a) using hash; +create index i_b on t_index_test(b) using hash; +create index i_c on t_index_test(c) using hash; +create index i_d on t_index_test(d) using hash; +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; + QUERY PLAN +-------------------------------------- + Index Scan using i_a on t_index_test + Index Cond: (a = '1'::blob) +(2 rows) + +explain (costs off) select * from t_index_test where b='1'; + QUERY PLAN +-------------------------------------- + Index Scan using i_b on t_index_test + Index Cond: (b = '1'::tinyblob) +(2 rows) + +explain (costs off) select * from t_index_test where c='1'; + QUERY PLAN +-------------------------------------- + Index Scan using i_c on t_index_test + Index Cond: (c = '1'::mediumblob) +(2 rows) + +explain (costs off) select * from t_index_test where d='1'; + QUERY PLAN +-------------------------------------- + Index Scan using i_d on t_index_test + Index Cond: (d = '1'::longblob) +(2 rows) + +DROP TABLE t_index_test; drop schema test_blob cascade; NOTICE: drop cascades to table t_dezebium_0007_02 reset current_schema; diff --git a/contrib/dolphin/plugin_parser/parse_oper.cpp b/contrib/dolphin/plugin_parser/parse_oper.cpp index 4ca9b5c9e..5978bce82 100644 --- a/contrib/dolphin/plugin_parser/parse_oper.cpp +++ b/contrib/dolphin/plugin_parser/parse_oper.cpp @@ -546,13 +546,6 @@ Operator oper(ParseState* pstate, List* opname, Oid ltypeId, Oid rtypeId, bool n } #ifdef DOLPHIN - /* Use BLOB like a pseudo types */ - if (IsBlobClassType(ltypeId)) { - ltypeId = BLOBOID; - } - if (IsBlobClassType(rtypeId)) { - rtypeId = BLOBOID; - } /** * If GUC parameter b_compatibility_mode is true, * and the expression is adding a string constant and an interval, diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index a7013e9a2..753417018 
100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -398,3 +398,335 @@ DROP FUNCTION IF EXISTS pg_catalog.str_to_date(boolean, TEXT); DROP FUNCTION IF EXISTS pg_catalog.str_to_date(longblob, TEXT); DROP FUNCTION IF EXISTS pg_catalog.str_to_date(anyenum, TEXT); DROP FUNCTION IF EXISTS pg_catalog.str_to_date(json, TEXT); +DROP OPERATOR CLASS IF EXISTS pg_catalog.tinyblob_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.tinyblob_ops USING HASH; +DROP OPERATOR CLASS IF EXISTS pg_catalog.blob_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.blob_ops USING HASH; +DROP OPERATOR CLASS IF EXISTS pg_catalog.mediumblob_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.mediumblob_ops USING HASH; +DROP OPERATOR CLASS IF EXISTS pg_catalog.longblob_ops USING BTREE; +DROP OPERATOR CLASS IF EXISTS pg_catalog.longblob_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.tinyblob_ops USING BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.tinyblob_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.blob_ops USING BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.blob_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.mediumblob_ops USING BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.mediumblob_ops USING HASH; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.longblob_ops USING BTREE; +DROP OPERATOR FAMILY IF EXISTS pg_catalog.longblob_ops USING HASH; + +DROP OPERATOR IF EXISTS pgcatalog.=(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(tinyblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.=(tinyblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<>(tinyblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.>(tinyblob, blob); 
+DROP OPERATOR IF EXISTS pgcatalog.>=(tinyblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<(tinyblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<=(tinyblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.=(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(tinyblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.=(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(tinyblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.=(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<>(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>=(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<=(tinyblob, text); +DROP OPERATOR IF EXISTS pgcatalog.=(text, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(text, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>(text, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(text, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<(text, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(text, tinyblob); + +DROP OPERATOR IF EXISTS pgcatalog.=(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(blob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.=(blob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(blob, mediumblob); +DROP OPERATOR IF EXISTS 
pgcatalog.>(blob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(blob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<(blob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(blob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.=(blob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(blob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>(blob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(blob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<(blob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(blob, longblob); + +DROP OPERATOR IF EXISTS pgcatalog.=(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(mediumblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.=(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<>(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.>(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.>=(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<=(mediumblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.=(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(mediumblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.=(mediumblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(mediumblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>(mediumblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(mediumblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<(mediumblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(mediumblob, longblob); +DROP OPERATOR IF EXISTS 
pgcatalog.=(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<>(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>=(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<=(mediumblob, text); +DROP OPERATOR IF EXISTS pgcatalog.=(text, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(text, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>(text, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(text, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<(text, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(text, mediumblob); + +DROP OPERATOR IF EXISTS pgcatalog.=(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(longblob, tinyblob); +DROP OPERATOR IF EXISTS pgcatalog.=(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<>(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.>(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.>=(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.<=(longblob, blob); +DROP OPERATOR IF EXISTS pgcatalog.=(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(longblob, mediumblob); +DROP OPERATOR IF EXISTS pgcatalog.=(longblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(longblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>(longblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(longblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<(longblob, longblob); +DROP 
OPERATOR IF EXISTS pgcatalog.<=(longblob, longblob); +DROP OPERATOR IF EXISTS pgcatalog.=(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<>(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.>=(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.<=(longblob, text); +DROP OPERATOR IF EXISTS pgcatalog.=(text, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<>(text, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>(text, longblob); +DROP OPERATOR IF EXISTS pgcatalog.>=(text, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<(text, longblob); +DROP OPERATOR IF EXISTS pgcatalog.<=(text, longblob); + +DROP FUNCTION IF EXISTS pg_catalog.blob_cmp(blob, blob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_eq(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ne(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_lt(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_le(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_gt(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ge(arg1 tinyblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_cmp(tinyblob, tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_eq_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ne_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_lt_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_le_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_gt_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ge_text(arg1 tinyblob, arg2 text); + +DROP FUNCTION IF EXISTS pg_catalog.text_eq_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.text_ne_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS 
pg_catalog.text_lt_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.text_le_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.test_gt_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.test_ge_tinyblob(arg1 text, arg2 tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_eq(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ne(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_lt(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_le(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_gt(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ge(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_cmp(mediumblob, mediumblob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_eq_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ne_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_lt_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_le_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_gt_text(arg1 tinyblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_ge_text(arg1 tinyblob, arg2 text); + +DROP FUNCTION IF EXISTS pg_catalog.text_eq_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.text_ne_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.text_lt_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.text_le_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.test_gt_tinyblob(arg1 text, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.test_ge_tinyblob(arg1 text, arg2 tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_eq(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ne(arg1 
mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_lt(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_le(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_gt(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ge(arg1 mediumblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_cmp(mediumblob, mediumblob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_eq_text(arg1 mediumblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ne_text(arg1 mediumblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_lt_text(arg1 mediumblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_le_text(arg1 mediumblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_gt_text(arg1 mediumblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_ge_text(arg1 mediumblob, arg2 text); + +DROP FUNCTION IF EXISTS pg_catalog.text_eq_mediumblob(arg1 text, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.text_ne_mediumblob(arg1 text, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.text_lt_mediumblob(arg1 text, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.text_le_mediumblob(arg1 text, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.test_gt_mediumblob(arg1 text, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.test_ge_mediumblob(arg1 text, arg2 mediumblob); + +DROP FUNCTION IF EXISTS pg_catalog.longblob_eq(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_ne(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_lt(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_le(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_gt(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_ge(arg1 longblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_cmp(longblob, 
longblob); + +DROP FUNCTION IF EXISTS pg_catalog.longblob_eq_text(arg1 longblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.longblob_ne_text(arg1 longblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.longblob_lt_text(arg1 longblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.longblob_le_text(arg1 longblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.longblob_gt_text(arg1 longblob, arg2 text); +DROP FUNCTION IF EXISTS pg_catalog.longblob_ge_text(arg1 longblob, arg2 text); + +DROP FUNCTION IF EXISTS pg_catalog.text_eq_longblob(arg1 text, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.text_ne_longblob(arg1 text, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.text_lt_longblob(arg1 text, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.text_le_longblob(arg1 text, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.test_gt_longblob(arg1 text, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.test_ge_longblob(arg1 text, arg2 longblob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_eq(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_ne(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_lt(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_le(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_gt(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_ge(arg1 tinyblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_blob_cmp(tinyblob, blob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_eq(arg1 tinyblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_ne(arg1 tinyblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_lt(arg1 tinyblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_le(arg1 tinyblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_gt(arg1 tinyblob, arg2 mediumblob); +DROP 
FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_ge(arg1 tinyblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_mediumblob_cmp(tinyblob, mediumblob); + +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_eq(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_ne(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_lt(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_le(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_gt(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_ge(arg1 tinyblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.tinyblob_longblob_cmp(tinyblob, longblob); + +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_eq(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_ne(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_lt(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_le(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_gt(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_ge(arg1 blob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_tinyblob_cmp(blob, tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_eq(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_ne(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_lt(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_le(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_gt(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_ge(arg1 blob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_mediumblob_cmp(blob, mediumblob); + +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_eq(arg1 blob, arg2 longblob); +DROP 
FUNCTION IF EXISTS pg_catalog.blob_longblob_ne(arg1 blob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_lt(arg1 blob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_le(arg1 blob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_gt(arg1 blob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_ge(arg1 blob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.blob_longblob_cmp(longblob, longblob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_eq(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_ne(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_lt(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_le(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_gt(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_ge(arg1 mediumblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_tinyblob_cmp(mediumblob, tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_eq(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_ne(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_lt(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_le(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_gt(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_ge(arg1 mediumblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_blob_cmp(mediumblob, blob); + +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_eq(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_ne(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_lt(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS 
pg_catalog.mediumblob_longblob_le(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_gt(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_ge(arg1 mediumblob, arg2 longblob); +DROP FUNCTION IF EXISTS pg_catalog.mediumblob_longblob_cmp(longblob, longblob); + +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_eq(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_ne(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_lt(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_le(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_gt(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_ge(arg1 longblob, arg2 tinyblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_tinyblob_cmp(longblob, tinyblob); + +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_eq(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_ne(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_lt(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_le(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_gt(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_ge(arg1 longblob, arg2 blob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_blob_cmp(longblob, blob); + +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_eq(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_ne(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_lt(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_le(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_gt(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS 
pg_catalog.longblob_mediumblob_ge(arg1 longblob, arg2 mediumblob); +DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_cmp(longblob, longblob); diff --git a/contrib/dolphin/sql/test_blob.sql b/contrib/dolphin/sql/test_blob.sql index 493d99cb4..55211f174 100644 --- a/contrib/dolphin/sql/test_blob.sql +++ b/contrib/dolphin/sql/test_blob.sql @@ -317,5 +317,53 @@ select * from t_dezebium_0007_02; update t_dezebium_0007_02 set c2='101',c5='FG'; delete from t_dezebium_0007_02 where hex(c23)=6162; +DROP TABLE IF EXISTS t_index_test; +create table t_index_test(a blob, b tinyblob, c mediumblob, d longblob); +insert into t_index_test select i,i,i,i from generate_series(1,10000) i; +create index i_a on t_index_test(a); +create index i_b on t_index_test(b); +create index i_c on t_index_test(c); +create index i_d on t_index_test(d); +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; +explain (costs off) select * from t_index_test where a>='a1'; +explain (costs off) select * from t_index_test where a>'a1'; +explain (costs off) select * from t_index_test where a<='0'; +explain (costs off) select * from t_index_test where a<'1'; + +explain (costs off) select * from t_index_test where b='1'; +explain (costs off) select * from t_index_test where b>='a1'; +explain (costs off) select * from t_index_test where b>'a1'; +explain (costs off) select * from t_index_test where b<='0'; +explain (costs off) select * from t_index_test where b<'1'; + +explain (costs off) select * from t_index_test where c='1'; +explain (costs off) select * from t_index_test where c>='a1'; +explain (costs off) select * from t_index_test where c>'a1'; +explain (costs off) select * from t_index_test where c<='0'; +explain (costs off) select * from t_index_test where c<'1'; + +explain (costs off) select * from t_index_test where d='1'; +explain (costs off) select * from t_index_test where d>='a1'; +explain (costs off) select * from t_index_test where d>'a1'; +explain (costs off) 
select * from t_index_test where d<='0'; +explain (costs off) select * from t_index_test where d<'1'; + +drop index i_a; +drop index i_b; +drop index i_c; +drop index i_d; +create index i_a on t_index_test(a) using hash; +create index i_b on t_index_test(b) using hash; +create index i_c on t_index_test(c) using hash; +create index i_d on t_index_test(d) using hash; +analyze t_index_test; +explain (costs off) select * from t_index_test where a='1'; +explain (costs off) select * from t_index_test where b='1'; +explain (costs off) select * from t_index_test where c='1'; +explain (costs off) select * from t_index_test where d='1'; + +DROP TABLE t_index_test; + drop schema test_blob cascade; reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 4e398c63b..04f5f8104 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -630,6 +630,376 @@ CREATE OPERATOR CLASS pg_catalog.varbinary_ops DEFAULT OPERATOR 1 pg_catalog.=(varbinary, varbinary), FUNCTION 1 (varbinary, varbinary) pg_catalog.hashvarlena(internal); +CREATE OR REPLACE FUNCTION pg_catalog.blob_cmp(blob, blob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR FAMILY pg_catalog.blob_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.blob_ops USING HASH; + +-- about blob op family +CREATE OPERATOR CLASS pg_catalog.blob_ops DEFAULT + FOR TYPE blob USING BTREE FAMILY pg_catalog.blob_ops as + OPERATOR 1 pg_catalog.<(blob, blob), + OPERATOR 2 pg_catalog.<=(blob, blob), + OPERATOR 3 pg_catalog.=(blob, blob), + OPERATOR 4 pg_catalog.>=(blob, blob), + OPERATOR 5 pg_catalog.>(blob, blob), + FUNCTION 1 pg_catalog.blob_cmp(blob, blob), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.blob_ops DEFAULT + FOR TYPE blob USING HASH FAMILY blob_ops as + OPERATOR 1 
pg_catalog.=(blob, blob), + FUNCTION 1 (blob, blob) pg_catalog.hashvarlena(internal); + +-- about tinyblob op family +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_eq(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_ne(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_lt(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_le(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_gt(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_ge(arg1 tinyblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_cmp(tinyblob, tinyblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = tinyblob, rightarg = tinyblob, procedure = tinyblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_eq_text(arg1 tinyblob, arg2 text) RETURNS 
bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_eq($1, $2::tinyblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_ne_text(arg1 tinyblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_ne($1, $2::tinyblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_lt_text(arg1 tinyblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_lt($1, $2::tinyblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_le_text(arg1 tinyblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_le($1, $2::tinyblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_gt_text(arg1 tinyblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_gt($1, $2::tinyblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_ge_text(arg1 tinyblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_ge($1, $2::tinyblob) $$; +CREATE OPERATOR pg_catalog.=(leftarg = tinyblob, rightarg = text, procedure = tinyblob_eq_text, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = tinyblob, rightarg = text, procedure = tinyblob_ne_text, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = tinyblob, rightarg = text, procedure = tinyblob_lt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = tinyblob, rightarg = text, procedure = tinyblob_le_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = tinyblob, rightarg = text, procedure = tinyblob_gt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = tinyblob, rightarg = text, procedure = tinyblob_ge_text, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.text_eq_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_eq($1::tinyblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_ne_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool 
LANGUAGE SQL STRICT AS $$ SELECT tinyblob_ne($1::tinyblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_lt_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_lt($1::tinyblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_le_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_le($1::tinyblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_gt_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_gt($1::tinyblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_ge_tinyblob(arg1 text, arg2 tinyblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT tinyblob_ge($1::tinyblob, $2) $$; +CREATE OPERATOR pg_catalog.=(leftarg = text, rightarg = tinyblob, procedure = text_eq_tinyblob, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = text, rightarg = tinyblob, procedure = text_ne_tinyblob, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = text, rightarg = tinyblob, procedure = text_lt_tinyblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = text, rightarg = tinyblob, procedure = text_le_tinyblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = text, rightarg = tinyblob, procedure = test_gt_tinyblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = text, rightarg = tinyblob, procedure = test_ge_tinyblob, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OPERATOR FAMILY pg_catalog.tinyblob_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.tinyblob_ops USING HASH; +CREATE OPERATOR CLASS pg_catalog.tinyblob_ops DEFAULT + FOR TYPE tinyblob USING BTREE FAMILY pg_catalog.tinyblob_ops as + OPERATOR 1 pg_catalog.<(tinyblob, tinyblob), + OPERATOR 2 pg_catalog.<=(tinyblob, tinyblob), + OPERATOR 3 pg_catalog.=(tinyblob, tinyblob), + OPERATOR 4 pg_catalog.>=(tinyblob, 
tinyblob), + OPERATOR 5 pg_catalog.>(tinyblob, tinyblob), + FUNCTION 1 pg_catalog.tinyblob_cmp(tinyblob, tinyblob), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.tinyblob_ops DEFAULT + FOR TYPE tinyblob USING HASH FAMILY tinyblob_ops as + OPERATOR 1 pg_catalog.=(tinyblob, tinyblob), + FUNCTION 1 (tinyblob, tinyblob) pg_catalog.hashvarlena(internal); + +-- about mediumblob operator family +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_eq(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_ne(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_lt(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_le(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_gt(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_ge(arg1 mediumblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_cmp(mediumblob, mediumblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_le, restrict = scalarltsel, join = scalarltjoinsel); 
+CREATE OPERATOR pg_catalog.>(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = mediumblob, rightarg = mediumblob, procedure = mediumblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_eq_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_eq($1, $2::mediumblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_ne_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_ne($1, $2::mediumblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_lt_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_lt($1, $2::mediumblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_le_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_le($1, $2::mediumblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_gt_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_gt($1, $2::mediumblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_ge_text(arg1 mediumblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_ge($1, $2::mediumblob) $$; +CREATE OPERATOR pg_catalog.=(leftarg = mediumblob, rightarg = text, procedure = mediumblob_eq_text, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = mediumblob, rightarg = text, procedure = mediumblob_ne_text, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = mediumblob, rightarg = text, procedure = mediumblob_lt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = mediumblob, rightarg = text, procedure = mediumblob_le_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = mediumblob, rightarg = text, procedure = 
mediumblob_gt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = mediumblob, rightarg = text, procedure = mediumblob_ge_text, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.text_eq_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_eq($1::mediumblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_ne_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_ne($1::mediumblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_lt_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_lt($1::mediumblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_le_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_le($1::mediumblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_gt_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_gt($1::mediumblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_ge_mediumblob(arg1 text, arg2 mediumblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT mediumblob_ge($1::mediumblob, $2) $$; +CREATE OPERATOR pg_catalog.=(leftarg = text, rightarg = mediumblob, procedure = text_eq_mediumblob, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = text, rightarg = mediumblob, procedure = text_ne_mediumblob, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = text, rightarg = mediumblob, procedure = text_lt_mediumblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = text, rightarg = mediumblob, procedure = text_le_mediumblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = text, rightarg = mediumblob, procedure = test_gt_mediumblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR 
pg_catalog.>=(leftarg = text, rightarg = mediumblob, procedure = test_ge_mediumblob, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OPERATOR FAMILY pg_catalog.mediumblob_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.mediumblob_ops USING HASH; +CREATE OPERATOR CLASS pg_catalog.mediumblob_ops DEFAULT + FOR TYPE mediumblob USING BTREE FAMILY pg_catalog.mediumblob_ops as + OPERATOR 1 pg_catalog.<(mediumblob, mediumblob), + OPERATOR 2 pg_catalog.<=(mediumblob, mediumblob), + OPERATOR 3 pg_catalog.=(mediumblob, mediumblob), + OPERATOR 4 pg_catalog.>=(mediumblob, mediumblob), + OPERATOR 5 pg_catalog.>(mediumblob, mediumblob), + FUNCTION 1 pg_catalog.mediumblob_cmp(mediumblob, mediumblob), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.mediumblob_ops DEFAULT + FOR TYPE mediumblob USING HASH FAMILY mediumblob_ops as + OPERATOR 1 pg_catalog.=(mediumblob, mediumblob), + FUNCTION 1 (mediumblob, mediumblob) pg_catalog.hashvarlena(internal); + +-- about longblob operator family +CREATE OR REPLACE FUNCTION pg_catalog.longblob_eq(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_ne(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_lt(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_le(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_gt(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_ge(arg1 longblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_cmp(longblob, longblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR 
pg_catalog.=(leftarg = longblob, rightarg = longblob, procedure = longblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = longblob, rightarg = longblob, procedure = longblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = longblob, rightarg = longblob, procedure = longblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = longblob, rightarg = longblob, procedure = longblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = longblob, rightarg = longblob, procedure = longblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = longblob, procedure = longblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.longblob_eq_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_eq($1, $2::longblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_ne_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_ne($1, $2::longblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_lt_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_lt($1, $2::longblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_le_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_le($1, $2::longblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_gt_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_gt($1, $2::longblob) $$; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_ge_text(arg1 longblob, arg2 text) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_ge($1, $2::longblob) $$; +CREATE OPERATOR pg_catalog.=(leftarg = longblob, rightarg = text, procedure = longblob_eq_text, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = longblob, 
rightarg = text, procedure = longblob_ne_text, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = longblob, rightarg = text, procedure = longblob_lt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = longblob, rightarg = text, procedure = longblob_le_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = longblob, rightarg = text, procedure = longblob_gt_text, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = text, procedure = longblob_ge_text, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OR REPLACE FUNCTION pg_catalog.text_eq_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_eq($1::longblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_ne_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_ne($1::longblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_lt_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_lt($1::longblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.text_le_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_le($1::longblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_gt_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_gt($1::longblob, $2) $$; +CREATE OR REPLACE FUNCTION pg_catalog.test_ge_longblob(arg1 text, arg2 longblob) RETURNS bool LANGUAGE SQL STRICT AS $$ SELECT longblob_ge($1::longblob, $2) $$; +CREATE OPERATOR pg_catalog.=(leftarg = text, rightarg = longblob, procedure = text_eq_longblob, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = text, rightarg = longblob, procedure = text_ne_longblob, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = text, rightarg = longblob, procedure = 
text_lt_longblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = text, rightarg = longblob, procedure = text_le_longblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = text, rightarg = longblob, procedure = test_gt_longblob, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = text, rightarg = longblob, procedure = test_ge_longblob, restrict = scalarltsel, join = scalarltjoinsel); + +CREATE OPERATOR FAMILY pg_catalog.longblob_ops USING BTREE; +CREATE OPERATOR FAMILY pg_catalog.longblob_ops USING HASH; +CREATE OPERATOR CLASS pg_catalog.longblob_ops DEFAULT + FOR TYPE longblob USING BTREE FAMILY pg_catalog.longblob_ops as + OPERATOR 1 pg_catalog.<(longblob, longblob), + OPERATOR 2 pg_catalog.<=(longblob, longblob), + OPERATOR 3 pg_catalog.=(longblob, longblob), + OPERATOR 4 pg_catalog.>=(longblob, longblob), + OPERATOR 5 pg_catalog.>(longblob, longblob), + FUNCTION 1 pg_catalog.longblob_cmp(longblob, longblob), + FUNCTION 2 pg_catalog.bytea_sortsupport(internal); + +CREATE OPERATOR CLASS pg_catalog.longblob_ops DEFAULT + FOR TYPE longblob USING HASH FAMILY longblob_ops as + OPERATOR 1 pg_catalog.=(longblob, longblob), + FUNCTION 1 (longblob, longblob) pg_catalog.hashvarlena(internal); + +-- about tinyblob op othersBlob +-- tinyblob op blob +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_eq(arg1 tinyblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_ne(arg1 tinyblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_lt(arg1 tinyblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_le(arg1 tinyblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_gt(arg1 tinyblob, arg2 blob) 
RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_ge(arg1 tinyblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_blob_cmp(tinyblob, blob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = tinyblob, rightarg = blob, procedure = tinyblob_blob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- tinyblob op mediumblob +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_eq(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_ne(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_lt(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_le(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_gt(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION 
pg_catalog.tinyblob_mediumblob_ge(arg1 tinyblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_mediumblob_cmp(tinyblob, mediumblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = tinyblob, rightarg = mediumblob, procedure = tinyblob_mediumblob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- tinyblob op longblob +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_eq(arg1 tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_ne(arg1 tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_lt(arg1 tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_le(arg1 tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_gt(arg1 tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_ge(arg1 
tinyblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.tinyblob_longblob_cmp(tinyblob, longblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = tinyblob, rightarg = longblob, procedure = tinyblob_longblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +-- about blob op othersBlob +-- blob op tinyblob +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_eq(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_ne(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_lt(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_le(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_gt(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_tinyblob_ge(arg1 blob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR 
REPLACE FUNCTION pg_catalog.blob_tinyblob_cmp(blob, tinyblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = blob, rightarg = tinyblob, procedure = blob_tinyblob_ge, restrict = scalarltsel, join = scalarltjoinsel); +--- blob op mediumblob +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_eq(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_ne(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_lt(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_le(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_gt(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_ge(arg1 blob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_mediumblob_cmp(blob, mediumblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR 
pg_catalog.=(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = blob, rightarg = mediumblob, procedure = blob_mediumblob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- blob op longblob +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_eq(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_ne(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_lt(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_le(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_gt(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_ge(arg1 blob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.blob_longblob_cmp(blob, longblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = blob, rightarg = longblob, procedure = blob_longblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR
pg_catalog.<>(leftarg = blob, rightarg = longblob, procedure = blob_longblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = blob, rightarg = longblob, procedure = blob_longblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = blob, rightarg = longblob, procedure = blob_longblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = blob, rightarg = longblob, procedure = blob_longblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = blob, rightarg = longblob, procedure = blob_longblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +-- about mediumblob op othersBlob +-- mediumblob op tinyblob +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_eq(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_ne(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_lt(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_le(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_gt(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_ge(arg1 mediumblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_tinyblob_cmp(mediumblob, tinyblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = mediumblob, rightarg = tinyblob, procedure = mediumblob_tinyblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = mediumblob, 
rightarg = tinyblob, procedure = mediumblob_tinyblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = mediumblob, rightarg = tinyblob, procedure = mediumblob_tinyblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = mediumblob, rightarg = tinyblob, procedure = mediumblob_tinyblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = mediumblob, rightarg = tinyblob, procedure = mediumblob_tinyblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = mediumblob, rightarg = tinyblob, procedure = mediumblob_tinyblob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- mediumblob op blob +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_eq(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_ne(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_lt(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_le(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_gt(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_ge(arg1 mediumblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_blob_cmp(mediumblob, blob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_ne, restrict = neqsel, join = 
neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = mediumblob, rightarg = blob, procedure = mediumblob_blob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- mediumblob op longblob +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_eq(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_ne(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_lt(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_le(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_gt(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_ge(arg1 mediumblob, arg2 longblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.mediumblob_longblob_cmp(mediumblob, longblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = mediumblob, rightarg = longblob, procedure = mediumblob_longblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = mediumblob, rightarg = longblob, procedure = mediumblob_longblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg
= mediumblob, rightarg = longblob, procedure = mediumblob_longblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = mediumblob, rightarg = longblob, procedure = mediumblob_longblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = mediumblob, rightarg = longblob, procedure = mediumblob_longblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = mediumblob, rightarg = longblob, procedure = mediumblob_longblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + +-- about mediumblob op othersBlob +-- longblob op tinyblob +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_eq(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_ne(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_lt(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_le(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_gt(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_ge(arg1 longblob, arg2 tinyblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_tinyblob_cmp(longblob, tinyblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = longblob, rightarg = tinyblob, procedure = longblob_tinyblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = longblob, rightarg = tinyblob, procedure = longblob_tinyblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = longblob, 
rightarg = tinyblob, procedure = longblob_tinyblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = longblob, rightarg = tinyblob, procedure = longblob_tinyblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = longblob, rightarg = tinyblob, procedure = longblob_tinyblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = tinyblob, procedure = longblob_tinyblob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- longblob op blob +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_eq(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_ne(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_lt(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_le(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_gt(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_ge(arg1 longblob, arg2 blob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_blob_cmp(longblob, blob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = longblob, rightarg = blob, procedure = longblob_blob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = longblob, rightarg = blob, procedure = longblob_blob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = longblob, rightarg = blob, procedure = longblob_blob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = longblob, 
rightarg = blob, procedure = longblob_blob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = longblob, rightarg = blob, procedure = longblob_blob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = blob, procedure = longblob_blob_ge, restrict = scalarltsel, join = scalarltjoinsel); +-- longblobg op mediumblob +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_eq(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteaeq'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_ne(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteane'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_lt(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'bytealt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_le(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteale'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_gt(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteagt'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_ge(arg1 longblob, arg2 mediumblob) RETURNS bool LANGUAGE INTERNAL STRICT AS 'byteage'; +CREATE OR REPLACE FUNCTION pg_catalog.longblob_mediumblob_cmp(longblob, longblob) RETURNS integer LANGUAGE INTERNAL IMMUTABLE STRICT as 'byteacmp'; +CREATE OPERATOR pg_catalog.=(leftarg = longblob, rightarg = mediumblob, procedure = longblob_mediumblob_eq, restrict = eqsel, join = eqjoinsel); +CREATE OPERATOR pg_catalog.<>(leftarg = longblob, rightarg = mediumblob, procedure = longblob_mediumblob_ne, restrict = neqsel, join = neqjoinsel); +CREATE OPERATOR pg_catalog.<(leftarg = longblob, rightarg = mediumblob, procedure = longblob_mediumblob_lt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.<=(leftarg = longblob, rightarg = mediumblob, procedure = 
longblob_mediumblob_le, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>(leftarg = longblob, rightarg = mediumblob, procedure = longblob_mediumblob_gt, restrict = scalarltsel, join = scalarltjoinsel); +CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = mediumblob, procedure = longblob_mediumblob_ge, restrict = scalarltsel, join = scalarltjoinsel); + DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); -- Gitee From b20c44d6e404baf7b3b7be9528f7996de6ee4f97 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 17 Jan 2024 11:11:55 +0800 Subject: [PATCH 195/434] Fix convert func for all datatype. --- .../expected/builtin_funcs/convert.out | 69 +++++++++++++++++++ contrib/dolphin/plugin_parser/gram.y | 10 ++- .../rollback_script/dolphin--3.0--2.0.sql | 5 ++ contrib/dolphin/sql/builtin_funcs/convert.sql | 64 +++++++++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 10 +++ 5 files changed, 155 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/convert.out b/contrib/dolphin/expected/builtin_funcs/convert.out index 48bd0e9bf..aa831fdca 100644 --- a/contrib/dolphin/expected/builtin_funcs/convert.out +++ b/contrib/dolphin/expected/builtin_funcs/convert.out @@ -1,5 +1,74 @@ create schema db_convert; set current_schema to 'db_convert'; +set dolphin.b_compatibility_mode to on; +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default 
'2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('2023-01-01', '2024-01-01', '2025-01-01'), + `set_t` set('2023-01-01', '2024-01-01', '2025-01-01'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, +'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', +'2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', +'2023-01-01', '2023-01-01', json_object('a', 1, 'b', 2)); +select convert(`int1` using utf8), convert(`uint1` using utf8), convert(`int2` using utf8), convert(`uint2` using utf8), convert(`int4` using utf8), +convert(`uint4` using utf8), convert(`int8` using utf8), convert(`uint8` using utf8), convert(`float4` using utf8), convert(`float8` using utf8), +convert(`numeric` using utf8), convert(`bit1` using utf8), convert(`bit64` using utf8), convert(`boolean` using utf8), convert(`date` using utf8), +convert(`time` using utf8), convert(`time(4)` using utf8), convert(`datetime` using utf8), convert(`datetime(4)` using utf8), convert(`timestamp` using utf8), +convert(`timestamp(4)` using utf8), convert(`year` using utf8), convert(`char` using utf8), convert(`varchar` using utf8), convert(`binary` using utf8), +convert(`varbinary` using utf8), convert(`tinyblob` using utf8), convert(`blob` using utf8), convert(`mediumblob` using utf8), convert(`longblob` using utf8), +convert(`text` using utf8), convert(`enum_t` using utf8), convert(`set_t` using utf8), convert(`json` using utf8) from test_type_table; + convert | convert | convert | convert | convert | convert | convert | 
convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert +---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+----------+---------+---------+---------+------------+----------+---------------+---------------------+-------------------------+------------------------+----------------------------+---------+------------------------------------------------------------------------------------------------------+---------+---------+---------+---------+---------+---------+---------+---------+------------+------------+------------------ + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1.000000 | 1 | 1 | 1 | 2023-02-05 | 19:10:50 | 19:10:50.3456 | 2023-02-05 19:10:50 | 2023-02-05 19:10:50.456 | 2023-02-05 19:10:50-08 | 2023-02-05 19:10:50.456-08 | 1 | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 2023-01-01 | 2023-01-01 | {"a": 1, "b": 2} +(1 row) + +select convert(`int1` using 'utf8'), convert(`uint1` using 'utf8'), convert(`int2` using 'utf8'), convert(`uint2` using 'utf8'), convert(`int4` using 'utf8'), +convert(`uint4` using 'utf8'), convert(`int8` using 'utf8'), convert(`uint8` using 'utf8'), convert(`float4` using 'utf8'), convert(`float8` using 'utf8'), +convert(`numeric` using 'utf8'), convert(`bit1` using 'utf8'), convert(`bit64` using 'utf8'), convert(`boolean` using 'utf8'), convert(`date` using 'utf8'), +convert(`time` using 'utf8'), convert(`time(4)` using 'utf8'), convert(`datetime` using 'utf8'), convert(`datetime(4)` using 'utf8'), convert(`timestamp` using 'utf8'), +convert(`timestamp(4)` using 'utf8'), convert(`year` using 'utf8'), convert(`char` using 'utf8'), convert(`varchar` using 'utf8'), convert(`binary` using 'utf8'), +convert(`varbinary` using 'utf8'), convert(`tinyblob` using 
'utf8'), convert(`blob` using 'utf8'), convert(`mediumblob` using 'utf8'), convert(`longblob` using 'utf8'), +convert(`text` using 'utf8'), convert(`enum_t` using 'utf8'), convert(`set_t` using 'utf8'), convert(`json` using 'utf8') from test_type_table; + convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert | convert +---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+----------+---------+---------+---------+------------+----------+---------------+---------------------+-------------------------+------------------------+----------------------------+---------+------------------------------------------------------------------------------------------------------+---------+---------+---------+---------+---------+---------+---------+---------+------------+------------+------------------ + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1.000000 | 1 | 1 | 1 | 2023-02-05 | 19:10:50 | 19:10:50.3456 | 2023-02-05 19:10:50 | 2023-02-05 19:10:50.456 | 2023-02-05 19:10:50-08 | 2023-02-05 19:10:50.456-08 | 1 | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 1.23a | 2023-01-01 | 2023-01-01 | {"a": 1, "b": 2} +(1 row) + +drop table test_type_table; select convert(1 using 'utf8'); convert --------- diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e5485985c..b9d4f546d 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -34263,8 +34263,10 @@ func_expr_common_subexpr: Type typtup = LookupTypeName(NULL, typname, NULL); FuncCall *n = makeNode(FuncCall); if (NULL == typtup) { - n->funcname = SystemFuncName("convert"); - n->args = list_make2($3, 
makeStringConst(strVal(linitial(typname->names)), -1)); + n->funcname = SystemFuncName("convert"); + TypeName * tmp = SystemTypeName("name"); + tmp->location = @5; + n->args = list_make2($3, makeStringConstCast(strVal(linitial(typname->names)), -1, tmp)); n->agg_order = NIL; n->agg_star = FALSE; n->agg_distinct = FALSE; @@ -34283,7 +34285,9 @@ func_expr_common_subexpr: { FuncCall *n = makeNode(FuncCall); n->funcname = SystemFuncName("convert"); - n->args = list_make2($3, makeStringConst($5, -1)); + TypeName * tmp = SystemTypeName("name"); + tmp->location = @5; + n->args = list_make2($3, makeStringConstCast($5, -1, tmp)); n->agg_order = NIL; n->agg_star = FALSE; n->agg_distinct = FALSE; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 68a7efd5b..be26d559e 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -354,6 +354,11 @@ DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_enumnot(anyenum); DROP OPERATOR IF EXISTS dolphin_catalog.~(NONE, anyset); DROP FUNCTION IF EXISTS dolphin_catalog.dolphin_setnot(anyset); +DROP FUNCTION IF EXISTS pg_catalog.convert(boolean, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(longblob, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(anyenum, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(json, name); + DROP OPERATOR CLASS IF EXISTS pg_catalog.varbinary_ops USING BTREE; DROP OPERATOR CLASS IF EXISTS pg_catalog.varbinary_ops USING HASH; DROP OPERATOR CLASS IF EXISTS pg_catalog.binary_ops USING BTREE; diff --git a/contrib/dolphin/sql/builtin_funcs/convert.sql b/contrib/dolphin/sql/builtin_funcs/convert.sql index 2f3ef1225..2cbed19e3 100644 --- a/contrib/dolphin/sql/builtin_funcs/convert.sql +++ b/contrib/dolphin/sql/builtin_funcs/convert.sql @@ -1,5 +1,69 @@ create schema db_convert; set current_schema to 'db_convert'; +set dolphin.b_compatibility_mode to on; + +create 
table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('2023-01-01', '2024-01-01', '2025-01-01'), + `set_t` set('2023-01-01', '2024-01-01', '2025-01-01'), + `json` json +); + +insert into test_type_table values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, +'2023-02-05', '19:10:50', '19:10:50.3456', '2023-02-05 19:10:50', +'2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', +'2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', +'2023-01-01', '2023-01-01', json_object('a', 1, 'b', 2)); + +select convert(`int1` using utf8), convert(`uint1` using utf8), convert(`int2` using utf8), convert(`uint2` using utf8), convert(`int4` using utf8), +convert(`uint4` using utf8), convert(`int8` using utf8), convert(`uint8` using utf8), convert(`float4` using utf8), convert(`float8` using utf8), +convert(`numeric` using utf8), convert(`bit1` using utf8), convert(`bit64` using utf8), convert(`boolean` using utf8), convert(`date` using utf8), +convert(`time` using utf8), convert(`time(4)` using utf8), convert(`datetime` using utf8), convert(`datetime(4)` using utf8), convert(`timestamp` using utf8), +convert(`timestamp(4)` using utf8), convert(`year` using utf8), convert(`char` using utf8), 
convert(`varchar` using utf8), convert(`binary` using utf8), +convert(`varbinary` using utf8), convert(`tinyblob` using utf8), convert(`blob` using utf8), convert(`mediumblob` using utf8), convert(`longblob` using utf8), +convert(`text` using utf8), convert(`enum_t` using utf8), convert(`set_t` using utf8), convert(`json` using utf8) from test_type_table; + +select convert(`int1` using 'utf8'), convert(`uint1` using 'utf8'), convert(`int2` using 'utf8'), convert(`uint2` using 'utf8'), convert(`int4` using 'utf8'), +convert(`uint4` using 'utf8'), convert(`int8` using 'utf8'), convert(`uint8` using 'utf8'), convert(`float4` using 'utf8'), convert(`float8` using 'utf8'), +convert(`numeric` using 'utf8'), convert(`bit1` using 'utf8'), convert(`bit64` using 'utf8'), convert(`boolean` using 'utf8'), convert(`date` using 'utf8'), +convert(`time` using 'utf8'), convert(`time(4)` using 'utf8'), convert(`datetime` using 'utf8'), convert(`datetime(4)` using 'utf8'), convert(`timestamp` using 'utf8'), +convert(`timestamp(4)` using 'utf8'), convert(`year` using 'utf8'), convert(`char` using 'utf8'), convert(`varchar` using 'utf8'), convert(`binary` using 'utf8'), +convert(`varbinary` using 'utf8'), convert(`tinyblob` using 'utf8'), convert(`blob` using 'utf8'), convert(`mediumblob` using 'utf8'), convert(`longblob` using 'utf8'), +convert(`text` using 'utf8'), convert(`enum_t` using 'utf8'), convert(`set_t` using 'utf8'), convert(`json` using 'utf8') from test_type_table; + +drop table test_type_table; + select convert(1 using 'utf8'); select convert('1' using 'utf8'); select convert('a' using 'utf8'); diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index ecb4efe80..9ecddde64 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -639,6 +639,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.degrees(boolean) RETURNS double precision CREATE 
OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; +DROP FUNCTION IF EXISTS pg_catalog.convert(boolean, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(longblob, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(anyenum, name); +DROP FUNCTION IF EXISTS pg_catalog.convert(json, name); + +CREATE FUNCTION pg_catalog.convert(boolean,name) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.convert(cast($1 as TEXT), $2)'; +CREATE FUNCTION pg_catalog.convert(longblob,name) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.convert(cast($1 as TEXT), $2)'; +CREATE FUNCTION pg_catalog.convert(anyenum,name) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.convert(cast($1 as TEXT), $2)'; +CREATE FUNCTION pg_catalog.convert(json,name) RETURNS text LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.convert(cast($1 as TEXT), $2)'; + DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(bit) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(binary) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(tinyblob) CASCADE; -- Gitee From 3cfc7a9414ff8b8f0692e88c8db30b0ddcb1a77f Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Wed, 17 Jan 2024 11:38:10 +0800 Subject: [PATCH 196/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BB=8Evarbinary?= =?UTF-8?q?=E5=9C=A8mysql=E5=AF=BC=E5=87=BA=E7=9A=84csv=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E5=AF=BC=E5=85=A5=E6=97=B6=E5=87=BA=E9=94=99=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/plugin_utils/adt/varlena.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp 
b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 1332f45ac..adf9738c9 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -706,7 +706,9 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); } - binary_length = fcinfo->flinfo->fn_rettype == BINARYOID ? atttypmod : BINARY_LEN(len) + VARHDRSZ; + binary_length = (fcinfo->flinfo == NULL || fcinfo->flinfo->fn_rettype == BINARYOID) + ? atttypmod + : BINARY_LEN(len) + VARHDRSZ; } result = (bytea*)palloc0(binary_length); (void)hex_decode(inputText + 2, len - 2, VARDATA(result)); @@ -745,17 +747,16 @@ Datum dolphin_binaryin(PG_FUNCTION_ARGS) } if (atttypmod < VARHDRSZ) { - bc += VARHDRSZ; - result = (bytea*)palloc(bc); - SET_VARSIZE(result, bc); + binary_length = bc + VARHDRSZ; /* maximum possible length */ } else { if (bc > atttypmod - VARHDRSZ) { ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), errmsg("value too long for type binary(%d)", atttypmod - VARHDRSZ))); } - result = (bytea*)palloc0(atttypmod); /* palloc0, pad with zero */ - SET_VARSIZE(result, atttypmod); + binary_length = (fcinfo->flinfo == NULL || fcinfo->flinfo->fn_rettype == BINARYOID) ? atttypmod : bc + VARHDRSZ; } + result = (bytea*)palloc0(binary_length); + SET_VARSIZE(result, binary_length); tp = inputText; rp = VARDATA(result); -- Gitee From e857e39e74d7d75965db40d5a8eb2918ac7b9bf9 Mon Sep 17 00:00:00 2001 From: totaj Date: Wed, 17 Jan 2024 17:37:18 +0800 Subject: [PATCH 197/434] Sync server code. 
16cbdbdb43c1c5dc9722d4fbb26e85df7b35fa6f --- contrib/dolphin/include/builtin_funcs.ini | 4 + .../include/plugin_nodes/parsenodes_common.h | 1 + .../include/plugin_parser/parse_node.h | 1 + .../dolphin/include/plugin_utils/plpgsql.h | 1 + contrib/dolphin/plugin_executor/execQual.cpp | 54 +++-- .../dolphin/plugin_optimizer/plan/planner.cpp | 162 +++++++------- contrib/dolphin/plugin_parser/analyze.cpp | 54 ++++- contrib/dolphin/plugin_parser/gram.y | 162 +++++++++++++- contrib/dolphin/plugin_parser/hint_scan.l | 21 +- contrib/dolphin/plugin_parser/parse_expr.cpp | 199 ++++++++++-------- contrib/dolphin/plugin_parser/parse_func.cpp | 61 +++++- .../dolphin/plugin_pl/plpgsql/src/pl_comp.cpp | 1 + .../plugin_pl/plpgsql/src/pl_handler.cpp | 2 +- .../dolphin/plugin_utils/adt/jsonfuncs.cpp | 8 +- .../dolphin/plugin_utils/adt/pgstatfuncs.cpp | 117 +++++++++- 15 files changed, 629 insertions(+), 219 deletions(-) diff --git a/contrib/dolphin/include/builtin_funcs.ini b/contrib/dolphin/include/builtin_funcs.ini index ded5c7c4d..02298e162 100755 --- a/contrib/dolphin/include/builtin_funcs.ini +++ b/contrib/dolphin/include/builtin_funcs.ini @@ -2500,6 +2500,10 @@ "dsqrt", 1, AddBuiltinFunc(_0(230), _1("dsqrt"), _2(1), _3(true), _4(false), _5(dsqrt), _6(701), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("dsqrt"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("square root"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "ondemand_recovery_status", 1, + AddBuiltinFunc(_0(6991), _1("ondemand_recovery_status"), _2(0), _3(false), _4(false), _5(get_ondemand_recovery_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(0), _11(0), _12(0), _13(0), _14(false), _15(false), 
_16(false), _17(false), _18('i'), _19(0), _20(0), _21(10, TEXTOID, TEXTOID, OIDOID, OIDOID, OIDOID, OIDOID, BOOLOID, TEXTOID, TEXTOID, TEXTOID), _22(10, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "primary_checkpoint_redo_lsn", "realtime_build_replayed_lsn", "hashmap_used_blocks", "hashmap_total_blocks", "trxn_queue_blocks", "seg_queue_blocks", "in_ondemand_recovery", "ondemand_recovery_status", "realtime_build_status", "recovery_pause_status"), _24(NULL), _25("get_ondemand_recovery_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "dss_io_stat", 1, AddBuiltinFunc(_0(6990), _1("dss_io_stat"), _2(1), _3(true), _4(false), _5(dss_io_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, INT4OID), _21(4, INT4OID, INT8OID, INT8OID, INT4OID), _22(4, 'i', 'o', 'o', 'o'), _23(4, "duration", "read_kilobyte_per_sec", "write_kilobyte_per_sec", "io_times"), _24(NULL), _25("dss_io_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) diff --git a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h index 25f193d18..15a86f80e 100644 --- a/contrib/dolphin/include/plugin_nodes/parsenodes_common.h +++ b/contrib/dolphin/include/plugin_nodes/parsenodes_common.h @@ -2156,6 +2156,7 @@ typedef struct Query { void* intoPolicy; ParentStmtType parentStmtType; #endif + bool has_uservar; } Query; /* ---------------------- diff --git a/contrib/dolphin/include/plugin_parser/parse_node.h b/contrib/dolphin/include/plugin_parser/parse_node.h index fe2856a26..f53bc83a8 100644 --- a/contrib/dolphin/include/plugin_parser/parse_node.h +++ 
b/contrib/dolphin/include/plugin_parser/parse_node.h @@ -284,6 +284,7 @@ struct ParseState { */ List* orderbyCols; List* p_indexhintLists; /*Force or use index in index hint list*/ + bool has_uservar; }; /* An element of p_relnamespace or p_varnamespace */ diff --git a/contrib/dolphin/include/plugin_utils/plpgsql.h b/contrib/dolphin/include/plugin_utils/plpgsql.h index 5ff2940aa..f70619dbd 100644 --- a/contrib/dolphin/include/plugin_utils/plpgsql.h +++ b/contrib/dolphin/include/plugin_utils/plpgsql.h @@ -1224,6 +1224,7 @@ typedef struct PLpgSQL_function { /* Complete compiled function */ struct DebugInfo* debug; struct PLpgSQL_nsitem* ns_top; + uint64 guc_stat; bool is_autonomous; bool is_plpgsql_func_with_outparam; bool is_insert_gs_source; diff --git a/contrib/dolphin/plugin_executor/execQual.cpp b/contrib/dolphin/plugin_executor/execQual.cpp index 6ae390e7c..cd682901c 100644 --- a/contrib/dolphin/plugin_executor/execQual.cpp +++ b/contrib/dolphin/plugin_executor/execQual.cpp @@ -2199,25 +2199,41 @@ static void tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc) void set_result_for_plpgsql_language_function_with_outparam(FuncExprState *fcache, Datum *result, bool *isNull) { - if (!IsA(fcache->xprstate.expr, FuncExpr)) { - return; - } - FuncExpr *func = (FuncExpr *)fcache->xprstate.expr; - if (!is_function_with_plpgsql_language_and_outparam(func->funcid)) { - return; - } - HeapTupleHeader td = DatumGetHeapTupleHeader(*result); - TupleDesc tupdesc = lookup_rowtype_tupdesc_copy(HeapTupleHeaderGetTypeId(td), HeapTupleHeaderGetTypMod(td)); - HeapTupleData tup; - tup.t_len = HeapTupleHeaderGetDatumLength(td); - tup.t_data = td; - Datum *values = (Datum *)palloc(sizeof(Datum) * tupdesc->natts); - bool *nulls = (bool *)palloc(sizeof(bool) * tupdesc->natts); - heap_deform_tuple(&tup, tupdesc, values, nulls); - *result = values[0]; - *isNull = nulls[0]; - pfree(values); - pfree(nulls); + if (!IsA(fcache->xprstate.expr, FuncExpr)) { + return; + } + FuncExpr *func 
= (FuncExpr *)fcache->xprstate.expr; + if (!is_function_with_plpgsql_language_and_outparam(func->funcid)) { + return; + } + HeapTupleHeader td = DatumGetHeapTupleHeader(*result); + TupleDesc tupdesc; + PG_TRY(); + { + tupdesc = lookup_rowtype_tupdesc_copy(HeapTupleHeaderGetTypeId(td), HeapTupleHeaderGetTypMod(td)); + } + PG_CATCH(); + { + int ecode = geterrcode(); + if (ecode == ERRCODE_CACHE_LOOKUP_FAILED) { + ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), errmodule(MOD_PLSQL), + errmsg("tuple is null"), + errdetail("it may be because change guc behavior_compat_options in one session"))); + } else { + PG_RE_THROW(); + } + } + PG_END_TRY(); + HeapTupleData tup; + tup.t_len = HeapTupleHeaderGetDatumLength(td); + tup.t_data = td; + Datum *values = (Datum *)palloc(sizeof(Datum) * tupdesc->natts); + bool *nulls = (bool *)palloc(sizeof(bool) * tupdesc->natts); + heap_deform_tuple(&tup, tupdesc, values, nulls); + *result = values[0]; + *isNull = nulls[0]; + pfree(values); + pfree(nulls); } bool ExecSetArgIsByValue(FunctionCallInfo fcinfo) diff --git a/contrib/dolphin/plugin_optimizer/plan/planner.cpp b/contrib/dolphin/plugin_optimizer/plan/planner.cpp index 7fc4d9568..91f67516f 100644 --- a/contrib/dolphin/plugin_optimizer/plan/planner.cpp +++ b/contrib/dolphin/plugin_optimizer/plan/planner.cpp @@ -149,7 +149,10 @@ static Plan* inheritance_planner(PlannerInfo* root); static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction); static void preprocess_rowmarks(PlannerInfo* root); static double preprocess_limit(PlannerInfo* root, double tuple_fraction, int64* offset_est, int64* count_est); - +static void process_sort(Query* parse, PlannerInfo* root, PlannerTargets* plannerTargets, Plan** resultPlan, + List* tlist, List* collectiveGroupExpr, List** currentPathKeys, double limitTuples, FDWUpperRelCxt* ufdwCxt); +static void process_rowMarks(Query* parse, Plan** resultPlan, PlannerInfo* root, List** currentPathKeys, + FDWUpperRelCxt* ufdwCxt); static bool 
grouping_is_can_hash(Query* parse, AggClauseCosts* agg_costs); static Size compute_hash_entry_size(bool vectorized, Path* cheapest_path, int path_width, AggClauseCosts* agg_costs); static bool choose_hashed_grouping(PlannerInfo* root, double tuple_fraction, double limit_tuples, int path_width, @@ -2734,6 +2737,83 @@ static bool has_ts_func(List* tlist) } #endif +static void process_sort(Query* parse, PlannerInfo* root, PlannerTargets* plannerTargets, Plan** resultPlan, + List* tlist, List* collectiveGroupExpr, List** currentPathKeys, double limitTuples, FDWUpperRelCxt* ufdwCxt) +{ + /* + * If ORDER BY was given and we were not able to make the plan come out in + * the right order, add an explicit sort step. + */ + if (parse->sortClause) { + if (parse->is_flt_frame && parse->hasTargetSRFs) { + List* sortInputTarget = build_plan_tlist(root, plannerTargets->final_target); + (*resultPlan)->targetlist = sortInputTarget; + } + + /* + * Set group_set and again build pathkeys, data's value can be altered groupingSet after, + * so equal expr can not be deleted from pathkeys. Rebuild pathkey EquivalenceClass's ec_group_set + * is true. + */ + rebuild_pathkey_for_groupingSet(root, tlist, NULL, collectiveGroupExpr); + + /* we also need to add sort if the sub node is parallized. 
*/ + if (!pathkeys_contained_in(root->sort_pathkeys, *currentPathKeys) || + ((*resultPlan)->dop > 1 && root->sort_pathkeys)) { + *resultPlan = (Plan*)make_sort_from_pathkeys(root, *resultPlan, root->sort_pathkeys, limitTuples); +#ifdef STREAMPLAN + if (IS_STREAM_PLAN && check_sort_for_upsert(root)) + *resultPlan = make_stream_sort(root, *resultPlan); +#endif /* STREAMPLAN */ + if (IS_PGXC_COORDINATOR && !IS_STREAM && !IsConnFromCoord()) + *resultPlan = (Plan*)create_remotesort_plan(root, *resultPlan); + *currentPathKeys = root->sort_pathkeys; + } + + if (parse->is_flt_frame && parse->hasTargetSRFs) { + *resultPlan = adjust_plan_for_srfs(root, *resultPlan, + plannerTargets->final_targets, + plannerTargets->final_targets_contain_srfs); + } + + if (ufdwCxt != NULL && ufdwCxt->state != FDW_UPPER_REL_END) { + ufdwCxt->orderExtra = (OrderPathExtraData*)palloc(sizeof(OrderPathExtraData)); + ufdwCxt->orderExtra->targetList = (*resultPlan)->targetlist; + AdvanceFDWUpperPlan(ufdwCxt, UPPERREL_ORDERED, *resultPlan); + } + } +} + +static void process_rowMarks(Query* parse, Plan** resultPlan, PlannerInfo* root, List** currentPathKeys, + FDWUpperRelCxt* ufdwCxt) +{ + /* + * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node. (Note: we + * intentionally test parse->rowMarks not root->rowMarks here. If there + * are only non-locking rowmarks, they should be handled by the + * ModifyTable node instead.) + */ + if (parse->rowMarks) { +#ifdef ENABLE_MOT + if (!IsMOTEngineUsed()) { +#endif + *resultPlan = (Plan*)make_lockrows(root, *resultPlan); +#ifdef ENABLE_MOT + } +#endif + + /* + * The result can no longer be assumed sorted, since redistribute add + * for lockrows may cause the data unsorted. 
+ */ + *currentPathKeys = NIL; + + if (ufdwCxt != NULL && ufdwCxt->state != FDW_UPPER_REL_END) { + AdvanceFDWUpperPlan(ufdwCxt, UPPERREL_ROWMARKS, *resultPlan); + } + } +} + /* -------------------- * grouping_planner * Perform planning steps related to grouping, aggregation, etc. @@ -4681,78 +4761,14 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction) AdvanceFDWUpperPlan(ufdwCxt, UPPERREL_WINDOW, result_plan); } } - - /* - * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node. (Note: we - * intentionally test parse->rowMarks not root->rowMarks here. If there - * are only non-locking rowmarks, they should be handled by the - * ModifyTable node instead.) - */ - if (parse->rowMarks) { -#ifdef ENABLE_MOT - if (!IsMOTEngineUsed()) { -#endif - result_plan = (Plan*)make_lockrows(root, result_plan); -#ifdef ENABLE_MOT - } -#endif - - /* - * The result can no longer be assumed sorted, since redistribute add - * for lockrows may cause the data unsorted. - */ -#ifndef PGXC - current_pathkeys = NIL; -#endif - - if (ufdwCxt != NULL && ufdwCxt->state != FDW_UPPER_REL_END) { - AdvanceFDWUpperPlan(ufdwCxt, UPPERREL_ROWMARKS, result_plan); - } - } - - /* - * If ORDER BY was given and we were not able to make the plan come out in - * the right order, add an explicit sort step. - */ - if (parse->sortClause) { - if (parse->is_flt_frame && parse->hasTargetSRFs) { - List* sort_input_target = build_plan_tlist(root, planner_targets->final_target); - result_plan->targetlist = sort_input_target; - } - - /* - * Set group_set and again build pathkeys, data's value can be altered groupingSet after, - * so equal expr can not be deleted from pathkeys. Rebuild pathkey EquivalenceClass's ec_group_set - * is true. - */ - rebuild_pathkey_for_groupingSet(root, tlist, NULL, collectiveGroupExpr); - - /* we also need to add sort if the sub node is parallized. 
*/ - if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys) || - (result_plan->dop > 1 && root->sort_pathkeys)) { - result_plan = (Plan*)make_sort_from_pathkeys(root, result_plan, root->sort_pathkeys, limit_tuples); -#ifdef PGXC -#ifdef STREAMPLAN - if (IS_STREAM_PLAN && check_sort_for_upsert(root)) - result_plan = make_stream_sort(root, result_plan); -#endif /* STREAMPLAN */ - if (IS_PGXC_COORDINATOR && !IS_STREAM && !IsConnFromCoord()) - result_plan = (Plan*)create_remotesort_plan(root, result_plan); -#endif /* PGXC */ - current_pathkeys = root->sort_pathkeys; - } - - if (parse->is_flt_frame && parse->hasTargetSRFs) { - result_plan = adjust_plan_for_srfs(root, result_plan, - planner_targets->final_targets, - planner_targets->final_targets_contain_srfs); - } - - if (ufdwCxt != NULL && ufdwCxt->state != FDW_UPPER_REL_END) { - ufdwCxt->orderExtra = (OrderPathExtraData*)palloc(sizeof(OrderPathExtraData)); - ufdwCxt->orderExtra->targetList = result_plan->targetlist; - AdvanceFDWUpperPlan(ufdwCxt, UPPERREL_ORDERED, result_plan); - } + if (IS_STREAM_PLAN) { + process_rowMarks(parse, &result_plan, root, ¤t_pathkeys, ufdwCxt); + process_sort(parse, root, planner_targets, &result_plan, tlist, collectiveGroupExpr, + ¤t_pathkeys, limit_tuples, ufdwCxt); + } else { + process_sort(parse, root, planner_targets, &result_plan, tlist, collectiveGroupExpr, + ¤t_pathkeys, limit_tuples, ufdwCxt); + process_rowMarks(parse, &result_plan, root, ¤t_pathkeys, ufdwCxt); } /* diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index d03cb4a2c..45045e7c4 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -118,6 +118,9 @@ static const int MILLISECONDS_PER_SECONDS = 1000; static Query* transformDeleteStmt(ParseState* pstate, DeleteStmt* stmt); static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt); +#ifndef DOLPHIN +static void checkUpsertTargetlist(Relation 
targetTable, List* updateTlist); +#endif static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upsertClause, List* resultRelations); static int count_rowexpr_columns(ParseState* pstate, Node* expr); static void transformVariableSetStmt(ParseState* pstate, VariableSetStmt* stmt); @@ -137,6 +140,7 @@ static Query* transformCreateTableAsStmt(ParseState* pstate, CreateTableAsStmt* static void CheckDeleteRelation(Relation targetrel); static void CheckUpdateRelation(Relation targetrel); static void transformVariableSetValueStmt(ParseState* pstate, VariableSetStmt* stmt); +static bool ContainColStoreWalker(Node* node, Oid targetOid); #ifdef PGXC static Query* transformExecDirectStmt(ParseState* pstate, ExecDirectStmt* stmt); static bool IsExecDirectUtilityStmt(const Node* node); @@ -731,6 +735,7 @@ Query* transformStmt(ParseState* pstate, Node* parseTree, bool isFirstNode, bool /* Mark as original query until we learn differently */ result->querySource = QSRC_ORIGINAL; result->canSetTag = true; + result->has_uservar = pstate->has_uservar; /* Mark whether synonym object is in rtables or not. 
*/ result->hasSynonyms = pstate->p_hasSynonyms; @@ -5805,21 +5810,50 @@ static bool CheckViewBasedOnCstore(Relation targetrel) Query* viewquery = get_view_query(targetrel); ListCell* l = NULL; - - foreach (l, viewquery->jointree->fromlist) { - RangeTblRef* rtr = (RangeTblRef*)lfirst(l); - RangeTblEntry* base_rte = rt_fetch(rtr->rtindex, viewquery->rtable); - Relation base_rel = try_relation_open(base_rte->relid, AccessShareLock); - - if (RelationIsColStore(base_rel) || (RelationIsView(base_rel) && CheckViewBasedOnCstore(base_rel))) { - heap_close(base_rel, AccessShareLock); + foreach (l, viewquery->rtable) { + Node* rte = (Node*)lfirst(l); + if (ContainColStoreWalker(rte, targetrel->rd_id)) { return true; } + } + return false; +} - heap_close(base_rel, AccessShareLock); +static bool ContainColStoreWalker(Node* node, Oid targetOid) +{ + if (node == NULL) { + return false; } + uintptr_t ptrOid = (uintptr_t)targetOid; - return false; + /* Check range table entry */ + if (IsA(node, RangeTblEntry)) { + RangeTblEntry* rte = (RangeTblEntry*)node; + /* only check ordinary relation */ + if ((rte->rtekind != RTE_RELATION)) { + List* rtable = list_make1(node); + return range_table_walker(rtable, (bool (*)())ContainColStoreWalker, (void*)ptrOid, 0); + } + Assert(OidIsValid(rte->relid)); + if (rte->relid == targetOid) { + return false; + } + Relation rel = relation_open(rte->relid, AccessShareLock); + if (!RelationIsValid(rel)) { + return false; + } + if (RelationIsColStore(rel) || (RelationIsView(rel) && CheckViewBasedOnCstore(rel))) { + relation_close(rel, AccessShareLock); + return true; + } + relation_close(rel, AccessShareLock); + return false; + } + /* Check query */ + if (IsA(node, Query)) { + return query_tree_walker((Query*)node, (bool (*)())ContainColStoreWalker, (void*)ptrOid, QTW_EXAMINE_RTES); + } + return expression_tree_walker(node, (bool (*)())ContainColStoreWalker, (void*)ptrOid); } /* diff --git a/contrib/dolphin/plugin_parser/gram.y 
b/contrib/dolphin/plugin_parser/gram.y index e5485985c..c0da1efe8 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -757,7 +757,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul opt_collation collate_option %type qualified_name insert_target OptConstrFromTable opt_index_name insert_partition_clause update_delete_partition_clause dolphin_qualified_name - qualified_trigger_name + qualified_trigger_name qualified_name_for_delete %type all_Op MathOp OptDbName %type SingleLineProcPart %type proc_arg_no_empty @@ -922,8 +922,8 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul %type index_elem table_index_elem constraint_elem fulltext_index_elem %type table_ref single_table %type joined_table -%type relation_expr -%type relation_expr_opt_alias delete_relation_expr_opt_alias +%type relation_expr relation_expr_for_delete relation_expr_common +%type relation_expr_opt_alias delete_relation_expr_opt_alias relation_expr_opt_alias_for_delete %type target_el single_set_clause set_target insert_column_item connect_by_root_expr %type tablesample_clause timecapsule_clause opt_timecapsule_clause opt_repeatable_clause end_expr start_expr @@ -30884,7 +30884,14 @@ relation_expr: $$->inhOpt = INH_DEFAULT; $$->alias = NULL; } - | dolphin_qualified_name '*' + | relation_expr_common + { + $$ = $1; + } + ; + +relation_expr_common: + dolphin_qualified_name '*' { /* inheritance query */ $$ = $1; @@ -30901,6 +30908,26 @@ relation_expr: } ; +/* used for multi delete stmt, to support writing table's name forms like t.* or schema.t.* */ +relation_expr_for_delete: + qualified_name_for_delete OptSnapshotVersion + { + /* default inheritance */ + $$ = $1; + if ($2 != NULL) + { + char *snapshot_name = (char *)palloc0(strlen($1->relname) + 1 + strlen($2) + 1); + sprintf(snapshot_name, "%s%c%s", $1->relname, DB4AI_SNAPSHOT_VERSION_DELIMITER, $2); + $$->relname = snapshot_name; + } + 
$$->inhOpt = INH_DEFAULT; + $$->alias = NULL; + } + | relation_expr_common + { + $$ = $1; + } + ; relation_expr_list: relation_expr { $$ = list_make1($1); } @@ -30908,7 +30935,7 @@ relation_expr_list: ; delete_relation_expr_opt_alias: - relation_expr_opt_alias %prec UMINUS + relation_expr_opt_alias_for_delete %prec UMINUS { /* * When sql_compatibility is B, name in PARTITION(name) can be @@ -30923,7 +30950,7 @@ delete_relation_expr_opt_alias: } $$ = $1; } - | relation_expr PARTITION '(' name ',' name_list ')' + | relation_expr_for_delete PARTITION '(' name ',' name_list ')' { #ifdef ENABLE_MULTIPLE_NODES const char* message = "partition syntax is not yet supported"; @@ -30939,7 +30966,7 @@ delete_relation_expr_opt_alias: $1->partitionNameList = lcons(makeString($4), $6); $$ = $1; } - | relation_expr ColId PARTITION '(' name_list ')' + | relation_expr_for_delete ColId PARTITION '(' name_list ')' { #ifdef ENABLE_MULTIPLE_NODES const char* message = "partition syntax is not yet supported"; @@ -30958,7 +30985,7 @@ delete_relation_expr_opt_alias: $1->partitionNameList = $5; $$ = $1; } - | relation_expr AS ColId PARTITION '(' name_list ')' + | relation_expr_for_delete AS ColId PARTITION '(' name_list ')' { #ifdef ENABLE_MULTIPLE_NODES const char* message = "partition syntax is not yet supported"; @@ -31047,6 +31074,66 @@ relation_expr_opt_alias: relation_expr %prec UMINUS } ; +/* used for multi delete stmt */ +relation_expr_opt_alias_for_delete: relation_expr_for_delete %prec UMINUS + { + $$ = $1; + } + | relation_expr_for_delete DolphinColId + { + Alias *alias = makeNode(Alias); + alias->aliasname = GetDolphinObjName($2->str, $2->is_quoted); + $1->alias = alias; + $$ = $1; + } + | relation_expr_for_delete AS DolphinColId + { + Alias *alias = makeNode(Alias); + alias->aliasname = GetDolphinObjName($3->str, $3->is_quoted); + $1->alias = alias; + $$ = $1; + } + | relation_expr_for_delete update_delete_partition_clause %prec UMINUS + { + if ($2 != NULL) { + 
$1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + $$ = $1; + } + | relation_expr_for_delete update_delete_partition_clause DolphinColId + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + Alias *alias = makeNode(Alias); + alias->aliasname = GetDolphinObjName($3->str, $3->is_quoted); + $1->alias = alias; + $$ = $1; + } + | relation_expr_for_delete update_delete_partition_clause AS DolphinColId + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + Alias *alias = makeNode(Alias); + alias->aliasname = GetDolphinObjName($4->str, $4->is_quoted); + $1->alias = alias; + $$ = $1; + } + ; + relation_expr_opt_alias_list: delete_relation_expr_opt_alias { $$ = list_make1($1); } | relation_expr_opt_alias_list ',' delete_relation_expr_opt_alias { $$ = lappend($1, $3); } @@ -36656,6 +36743,65 @@ qualified_name: } } ; +/* + * used for multi delete stmt, to support writing table's name forms like t.* or schema.t.* + * when modify qualified_name, please check whether need to sync modifications here. 
+ */ +qualified_name_for_delete: + DolphinColId + { + $$ = makeRangeVar(NULL, GetDolphinObjName($1->str, $1->is_quoted), @1); + } + | DolphinColId dolphin_indirection + { + $$ = makeRangeVar(NULL, NULL, @1); + const char* message = "improper qualified name (too many dotted names)"; + DolphinString* first = NULL; + DolphinString* second = NULL; + switch (list_length($2)) + { + case 1: + if (IsA(linitial($2), A_Star)) { + $$->catalogname = NULL; + $$->schemaname = NULL; + $$->relname = GetDolphinObjName($1->str, $1->is_quoted);; + } else { + check_dolphin_qualified_name($2, yyscanner); + $$->catalogname = NULL; + $$->schemaname = GetDolphinSchemaName($1->str, $1->is_quoted); + first = (DolphinString*)linitial($2); + $$->relname = GetDolphinObjName(first->str, first->is_quoted); + } + break; + case 2: + if (IsA(lsecond($2), A_Star)) { + $$->catalogname = NULL; + $$->schemaname = GetDolphinSchemaName($1->str, $1->is_quoted);; + if (!(IsA(linitial($2), String))) { + parser_yyerror("syntax error"); + } + first = (DolphinString*)linitial($2); + $$->relname = GetDolphinSchemaName(first->str, first->is_quoted); + } else { + check_dolphin_qualified_name($2, yyscanner); + $$->catalogname = downcase_str($1->str, $1->is_quoted); + first = (DolphinString*)linitial($2); + second = (DolphinString*)lsecond($2); + $$->schemaname = GetDolphinSchemaName(first->str, first->is_quoted); + $$->relname = GetDolphinObjName(second->str, second->is_quoted); + } + break; + default: + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("improper qualified name (too many dotted names): %s", + NameListToString(lcons(makeString($1->str), GetNameListFromDolphinString($2)))), + parser_errposition(@1))); + break; + } + } + ; dolphin_qualified_name: DolphinColId diff --git a/contrib/dolphin/plugin_parser/hint_scan.l b/contrib/dolphin/plugin_parser/hint_scan.l index bb2468ef9..388c4720a 100644 --- 
a/contrib/dolphin/plugin_parser/hint_scan.l +++ b/contrib/dolphin/plugin_parser/hint_scan.l @@ -60,6 +60,7 @@ static char * litbufdup(yyscan_t yyscanner); static void addlit(const char *ytext, int yleng, yyscan_t yyscanner); static void addlitchar(unsigned char ychar, yyscan_t yyscanner); extern void hint_scanner_yyerror(const char *msg, yyscan_t yyscanner); +static void hint_scanner_yyerror_emit(const char *msg, yyscan_t yyscanner); static char *litbuf_udeescape(unsigned char escape, yyscan_t yyscanner); static unsigned int hexval(unsigned char c, yyscan_t yyscanner); static unsigned char unescape_single_char(unsigned char c, yyscan_t yyscanner); @@ -70,6 +71,7 @@ static void addunicode(pg_wchar c, yyscan_t yyscanner); static void check_string_escape_warning(unsigned char ychar, yyscan_t yyscanner); static void check_escape_warning(yyscan_t yyscanner); static void check_unicode_value(pg_wchar c, char *loc, yyscan_t yyscanner); +extern void output_hint_warning(List* warning, int lev); %} @@ -275,7 +277,7 @@ xufailed [uU]& {quotecontinue} { /* ignore */ } -<> { hint_scanner_yyerror("unterminated bit string literal", yyscanner); return 0;} +<> { hint_scanner_yyerror_emit("unterminated bit string literal", yyscanner); return 0;} {xhstart} { /* Hexadecimal bit type. @@ -296,7 +298,7 @@ xufailed [uU]& yyextra->is_hint_str = true; return XCONST; } -<> { hint_scanner_yyerror("unterminated hexadecimal string literal", yyscanner); return 0;} +<> { hint_scanner_yyerror_emit("unterminated hexadecimal string literal", yyscanner); return 0;} {xnstart} { /* National character. 
@@ -453,7 +455,7 @@ xufailed [uU]& /* This is only needed for \ just before EOF */ addlitchar(yytext[0], yyscanner); } -<> { hint_scanner_yyerror("unterminated quoted string", yyscanner); return 0; } +<> { hint_scanner_yyerror_emit("unterminated quoted string", yyscanner); return 0; } {dolqdelim} { yyextra->dolqstart = pstrdup(yytext); @@ -498,7 +500,7 @@ xufailed [uU]& /* This is only needed for $ inside the quoted text */ addlitchar(yytext[0], yyscanner); } -<> { hint_scanner_yyerror("unterminated dollar-quoted string", yyscanner); return 0; } +<> { hint_scanner_yyerror_emit("unterminated dollar-quoted string", yyscanner); return 0; } {xdstart} { BEGIN(xd); @@ -562,7 +564,7 @@ xufailed [uU]& {xdinside} { addlit(yytext, yyleng, yyscanner); } -<> { hint_scanner_yyerror("unterminated quoted identifier", yyscanner); return 0; } +<> { hint_scanner_yyerror_emit("unterminated quoted identifier", yyscanner); return 0; } {xufailed} { char *ident; @@ -729,6 +731,15 @@ hint_scanner_yyerror(const char *msg, yyscan_t yyscanner) return; } +static void +hint_scanner_yyerror_emit(const char* msg, yyscan_t yyscanner) +{ + output_hint_warning(u_sess->parser_cxt.hint_warning, WARNING); + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("LINE %d: %s at '%s'", yyget_lineno(yyscanner), msg, yyget_text(yyscanner)))); +} + static int process_integer_literal(const char *token, YYSTYPE *lval) { diff --git a/contrib/dolphin/plugin_parser/parse_expr.cpp b/contrib/dolphin/plugin_parser/parse_expr.cpp index 23b5677d9..41ec4be7e 100644 --- a/contrib/dolphin/plugin_parser/parse_expr.cpp +++ b/contrib/dolphin/plugin_parser/parse_expr.cpp @@ -500,6 +500,7 @@ Node *transformExprRecurse(ParseState *pstate, Node *expr) case T_UserVar: { result = transformUserVar((UserVar *)expr); + pstate->has_uservar = true; break; } @@ -1029,64 +1030,68 @@ Node* transformColumnRef(ParseState* pstate, ColumnRef* cref) AssertEreport(IsA(field1, String), MOD_OPT, ""); relname = strVal(field1); - /* Locate 
the referenced RTE */ - if (hasplus) { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); - } else { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); - } + ParseState* old_pstate = pstate; + do { + /* Locate the referenced RTE */ + if (hasplus) { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); + } else { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); + } - /* check if it's sequence function call, like: sequence1.nextval */ - if (rte == NULL && IsSequenceFuncCall(NULL, field1, field2)) { - return transformSequenceFuncCall(pstate, NULL, field1, field2, cref->location); + /* check if it's sequence function call, like: sequence1.nextval */ + if (rte == NULL && IsSequenceFuncCall(NULL, field1, field2)) { + return transformSequenceFuncCall(pstate, NULL, field1, field2, cref->location); - } else if (rte == NULL) { - crerr = CRERR_NO_RTE; - break; - } + } else if (rte == NULL) { + crerr = CRERR_NO_RTE; + break; + } - /* Whole-row reference? */ - if (IsA(field2, A_Star)) { - if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { - Node* row_expr = convertStarToCRef(rte, NULL, NULL, relname, cref->location); - node = transformExprRecurse(pstate, row_expr); - } else { - node = transformWholeRowRef(pstate, rte, cref->location); + /* Whole-row reference? 
*/ + if (IsA(field2, A_Star)) { + if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { + Node* row_expr = convertStarToCRef(rte, NULL, NULL, relname, cref->location); + node = transformExprRecurse(pstate, row_expr); + } else { + node = transformWholeRowRef(pstate, rte, cref->location); + } + break; } - break; - } - AssertEreport(IsA(field2, String), MOD_OPT, ""); - colname = strVal(field2); + AssertEreport(IsA(field2, String), MOD_OPT, ""); + colname = strVal(field2); - if (rte->rtekind == RTE_SUBQUERY && rte->swSubExist) { - cref = fixSWNameSubLevel(rte, relname, &colname); - } + if (rte->rtekind == RTE_SUBQUERY && rte->swSubExist) { + cref = fixSWNameSubLevel(rte, relname, &colname); + } - if (pstate->p_hasStartWith || rte->swConverted) { - Node *expr = transformStartWithColumnRef(pstate, cref, &colname); + if (pstate->p_hasStartWith || rte->swConverted) { + Node *expr = transformStartWithColumnRef(pstate, cref, &colname); - /* function case, return directly */ - if (expr != NULL) { - return expr; - } + /* function case, return directly */ + if (expr != NULL) { + return expr; + } - if (strstr(colname, "@")) { - ListCell *lc = NULL; + if (strstr(colname, "@")) { + ListCell *lc = NULL; - foreach(lc, pstate->p_rtable) { - RangeTblEntry *tbl = (RangeTblEntry *)lfirst(lc); + foreach(lc, pstate->p_rtable) { + RangeTblEntry *tbl = (RangeTblEntry *)lfirst(lc); - if (tbl->relname != NULL && - strcmp(tbl->relname, "tmp_reuslt") == 0) { - rte = tbl; - break; + if (tbl->relname != NULL && + strcmp(tbl->relname, "tmp_reuslt") == 0) { + rte = tbl; + break; + } } } } - } - node = ParseColumnRef(pstate, rte, colname, cref); + node = ParseColumnRef(old_pstate, rte, colname, cref); + } while (NULL == node && NULL != (pstate = pstate->parentParseState) && DB_IS_CMPT(A_FORMAT)); + pstate = old_pstate; break; } case 3: { @@ -1099,37 +1104,41 @@ Node* transformColumnRef(ParseState* pstate, ColumnRef* cref) AssertEreport(IsA(field2, String), 
MOD_OPT, ""); relname = strVal(field2); - /* Locate the referenced RTE */ - if (hasplus) { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); - } else { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); - } + ParseState* old_pstate = pstate; + do { + /* Locate the referenced RTE */ + if (hasplus) { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); + } else { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); + } - /* check if it's sequence function call, like: nsp.sequence.nextval */ - if (rte == NULL && IsSequenceFuncCall(field1, field2, field3)) { - return transformSequenceFuncCall(pstate, field1, field2, field3, cref->location); + /* check if it's sequence function call, like: nsp.sequence.nextval */ + if (rte == NULL && IsSequenceFuncCall(field1, field2, field3)) { + return transformSequenceFuncCall(pstate, field1, field2, field3, cref->location); - } else if (rte == NULL) { - crerr = CRERR_NO_RTE; - break; - } + } else if (rte == NULL) { + crerr = CRERR_NO_RTE; + break; + } - /* Whole-row reference? */ - if (IsA(field3, A_Star)) { - if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { - Node* row_expr = convertStarToCRef(rte, NULL, nspname, relname, cref->location); - node = transformExprRecurse(pstate, row_expr); - } else { - node = transformWholeRowRef(pstate, rte, cref->location); + /* Whole-row reference? 
*/ + if (IsA(field3, A_Star)) { + if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { + Node* row_expr = convertStarToCRef(rte, NULL, nspname, relname, cref->location); + node = transformExprRecurse(pstate, row_expr); + } else { + node = transformWholeRowRef(pstate, rte, cref->location); + } + break; } - break; - } - AssertEreport(IsA(field3, String), MOD_OPT, ""); - colname = strVal(field3); + AssertEreport(IsA(field3, String), MOD_OPT, ""); + colname = strVal(field3); - node = ParseColumnRef(pstate, rte, colname, cref); + node = ParseColumnRef(old_pstate, rte, colname, cref); + } while (NULL == node && NULL != (pstate = pstate->parentParseState) && DB_IS_CMPT(A_FORMAT)); + pstate = old_pstate; break; } case 4: { @@ -1154,37 +1163,41 @@ Node* transformColumnRef(ParseState* pstate, ColumnRef* cref) break; } - /* Locate the referenced RTE */ - if (hasplus) { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); - } else { - rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); - } + ParseState* old_pstate = pstate; + do { + /* Locate the referenced RTE */ + if (hasplus) { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, NULL); + } else { + rte = refnameRangeTblEntry(pstate, nspname, relname, cref->location, &levels_up); + } - /* check if it's sequence function call, like: nsp.sequence.nextval */ - if (rte == NULL && IsSequenceFuncCall(field2, field3, field4)) { - return transformSequenceFuncCall(pstate, field2, field3, field4, cref->location); + /* check if it's sequence function call, like: nsp.sequence.nextval */ + if (rte == NULL && IsSequenceFuncCall(field2, field3, field4)) { + return transformSequenceFuncCall(pstate, field2, field3, field4, cref->location); - } else if (rte == NULL) { - crerr = CRERR_NO_RTE; - break; - } + } else if (rte == NULL) { + crerr = CRERR_NO_RTE; + break; + } - /* Whole-row reference? 
*/ - if (IsA(field4, A_Star)) { - if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { - Node* row_expr = convertStarToCRef(rte, catname, nspname, relname, cref->location); - node = transformExprRecurse(pstate, row_expr); - } else { - node = transformWholeRowRef(pstate, rte, cref->location); + /* Whole-row reference? */ + if (IsA(field4, A_Star)) { + if (OrientedIsCOLorPAX(rte) || RelIsSpecifiedFTbl(rte, HDFS) || RelIsSpecifiedFTbl(rte, OBS)) { + Node* row_expr = convertStarToCRef(rte, catname, nspname, relname, cref->location); + node = transformExprRecurse(pstate, row_expr); + } else { + node = transformWholeRowRef(pstate, rte, cref->location); + } + break; } - break; - } - AssertEreport(IsA(field4, String), MOD_OPT, ""); - colname = strVal(field4); + AssertEreport(IsA(field4, String), MOD_OPT, ""); + colname = strVal(field4); - node = ParseColumnRef(pstate, rte, colname, cref); + node = ParseColumnRef(old_pstate, rte, colname, cref); + } while (NULL == node && NULL != (pstate = pstate->parentParseState) && DB_IS_CMPT(A_FORMAT)); + pstate = old_pstate; break; } default: diff --git a/contrib/dolphin/plugin_parser/parse_func.cpp b/contrib/dolphin/plugin_parser/parse_func.cpp index 1b9574b77..585bddc74 100644 --- a/contrib/dolphin/plugin_parser/parse_func.cpp +++ b/contrib/dolphin/plugin_parser/parse_func.cpp @@ -39,8 +39,10 @@ static Oid FuncNameAsType(List* funcname); static Node* ParseComplexProjection(ParseState* pstate, char* funcname, Node* first_arg, int location); -static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs); +static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs, bool* defaultValid); static Oid cl_get_input_param_original_type(Oid func_oid, int argno); +static bool CheckDefaultArgsPosition(int2vector* defaultargpos, int pronargdefaults, int ndargs, + int pronallargs, int pronargs, HeapTuple procTup); /* * Parse a function call @@ -1887,7 +1889,14 @@ 
FuncDetailCode func_get_detail(List* funcname, List* fargs, List* fargnames, int ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmodule(MOD_OPT), errmsg("not enough default arguments"))); - *argdefaults = GetDefaultVale(*funcid, best_candidate->argnumbers, best_candidate->ndargs); + bool defaultValid = true; + *argdefaults = GetDefaultVale(*funcid, best_candidate->argnumbers, best_candidate->ndargs, &defaultValid); + if (!defaultValid) { + ereport(DEBUG3, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_OPT), errmsg("default arguments pos is not right."))); + return FUNCDETAIL_NOTFOUND; + } } if (pform->proisagg) result = FUNCDETAIL_AGGREGATE; @@ -2347,7 +2356,7 @@ Oid LookupAggNameTypeNames(List* aggname, List* argtypes, bool noError) } // fetch default args if caller wants 'em -static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs) +static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs, bool* defaultValid) { HeapTuple tuple; Form_pg_proc formproc; @@ -2444,6 +2453,9 @@ static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs) while (ndelete-- > 0) { defaults = list_delete_first(defaults); } + + *defaultValid = CheckDefaultArgsPosition(defaultargpos, pronargdefaults, ndargs, + pronallargs, pronargs, tuple); } if (argtypes != NULL) @@ -2463,6 +2475,49 @@ static List* GetDefaultVale(Oid funcoid, const int* argnumbers, int ndargs) return defaults; } +static bool CheckDefaultArgs(int2vector* defaultargpos, int pronargdefaults, int pronallargs, + int pronargs, HeapTuple proctup) +{ + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT || PROC_UNCHECK_DEFAULT_PARAM) { + return true; + } + // if the func has out param, but did not enable_out_param_override, we don't check the defaultpos + if (pronallargs > pronargs && !enable_out_param_override()) { + return true; + } + // the pg_catalog's func can omit out param, so we don't check the defaultpos + bool isnull = false; + Datum namespaceDatum = 
SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_pronamespace, &isnull); + if (!isnull && IsAformatStyleFunctionOid(DatumGetObjectId(namespaceDatum))) { + return true; + } + // if the func has the default args without position, we consider the position is at the end, no error + if (defaultargpos->dim1 == 0 && pronargdefaults > 0) { + return true; + } + return false; +} + +/* + * Check whether the parameter is in the appropriate position with default values + */ +static bool CheckDefaultArgsPosition(int2vector* defaultargpos, int pronargdefaults, int ndargs, + int pronallargs, int pronargs, HeapTuple proctup) +{ + if (CheckDefaultArgs(defaultargpos, pronargdefaults, pronallargs, pronargs, proctup)) { + return true; + } + // Check whether the defaultpos are at the end of the parameter list from back to front + int argIndex = pronallargs - 1; + int defaultposIndex = pronargdefaults - 1; + for (; argIndex >= pronallargs - ndargs; argIndex--, defaultposIndex--) { + if (defaultargpos->values[defaultposIndex] != argIndex) { + return false; + } + } + return true; +} + /* * Replace column managed type with original type * to identify overloaded functions diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp index 958f55d81..e931f02e0 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp +++ b/contrib/dolphin/plugin_pl/plpgsql/src/pl_comp.cpp @@ -805,6 +805,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, func->fn_searchpath->addCatalog = true; func->fn_searchpath->addTemp = true; func->ns_top = curr_compile->ns_top; + func->guc_stat = u_sess->utils_cxt.behavior_compat_flags; if (is_dml_trigger) func->fn_is_trigger = PLPGSQL_DML_TRIGGER; diff --git a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp index 1f6e01e37..0700a95ff 100644 --- a/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp +++ 
b/contrib/dolphin/plugin_pl/plpgsql/src/pl_handler.cpp @@ -2381,4 +2381,4 @@ static char* replace_html_entity(const char* input) } result[j] = '\0'; return result; -} \ No newline at end of file +} diff --git a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp index 81b42cd48..c6716806c 100644 --- a/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/jsonfuncs.cpp @@ -3199,8 +3199,8 @@ addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb) elog(ERROR, "unexpected parent oe nested structure."); } } else { - while((type == JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { - if (type = WJB_ELEM || type == WJB_KEY || type == WJB_VALUE) { + while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { + if (type == WJB_ELEM || type == WJB_KEY || type == WJB_VALUE) { (void)pushJsonbValue(jbps, type, &v); } else { (void)pushJsonbValue(jbps, type, NULL); @@ -3345,7 +3345,6 @@ static void setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_null if (!done && k.string.len == VARSIZE_ANY_EXHDR(pathelem) && memcmp(k.string.val, VARDATA_ANY(pathelem), k.string.len) == 0) { - done = true; if (level == path_len - 1) { /* @@ -3364,6 +3363,7 @@ static void setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_null (void) pushJsonbValue(st, WJB_KEY, &k); addJsonbToParseState(st, newval); } + done = true; } else { (void) pushJsonbValue(st, r, &k); setPath(it, path_elems, path_nulls, path_len, @@ -3473,7 +3473,6 @@ static void setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls int r; if (i == idx && level < path_len) { - done = true; if (level == path_len - 1) { r = JsonbIteratorNext(it, &v, true); /* skip */ @@ -3491,6 +3490,7 @@ static void setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_REPLACE)) addJsonbToParseState(st, newval); + done = true; } else (void) setPath(it, path_elems, path_nulls, 
path_len, st, level + 1, newval, op_type); } else { diff --git a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp index 4a268e915..f52ceac08 100644 --- a/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp +++ b/contrib/dolphin/plugin_utils/adt/pgstatfuncs.cpp @@ -85,6 +85,8 @@ #include "ddes/dms/ss_dms_recovery.h" #include "utils/json.h" #include "utils/jsonapi.h" +#include "access/ondemand_extreme_rto/page_redo.h" + #include "plugin_postgres.h" #define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32*)&(var)))) @@ -97,6 +99,7 @@ #define DISPLACEMENTS_VALUE 32 #define MAX_DURATION_TIME 60 #define DSS_IO_STAT_COLUMN_NUM 3 +#define ONDEMAND_RECOVERY_STAT_COLUMN_NUM 10 const uint32 INDEX_STATUS_VIEW_COL_NUM = 3; @@ -14769,6 +14772,113 @@ Datum track_memory_context_detail(PG_FUNCTION_ARGS) } } +Datum get_ondemand_recovery_status(PG_FUNCTION_ARGS) +{ + if (!ENABLE_ONDEMAND_RECOVERY) { + ereport(ERROR, (errmsg("This function only supports when enable ss_enable_ondemand_recovery."))); + } + Datum result; + TupleDesc tupdesc; + ondemand_recovery_stat stat; + errno_t errorno = EOK; + + ondemand_extreme_rto::GetOndemandRecoveryStatus(&stat); + // tuple header + int i = 1; + tupdesc = CreateTemplateTupleDesc(ONDEMAND_RECOVERY_STAT_COLUMN_NUM, false); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "primary_checkpoint_redo_lsn", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "realtime_build_replayed_lsn", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "hashmap_used_blocks", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "hashmap_total_blocks", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "trxn_queue_blocks", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "seg_queue_blocks", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "in_ondemand_recovery", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, 
"ondemand_recovery_status", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "realtime_build_status", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "recovery_pause_status", TEXTOID, -1, 0); + + tupdesc = BlessTupleDesc(tupdesc); + + // tuple body + char redoLocation[MAXFNAMELEN]; + char replayedLocation[MAXFNAMELEN]; + + errorno = snprintf_s(redoLocation, sizeof(redoLocation), sizeof(redoLocation) - 1, "%X/%X", + (uint32)(stat.checkpointPtr >> 32), (uint32)stat.checkpointPtr); + securec_check_ss(errorno, "", ""); + errorno = snprintf_s(replayedLocation, sizeof(replayedLocation), sizeof(replayedLocation) - 1, "%X/%X", + (uint32)(stat.replayedPtr >> 32), (uint32)stat.replayedPtr); + securec_check_ss(errorno, "", ""); + + Datum values[ONDEMAND_RECOVERY_STAT_COLUMN_NUM]; + bool nulls[ONDEMAND_RECOVERY_STAT_COLUMN_NUM] = {false}; + i = 0; + values[i++] = CStringGetTextDatum(redoLocation); + values[i++] = CStringGetTextDatum(replayedLocation); + values[i++] = UInt32GetDatum(stat.hmpUsedBlkNum); + values[i++] = UInt32GetDatum(stat.hmpTotalBlkNum); + values[i++] = UInt32GetDatum(stat.trxnQueueNum); + values[i++] = UInt32GetDatum(stat.segQueueNum); + values[i++] = BoolGetDatum(stat.inOndemandRecovery); + + switch (stat.ondemandRecoveryStatus) { + case CLUSTER_IN_ONDEMAND_BUILD: + values[i++] = CStringGetTextDatum("ONDEMAND_RECOVERY_BUILD"); + break; + case CLUSTER_IN_ONDEMAND_REDO: + values[i++] = CStringGetTextDatum("ONDEMAND_RECOVERY_REDO"); + break; + case CLUSTER_NORMAL: + values[i++] = CStringGetTextDatum("NORMAL"); + break; + default: + ereport(ERROR, (errmsg("Invalid ondemand recovery status."))); + break; + } + + switch (stat.realtimeBuildStatus) { + case DISABLED: + values[i++] = CStringGetTextDatum("DISABLED"); + break; + case BUILD_NORMAL: + values[i++] = CStringGetTextDatum("BUILD_NORMAL"); + break; + case BUILD_TO_DISABLED: + values[i++] = CStringGetTextDatum("BUILD_TO_DISABLED"); + break; + case BUILD_TO_REDO: + values[i++] = 
CStringGetTextDatum("BUILD_TO_REDO"); + break; + default: + ereport(ERROR, (errmsg("Invalid realtime build status."))); + break; + } + + switch (stat.recoveryPauseStatus) { + case NOT_PAUSE: + values[i] = CStringGetTextDatum("NOT PAUSE"); + break; + case PAUSE_FOR_SYNC_REDO: + values[i] = CStringGetTextDatum("PAUSE(for sync record)"); + break; + case PAUSE_FOR_PRUNE_HASHMAP: + values[i] = CStringGetTextDatum("PAUSE(for hashmap full)"); + break; + case PAUSE_FOR_PRUNE_TRXN_QUEUE: + values[i] = CStringGetTextDatum("PAUSE(for trxn queue full)"); + break; + case PAUSE_FOR_PRUNE_SEG_QUEUE: + values[i] = CStringGetTextDatum("PAUSE(for seg queue full)"); + break; + default: + ereport(ERROR, (errmsg("Invalid recovery pause status."))); + break; + } + + HeapTuple heap_tuple = heap_form_tuple(tupdesc, values, nulls); + result = HeapTupleGetDatum(heap_tuple); + PG_RETURN_DATUM(result); +} /* * @Description : Get the statistical information for DSS IO, including read bytes, write bytes and io times. @@ -15008,6 +15118,7 @@ Datum query_node_reform_info(PG_FUNCTION_ARGS) ss_reform_info_t reform_info = iterate->reform_info; for (uint64 i = iterate->iterate_idx; i < DMS_MAX_INSTANCE; i++) { if (!((reform_info.old_bitmap | reform_info.new_bitmap) & (((uint64)1) << i))) { + iterate->iterate_idx++; continue; } @@ -15015,7 +15126,7 @@ Datum query_node_reform_info(PG_FUNCTION_ARGS) char tmp_buf[MAX_BUF_SIZE] = {0}; Datum values[10]; bool nulls[10] = {false}; - values[0] = UInt16GetDatum(i); + values[0] = UInt64GetDatum(i); if (i == (uint64)SS_MY_INST_ID) { switch (reform_info.reform_type) { case DMS_REFORM_TYPE_FOR_NORMAL_OPENGAUSS: @@ -15325,7 +15436,7 @@ Datum query_node_reform_info_from_dms(PG_FUNCTION_ARGS) dms_info_id_e reform_info_id = PG_GETARG_INT64(0) == 0 ? 
dms_info_id_e::DMS_INFO_REFORM_LAST : dms_info_id_e::DMS_INFO_REFORM_CURRENT; if (!ENABLE_DMS) { - ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info without shared storage deployment!"))); + ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info_from_dms without shared storage deployment!"))); } FuncCallContext *funcctx = NULL; @@ -15424,7 +15535,7 @@ Datum query_all_drc_info(PG_FUNCTION_ARGS) { int type = PG_GETARG_INT64(0) == 0 ? en_drc_res_type::DRC_RES_PAGE_TYPE : en_drc_res_type::DRC_RES_LOCK_TYPE; if (!ENABLE_DMS) { - ereport(ERROR, (errmsg("[SS] cannot query query_node_reform_info without shared storage deployment!"))); + ereport(ERROR, (errmsg("[SS] cannot query query_all_drc_info without shared storage deployment!"))); } if (!SS_PRIMARY_MODE) { ereport(WARNING, (errmsg("[SS] query only in primary node. current node is standby!"))); -- Gitee From 422bebd16a7963e196f508558b44116ad0b27747 Mon Sep 17 00:00:00 2001 From: lukeman Date: Wed, 17 Jan 2024 19:56:03 +0800 Subject: [PATCH 198/434] =?UTF-8?q?=E5=A4=84=E7=90=86issue:=20acos?= =?UTF-8?q?=E4=B8=8D=E6=94=AF=E6=8C=81boolean=E3=80=81year=E4=BB=A5?= =?UTF-8?q?=E5=8F=8Ajson=E7=B1=BB=E5=9E=8B=EF=BC=8Cmysql=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/math_func.out | 80 +++++++++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 3 + .../dolphin/sql/builtin_funcs/math_func.sql | 15 ++++ .../upgrade_script/dolphin--2.0--3.0.sql | 6 ++ 4 files changed, 104 insertions(+) diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out b/contrib/dolphin/expected/builtin_funcs/math_func.out index 799cdbe84..263abdf16 100644 --- a/contrib/dolphin/expected/builtin_funcs/math_func.out +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -201,6 +201,85 @@ select * from test_double_exp order by 1; 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 
2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 2.71828182845905 | 1096.63315842846 | 2.71828182845905 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 3.42122953628967 | 2.71828182845905 | 148.413159102577 | 1 (2 rows) +-- acos math function +reset dolphin.sql_mode; +select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`), acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`), acos(`varbinary`), acos(`tinyblob`), acos(`blob`), acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a " +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: acos +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: 
referenced column: acos + acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos | acos +------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+------+----------------- + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | 0 | | | | | | | | | | | | | | | | | | 0 | | 1.5707963267949 +(1 row) + +create table test_double_acos(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double, d27 double, d28 double, d29 double, d30 double, d31 double, d32 double, d33 double, d34 double); +insert ignore into test_double_acos select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`),acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`),acos(`varbinary`), acos(`tinyblob`), acos(`blob`), acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a " +CONTEXT: referenced column: d23 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d24 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d25 +WARNING: invalid input 
syntax for type double precision: "1.23a" +CONTEXT: referenced column: d26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d28 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d29 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d30 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d31 +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: d34 +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +insert into test_double_acos select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`),acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`),acos(`varbinary`), acos(`tinyblob`), acos(`blob`), acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d23 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d24 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d25 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d28 +WARNING: invalid input syntax for type double 
precision: "1.23a" +CONTEXT: referenced column: d29 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d30 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: d31 +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: d34 +select * from test_double_acos order by 1; + d1 | d2 | d3 | d4 | d5 | d6 | d7 | d8 | d9 | d10 | d11 | d12 | d13 | d14 | d15 | d16 | d17 | d18 | d19 | d20 | d21 | d22 | d23 | d24 | d25 | d26 | d27 | d28 | d29 | d30 | d31 | d32 | d33 | d34 +----+----+----+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+----------------- + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | 0 | | | | | | | | | | | | | | | | | | 0 | | 1.5707963267949 + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | 0 | | | | | | | | | | | | | | | | | | 0 | | 1.5707963267949 +(2 rows) + -- ln math function select ln(`int1`), @@ -278,6 +357,7 @@ select exp(-1000); 0 (1 row) +drop table if exists test_double_acos; drop table if exists test_double_exp; drop table if exists test_double_degrees; drop table if exists test_type_table; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 753417018..49f4ccb82 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -368,6 +368,9 @@ DROP FUNCTION IF EXISTS pg_catalog.binary_cmp(binary, binary); DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); +DROP FUNCTION IF EXISTS pg_catalog.acos(boolean); +DROP FUNCTION IF EXISTS pg_catalog.acos(year); +DROP FUNCTION IF EXISTS pg_catalog.acos(json); DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(bit); 
DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(binary); diff --git a/contrib/dolphin/sql/builtin_funcs/math_func.sql b/contrib/dolphin/sql/builtin_funcs/math_func.sql index e83a91a51..03ef27d62 100644 --- a/contrib/dolphin/sql/builtin_funcs/math_func.sql +++ b/contrib/dolphin/sql/builtin_funcs/math_func.sql @@ -76,6 +76,20 @@ insert into test_double_exp select exp(`int1`), exp(`uint1`), exp(`int2`), exp(` select * from test_double_exp order by 1; +-- acos math function +reset dolphin.sql_mode; +select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`), acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`), acos(`varbinary`), acos(`tinyblob`), acos(`blob`), acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; + +create table test_double_acos(d1 double, d2 double, d3 double, d4 double, d5 double, d6 double, d7 double, d8 double, d9 double, d10 double, d11 double, d12 double, d13 double, d14 double, d15 double, d16 double, d17 double, d18 double, d19 double, d20 double, d21 double, d22 double, d23 double, d24 double, d25 double, d26 double, d27 double, d28 double, d29 double, d30 double, d31 double, d32 double, d33 double, d34 double); + +insert ignore into test_double_acos select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`),acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`),acos(`varbinary`), acos(`tinyblob`), acos(`blob`), 
acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; + +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; + +insert into test_double_acos select acos(`int1`), acos(`uint1`), acos(`int2`), acos(`uint2`), acos(`int4`), acos(`uint4`), acos(`int8`), acos(`uint8`), acos(`float4`), acos(`float8`), acos(`numeric`),acos(`bit1`), acos(`bit64`),acos(`boolean`), acos(`date`), acos(`time`), acos(`time(4)`), acos(`datetime`),acos(`datetime(4)`), acos(`timestamp`), acos(`timestamp(4)`), acos(`year`), acos(`char`), acos(`varchar`), acos(`binary`),acos(`varbinary`), acos(`tinyblob`), acos(`blob`), acos(`mediumblob`), acos(`longblob`), acos(`text`), acos(`enum_t`), acos(`set_t`), acos(`json`) from test_type_table; + +select * from test_double_acos order by 1; + -- ln math function select ln(`int1`), @@ -117,6 +131,7 @@ select exp(709); select exp(710); select exp(-1000); +drop table if exists test_double_acos; drop table if exists test_double_exp; drop table if exists test_double_degrees; drop table if exists test_type_table; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 04f5f8104..472a41a88 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1003,11 +1003,17 @@ CREATE OPERATOR pg_catalog.>=(leftarg = longblob, rightarg = mediumblob, procedu DROP FUNCTION IF EXISTS pg_catalog.degrees(boolean); DROP FUNCTION IF EXISTS pg_catalog.degrees(year); DROP FUNCTION IF EXISTS pg_catalog.degrees(json); +DROP FUNCTION IF EXISTS pg_catalog.acos(boolean); +DROP FUNCTION IF EXISTS pg_catalog.acos(year); +DROP FUNCTION IF EXISTS pg_catalog.acos(json); DROP FUNCTION IF EXISTS pg_catalog.exp(year); DROP FUNCTION IF EXISTS pg_catalog.exp(json); CREATE OR REPLACE FUNCTION pg_catalog.degrees(boolean) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 
'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; CREATE OR REPLACE FUNCTION pg_catalog.degrees(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.degrees(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.acos(boolean) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.acos(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.acos(year) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.acos(cast($1 as double precision))'; +CREATE OR REPLACE FUNCTION pg_catalog.acos(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.acos(cast($1 as double precision))'; DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(bit) CASCADE; DROP FUNCTION IF EXISTS pg_catalog.inet_ntoa(binary) CASCADE; -- Gitee From 7b6bc69e624293094202210ae75f4277618a5c6c Mon Sep 17 00:00:00 2001 From: chenbd Date: Thu, 18 Jan 2024 17:12:58 +0800 Subject: [PATCH 199/434] fix natural join column name --- contrib/dolphin/expected/uint_join.out | 43 +++++++++++++++++++ .../dolphin/plugin_parser/parse_clause.cpp | 22 +++++++++- contrib/dolphin/sql/uint_join.sql | 26 +++++++++++ 3 files changed, 90 insertions(+), 1 deletion(-) diff --git a/contrib/dolphin/expected/uint_join.out b/contrib/dolphin/expected/uint_join.out index 961986b9b..440681b9d 100644 --- a/contrib/dolphin/expected/uint_join.out +++ b/contrib/dolphin/expected/uint_join.out @@ -106,3 +106,46 @@ NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table t1 drop cascades to table t2 reset current_schema; +create schema scott; +drop table if exists scott.emp; +NOTICE: table "emp" does not exist, skipping +drop table if exists scott.dept; +NOTICE: table "dept" does not exist, skipping +create table scott.dept(DEPTNO 
number(2) constraint pk_dept primary key,mgr number(4), dname varchar2(14) ,loc varchar2(13) ); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_dept" for table "dept" +create table scott.emp +(empno number(4) constraint pk_emp primary key, +dept varchar2(10), +job varchar2(9), +mgr number(4), +hiredate date, +sal number(7,2), +comm number(7,2), +deptno number(2) constraint fk_deptno references scott.dept); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_emp" for table "emp" +select * from scott.emp join scott.dept on emp.deptno = dept.deptno; + empno | dept | job | mgr | hiredate | sal | comm | deptno | DEPTNO | mgr | dname | loc +-------+------+-----+-----+----------+-----+------+--------+--------+-----+-------+----- +(0 rows) + +--for converage +select * from scott.emp join scott.dept using(deptno); + deptno | empno | dept | job | mgr | hiredate | sal | comm | mgr | dname | loc +--------+-------+------+-----+-----+----------+-----+------+-----+-------+----- +(0 rows) + +select * from scott.emp join scott.dept using(deptno, mgr); + deptno | mgr | empno | dept | job | hiredate | sal | comm | dname | loc +--------+-----+-------+------+-----+----------+-----+------+-------+----- +(0 rows) + +select * from scott.emp natural join scott.dept; + mgr | deptno | empno | dept | job | hiredate | sal | comm | dname | loc +-----+--------+-------+------+-----+----------+-----+------+-------+----- +(0 rows) + +drop schema scott cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table scott.dept +drop cascades to table scott.emp +reset current_schema; diff --git a/contrib/dolphin/plugin_parser/parse_clause.cpp b/contrib/dolphin/plugin_parser/parse_clause.cpp index 4c7347148..d3f60c828 100644 --- a/contrib/dolphin/plugin_parser/parse_clause.cpp +++ b/contrib/dolphin/plugin_parser/parse_clause.cpp @@ -445,7 +445,11 @@ static void extractRemainingColumns( foreach (cnames, common_colnames) { char* ccolname = 
strVal(lfirst(cnames)); +#ifdef DOLPHIN + if (strcasecmp(colname, ccolname) == 0) { +#else if (strcmp(colname, ccolname) == 0) { +#endif match = true; break; } @@ -1136,7 +1140,11 @@ Node* transformFromClauseItem(ParseState* pstate, Node* n, RangeTblEntry** top_r foreach (rx, r_colnames) { char* r_colname = strVal(lfirst(rx)); +#ifdef DOLPHIN + if (strcasecmp(l_colname, r_colname) == 0) { +#else if (strcmp(l_colname, r_colname) == 0) { +#endif m_name = makeString(l_colname); break; } @@ -1183,8 +1191,12 @@ Node* transformFromClauseItem(ParseState* pstate, Node* n, RangeTblEntry** top_r /* Check for USING(foo,foo) */ foreach (col, res_colnames) { char* res_colname = strVal(lfirst(col)); - +#ifdef DOLPHIN + /* match column names in dolphin, should be case insensitive */ + if (strcasecmp(res_colname, u_colname) == 0) { +#else if (strcmp(res_colname, u_colname) == 0) { +#endif ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column name \"%s\" appears more than once in USING clause", u_colname))); @@ -1196,7 +1208,11 @@ Node* transformFromClauseItem(ParseState* pstate, Node* n, RangeTblEntry** top_r foreach (col, l_colnames) { char* l_colname = strVal(lfirst(col)); +#ifdef DOLPHIN + if (strcasecmp(l_colname, u_colname) == 0) { +#else if (strcmp(l_colname, u_colname) == 0) { +#endif if (l_index >= 0) ereport(ERROR, (errcode(ERRCODE_AMBIGUOUS_COLUMN), @@ -1217,7 +1233,11 @@ Node* transformFromClauseItem(ParseState* pstate, Node* n, RangeTblEntry** top_r foreach (col, r_colnames) { char* r_colname = strVal(lfirst(col)); +#ifdef DOLPHIN + if (strcasecmp(r_colname, u_colname) == 0) { +#else if (strcmp(r_colname, u_colname) == 0) { +#endif if (r_index >= 0) { ereport(ERROR, (errcode(ERRCODE_AMBIGUOUS_COLUMN), diff --git a/contrib/dolphin/sql/uint_join.sql b/contrib/dolphin/sql/uint_join.sql index 32927eb90..2fb7f8c77 100644 --- a/contrib/dolphin/sql/uint_join.sql +++ b/contrib/dolphin/sql/uint_join.sql @@ -22,4 +22,30 @@ select /*+ hashjoin(t1 t2)*/ * from t1 join 
t2; select /*+ mergejoin(t1 t2)*/ * from t1 join t2; drop schema uint_join cascade; +reset current_schema; + +create schema scott; + +drop table if exists scott.emp; +drop table if exists scott.dept; + +create table scott.dept(DEPTNO number(2) constraint pk_dept primary key,mgr number(4), dname varchar2(14) ,loc varchar2(13) ); + +create table scott.emp +(empno number(4) constraint pk_emp primary key, +dept varchar2(10), +job varchar2(9), +mgr number(4), +hiredate date, +sal number(7,2), +comm number(7,2), +deptno number(2) constraint fk_deptno references scott.dept); + +select * from scott.emp join scott.dept on emp.deptno = dept.deptno; +--for converage +select * from scott.emp join scott.dept using(deptno); +select * from scott.emp join scott.dept using(deptno, mgr); +select * from scott.emp natural join scott.dept; + +drop schema scott cascade; reset current_schema; \ No newline at end of file -- Gitee From 1d4aeab03e0d4af47f08a25ed1cb07f6568af44b Mon Sep 17 00:00:00 2001 From: Mijamind Date: Thu, 18 Jan 2024 17:20:09 +0800 Subject: [PATCH 200/434] =?UTF-8?q?=E3=80=90=E8=B5=84=E6=BA=90=E6=B1=A0?= =?UTF-8?q?=E5=8C=96=E3=80=91SPQ=E4=BF=AE=E5=A4=8Dswitchover=E9=98=B6?= =?UTF-8?q?=E6=AE=B5=E4=BA=A7=E7=94=9Fcore=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/spq_plugin/include/spqplugin.h | 1 + contrib/spq_plugin/src/spq_opt.cpp | 4 +--- .../libspqos/src/error/CAutoExceptionStack.cpp | 3 +++ .../src/spq_optimizer_util/SPQOptimizer.cpp | 17 +++++++++++++++-- .../src/spq_optimizer_util/spq_wrappers.cpp | 3 +-- contrib/spq_plugin/src/spqplugin.cpp | 2 ++ 6 files changed, 23 insertions(+), 7 deletions(-) diff --git a/contrib/spq_plugin/include/spqplugin.h b/contrib/spq_plugin/include/spqplugin.h index d38553ff1..7b7b87296 100644 --- a/contrib/spq_plugin/include/spqplugin.h +++ b/contrib/spq_plugin/include/spqplugin.h @@ -17,5 +17,6 @@ extern "C" void _PG_init(void); extern "C" void 
_PG_fini(void); extern "C" void spqplugin_invoke(void); extern THR_LOCAL MemoryContext OptimizerMemoryContext; +extern THR_LOCAL bool SPQ_IN_PROCESSING; #endif // SPQPLUGIN_H diff --git a/contrib/spq_plugin/src/spq_opt.cpp b/contrib/spq_plugin/src/spq_opt.cpp index d3956d30e..83e9c3929 100644 --- a/contrib/spq_plugin/src/spq_opt.cpp +++ b/contrib/spq_plugin/src/spq_opt.cpp @@ -188,12 +188,10 @@ PlannedStmt *spq_planner(Query *parse, ParamListInfo boundParams) * transform it such that the grouped query appears as a subquery */ pqueryCopy = (Query *)transformGroupedWindows((Node *)pqueryCopy, NULL); - + CHECK_FOR_INTERRUPTS(); /* Ok, invoke SPQOPT. */ result = SPQOPTOptimizedPlan(pqueryCopy, &fUnexpectedFailure); - log_optimizer(result, fUnexpectedFailure); - CHECK_FOR_INTERRUPTS(); /* diff --git a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CAutoExceptionStack.cpp b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CAutoExceptionStack.cpp index b7f0ab8aa..a3003f848 100644 --- a/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CAutoExceptionStack.cpp +++ b/contrib/spq_plugin/src/spq_optimizer/libspqos/src/error/CAutoExceptionStack.cpp @@ -17,6 +17,7 @@ //--------------------------------------------------------------------------- #include "spqos/error/CAutoExceptionStack.h" +#include "miscadmin.h" using namespace spqos; @@ -35,6 +36,7 @@ CAutoExceptionStack::CAutoExceptionStack(void **global_exception_stack, m_global_error_context_stack(global_error_context_stack), m_error_context_stack(*global_error_context_stack) { + HOLD_INTERRUPTS(); } //--------------------------------------------------------------------------- @@ -49,6 +51,7 @@ CAutoExceptionStack::~CAutoExceptionStack() { *m_global_exception_stack = m_exception_stack; *m_global_error_context_stack = m_error_context_stack; + RESUME_INTERRUPTS(); } //--------------------------------------------------------------------------- diff --git 
a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp index 0ea3f196a..ca75075d9 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/SPQOptimizer.cpp @@ -34,6 +34,7 @@ bool optimizer_trace_fallback = false; extern MemoryContext MessageContext; +extern THR_LOCAL bool SPQ_IN_PROCESSING; void DelCException(CException **exception) { @@ -65,7 +66,7 @@ SPQOptimizer::SPQOPTOptimizedPlan( *had_unexpected_failure = false; CException *exception = NULL; - + SPQ_IN_PROCESSING = true; SPQOS_TRY { plStmt = COptTasks::SPQOPTOptimizedPlan(query, &spqopt_context); @@ -77,6 +78,7 @@ SPQOptimizer::SPQOPTOptimizedPlan( exception = new CException(ex.Major(), ex.Minor(), ex.Filename(), ex.Line()); } SPQOS_CATCH_END; + SPQ_IN_PROCESSING = false; if (exception == NULL) { return plStmt; } @@ -320,12 +322,23 @@ TerminateSPQOPT() } DelCException(&exception); } +void RecordBackTrace() +{ + if (SPQ_IN_PROCESSING == false) { + ereport(LOG, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPQ exit normal tid[%d]", gettid()))); + } else { + ereport(LOG, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPQ exit error tid[%d]", gettid()))); + } +} void UnInitSPQOPT(int status, Datum arg) { + if (status != 0) { + RecordBackTrace(); + } knl_session_context* session_back = u_sess; u_sess = (knl_session_context*) DatumGetPointer(arg); - TerminateSPQOPT(); + TerminateSPQOPT(); u_sess = session_back; } diff --git a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp index daf0b0470..19b4aac87 100644 --- a/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp +++ b/contrib/spq_plugin/src/spq_optimizer_util/spq_wrappers.cpp @@ -2697,8 +2697,7 @@ spqdb::IsAbortRequested(void) { // No SPQ_WRAP_START/END needed here. We just check these global flags, // it cannot throw an ereport(). 
- //return (QueryCancelPending || ProcDiePending); - return false; + return (t_thrd.int_cxt.ProcDiePending || t_thrd.int_cxt.QueryCancelPending || InterruptPending); } GpPolicy * diff --git a/contrib/spq_plugin/src/spqplugin.cpp b/contrib/spq_plugin/src/spqplugin.cpp index 64cdef558..78d280acb 100644 --- a/contrib/spq_plugin/src/spqplugin.cpp +++ b/contrib/spq_plugin/src/spqplugin.cpp @@ -37,6 +37,7 @@ THR_LOCAL ExecutorStart_hook_type spq_hook_ExecutorStart = NULL; THR_LOCAL spq_planner_hook_type backup_spq_planner_hook = NULL; THR_LOCAL bool HOOK_INIT = false; THR_LOCAL MemoryContext OptimizerMemoryContext = NULL; +THR_LOCAL bool SPQ_IN_PROCESSING = false; typedef struct SpqDirectReadWalkerContext { MethodPlanWalkerContext cxt; @@ -248,6 +249,7 @@ PlannedStmt* spq_optimize_query(Query* parse, int cursorOptions, ParamListInfo b instr_time starttime; double totaltime = 0; t_thrd.spq_ctx.spq_role = ROLE_UTILITY; + SPQ_IN_PROCESSING = false; if ((cursorOptions & CURSOR_OPT_SPQ_OK) && should_spq_planner(parse)) { t_thrd.spq_ctx.spq_role = ROLE_QUERY_COORDINTOR; t_thrd.spq_ctx.spq_session_id = u_sess->debug_query_id; -- Gitee From 71d8721a15c97271d0c4d0ceceb33d54ee27bf10 Mon Sep 17 00:00:00 2001 From: luo_zihao5524 Date: Fri, 19 Jan 2024 10:59:19 +0800 Subject: [PATCH 201/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dupsert=E5=88=97?= =?UTF-8?q?=E5=90=8D=E5=A4=A7=E5=B0=8F=E5=86=99=E6=95=8F=E6=84=9F=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/upsert.out | 21 +++++++++++++++++++++ contrib/dolphin/plugin_parser/analyze.cpp | 4 ++++ contrib/dolphin/sql/upsert.sql | 15 +++++++++++++++ 3 files changed, 40 insertions(+) diff --git a/contrib/dolphin/expected/upsert.out b/contrib/dolphin/expected/upsert.out index e04c8169c..e266e5884 100644 --- a/contrib/dolphin/expected/upsert.out +++ b/contrib/dolphin/expected/upsert.out @@ -779,6 +779,27 @@ NOTICE: CREATE TABLE / PRIMARY 
KEY will create implicit index "subpartition_03_ NOTICE: CREATE TABLE / UNIQUE will create implicit index "subpartition_03_col_3_tableoid_key" for table "subpartition_03" NOTICE: CREATE TABLE / UNIQUE will create implicit index "subpartition_03_col_1_col_2_col_3_key" for table "subpartition_03" NOTICE: CREATE TABLE / UNIQUE will create implicit index "subpartition_03_col_2_col_4_tableoid_key" for table "subpartition_03" +-- Test column name case sensitivity. +DROP TABLE IF EXISTS "TAB_STUDENT"; +NOTICE: table "TAB_STUDENT" does not exist, skipping +CREATE TABLE "TAB_STUDENT" ( +"DI_ID" int8 NOT NULL, +"DC_NAME" varchar(100), +"DC_SEX" text, +"DI_AGE" int4 +); +INSERT INTO "TAB_STUDENT" VALUES (1,'first','man',9) ON DUPLICATE KEY UPDATE DI_AGE = 10; +INSERT INTO "TAB_STUDENT" VALUES (2,'second','woman',9) ON DUPLICATE KEY UPDATE di_age = 10; +INSERT INTO "TAB_STUDENT" VALUES (3,'third','woman',9) ON DUPLICATE KEY UPDATE DI_age = 10; +SELECT * FROM "TAB_STUDENT"; + DI_ID | DC_NAME | DC_SEX | DI_AGE +-------+---------+--------+-------- + 1 | first | man | 9 + 2 | second | woman | 9 + 3 | third | woman | 9 +(3 rows) + +DROP TABLE "TAB_STUDENT"; create unique index subpartition_03_idx1 on subpartition_03(col_2, col_3, col_4) local; create index subpartition_03_idx2 on subpartition_03(col_3, col_1) local; create index subpartition_03_idx3 on subpartition_03(col_4) global; diff --git a/contrib/dolphin/plugin_parser/analyze.cpp b/contrib/dolphin/plugin_parser/analyze.cpp index d03cb4a2c..8f7b5646d 100644 --- a/contrib/dolphin/plugin_parser/analyze.cpp +++ b/contrib/dolphin/plugin_parser/analyze.cpp @@ -1763,7 +1763,11 @@ static void SetUpsertAttrnoState(ParseState* pstate, List *targetList) if (attr[ci].attisdropped) { continue; } +#ifdef DOLPHIN + if (pg_strcasecmp(name, attr[ci].attname.data) == 0) { +#else if (strcmp(name, attr[ci].attname.data) == 0) { +#endif rstate->usExplicitAttrNos[ni] = ci + 1; break; } diff --git a/contrib/dolphin/sql/upsert.sql 
b/contrib/dolphin/sql/upsert.sql index 02489ed6c..a7af727b5 100644 --- a/contrib/dolphin/sql/upsert.sql +++ b/contrib/dolphin/sql/upsert.sql @@ -459,6 +459,21 @@ PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) ), PARTITION p_list_7 VALUES (DEFAULT) ); + +-- Test column name case sensitivity. +DROP TABLE IF EXISTS "TAB_STUDENT"; +CREATE TABLE "TAB_STUDENT" ( +"DI_ID" int8 NOT NULL, +"DC_NAME" varchar(100), +"DC_SEX" text, +"DI_AGE" int4 +); +INSERT INTO "TAB_STUDENT" VALUES (1,'first','man',9) ON DUPLICATE KEY UPDATE DI_AGE = 10; +INSERT INTO "TAB_STUDENT" VALUES (2,'second','woman',9) ON DUPLICATE KEY UPDATE di_age = 10; +INSERT INTO "TAB_STUDENT" VALUES (3,'third','woman',9) ON DUPLICATE KEY UPDATE DI_age = 10; +SELECT * FROM "TAB_STUDENT"; +DROP TABLE "TAB_STUDENT"; + create unique index subpartition_03_idx1 on subpartition_03(col_2, col_3, col_4) local; create index subpartition_03_idx2 on subpartition_03(col_3, col_1) local; create index subpartition_03_idx3 on subpartition_03(col_4) global; -- Gitee From c2407e07e0886a0476b7b015d833de91d4a436be Mon Sep 17 00:00:00 2001 From: totaj Date: Fri, 19 Jan 2024 17:16:45 +0800 Subject: [PATCH 202/434] fix comment sequence bug. 
--- contrib/dolphin/expected/b_comments.out | 22 +++++++++++++++++++++- contrib/dolphin/plugin_parser/gram.y | 4 ++-- contrib/dolphin/sql/b_comments.sql | 4 ++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/contrib/dolphin/expected/b_comments.out b/contrib/dolphin/expected/b_comments.out index 3b66ff57d..1cbd094ff 100644 --- a/contrib/dolphin/expected/b_comments.out +++ b/contrib/dolphin/expected/b_comments.out @@ -258,6 +258,8 @@ create table t1(id bigint not null comment 'pk' primary key); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" create table t2(id bigint not null primary key comment 'pk' ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t2_pkey" for table "t2" +create table t3(id int comment 'user id' comment 'user_id' comment 'author id'); +create table t4(id int comment 'user id' comment 'user_id'); \d+ t1 Table "b_comments.t1" Column | Type | Modifiers | Storage | Stats target | Description @@ -278,8 +280,24 @@ Indexes: Has OIDs: no Options: orientation=row, compression=no +\d+ t3 + Table "b_comments.t3" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+-----------+---------+--------------+------------- + id | integer | | plain | | author id +Has OIDs: no +Options: orientation=row, compression=no + +\d+ t4 + Table "b_comments.t4" + Column | Type | Modifiers | Storage | Stats target | Description +--------+---------+-----------+---------+--------------+------------- + id | integer | | plain | | user_id +Has OIDs: no +Options: orientation=row, compression=no + drop schema b_comments cascade; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 20 other objects DETAIL: drop cascades to table test_unsupported drop cascades to table test_row drop cascades to table test_column @@ -298,4 +316,6 @@ drop cascades to table t_comment_0032 drop cascades to table t_comment_0034 drop cascades to table t1 drop cascades to table t2 +drop 
cascades to table t3 +drop cascades to table t4 reset search_path; diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index 01bf7461b..6f706392d 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -39309,7 +39309,7 @@ SplitColQualList(List *qualList, } else if (IsA(n, CommentStmt) && columnOptions != NULL) { - *columnOptions = lappend(*columnOptions, n); + *columnOptions = lcons(n, *columnOptions); } else { const char* message = "unexpected node type"; @@ -39378,7 +39378,7 @@ SplitColQualList(List *qualList, } else if (IsA(n, CommentStmt)) { - *columnOptions = lappend(*columnOptions, n); + *columnOptions = lcons(n, *columnOptions); } else { const char* message = "unexpected node type"; diff --git a/contrib/dolphin/sql/b_comments.sql b/contrib/dolphin/sql/b_comments.sql index 1ada076e9..5b7391e66 100644 --- a/contrib/dolphin/sql/b_comments.sql +++ b/contrib/dolphin/sql/b_comments.sql @@ -182,8 +182,12 @@ drop table if exists t1; drop table if exists t2; create table t1(id bigint not null comment 'pk' primary key); create table t2(id bigint not null primary key comment 'pk' ); +create table t3(id int comment 'user id' comment 'user_id' comment 'author id'); +create table t4(id int comment 'user id' comment 'user_id'); \d+ t1 \d+ t2 +\d+ t3 +\d+ t4 drop schema b_comments cascade; reset search_path; -- Gitee From feffa6ec3ca510d4df0bf26bc5c25474a662c7f1 Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Fri, 19 Jan 2024 21:05:28 +0800 Subject: [PATCH 203/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8VOYT=E6=89=80=E7=A4=BA=E7=9A=84set=E8=BD=AC?= =?UTF-8?q?date=E5=92=8Cdatetime=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8DI8VOYT=E6=89=80=E7=A4=BA=E7=9A=84set=E8=BD=ACdate?= =?UTF-8?q?=E5=92=8Cdatetime=E7=9A=84=E9=97=AE=E9=A2=98=20=E3=80=90?= 
=?UTF-8?q?=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90=E3=80=91:=201.=20set=20to?= =?UTF-8?q?=20date=20=E4=B8=BA=E8=B0=83=E7=94=A8=E6=98=BE=E5=BC=8F?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2=E7=9A=84text=5Fdate=5Fexplicit=E3=80=82=202.?= =?UTF-8?q?=20=E5=BD=93=E5=89=8D=E7=9A=84date=5Finternal=E6=B2=A1=E7=94=A8?= =?UTF-8?q?=E8=B0=83=E7=94=A8mysql=20=E5=85=BC=E5=AE=B9=E7=9A=84cstring=5F?= =?UTF-8?q?to=5Ftm=E5=87=BD=E6=95=B0=EF=BC=8C=E5=AF=BC=E8=87=B4=E8=A1=A8?= =?UTF-8?q?=E7=8E=B0=E5=92=8Cmysql=E4=B8=8D=E4=B8=80=E8=87=B4=E3=80=82=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88=E3=80=91:=201.?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0text=5Fdate=5Fexplicit=EF=BC=8C=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E8=BF=94=E5=9B=9ENULL=E3=80=822.=20=E5=8F=82=E8=80=83?= =?UTF-8?q?time=E5=87=BD=E6=95=B0=E7=9A=84=E5=A4=84=E7=90=86=E5=9C=A8date?= =?UTF-8?q?=5Finternal=E5=85=88=E8=B0=83=E7=94=A8mysql=E5=85=BC=E5=AE=B9?= =?UTF-8?q?=E7=9A=84=EF=BC=8C=20=E6=9C=89=E9=97=AE=E9=A2=98=E5=86=8D?= =?UTF-8?q?=E8=B0=83=E7=94=A8PG=E7=9A=84=E5=A4=84=E7=90=86=E8=BF=87?= =?UTF-8?q?=E7=A8=8B=E3=80=82=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80?= =?UTF-8?q?=E6=B1=82=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengau?= =?UTF-8?q?ssorg/dashboard=3Fissue=3DI8VOYT?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/b_compatibility_time_type.out | 6 +- .../b_compatibility_time_funcs.out | 10 +- contrib/dolphin/expected/timestamp_test.out | 229 +++++++++++++++++- contrib/dolphin/include/plugin_utils/date.h | 2 +- .../dolphin/include/plugin_utils/timestamp.h | 2 +- .../dolphin/plugin_parser/parse_coerce.cpp | 32 ++- contrib/dolphin/plugin_utils/adt/date.cpp | 151 +++++++----- .../dolphin/plugin_utils/adt/timestamp.cpp | 123 +++++++++- .../rollback_script/dolphin--3.0--2.0.sql | 30 ++- contrib/dolphin/sql/timestamp_test.sql | 19 ++ .../upgrade_script/dolphin--2.0--3.0.sql | 29 +++ 11 files changed, 540 insertions(+), 93 deletions(-) diff --git 
a/contrib/dolphin/expected/b_compatibility_time_type.out b/contrib/dolphin/expected/b_compatibility_time_type.out index aeee9f520..174abb605 100644 --- a/contrib/dolphin/expected/b_compatibility_time_type.out +++ b/contrib/dolphin/expected/b_compatibility_time_type.out @@ -793,9 +793,9 @@ CONTEXT: referenced column: timestamp SELECT 11::datetime; WARNING: timestamp out of range CONTEXT: referenced column: timestamp - timestamp ---------------------- - 0000-00-00 00:00:00 + timestamp +----------- + (1 row) SELECT 111::datetime; diff --git a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out index 47d70e7f3..438f6cb4b 100644 --- a/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out +++ b/contrib/dolphin/expected/builtin_funcs/b_compatibility_time_funcs.out @@ -880,13 +880,9 @@ CONTEXT: referenced column: date (1 row) select cast('2022-05-05 01' as date); -WARNING: invalid input syntax for type date: "2022-05-05 01" -LINE 1: select cast('2022-05-05 01' as date); - ^ -CONTEXT: referenced column: date - date ------- - + date +------------ + 2022-05-05 (1 row) select cast('2022-05-05 20-70' as date); diff --git a/contrib/dolphin/expected/timestamp_test.out b/contrib/dolphin/expected/timestamp_test.out index 5bb8950dd..0b488dac0 100644 --- a/contrib/dolphin/expected/timestamp_test.out +++ b/contrib/dolphin/expected/timestamp_test.out @@ -40,9 +40,9 @@ WARNING: invalid input syntax for type timestamp: " CONTEXT: referenced column: timestamp SQL function "bit_cast_datetime" statement 1 referenced column: timestamp - timestamp ---------------------- - 0000-00-00 00:00:00 + timestamp +----------- + (1 row) select '2022-01-01'::bit(64)::timestamp; @@ -263,3 +263,226 @@ select datetime '2022-1-12 12:23:23' or timestamp '00000000000000'; t (1 row) +reset dolphin.sql_mode; +create table t_set0004(c1 int not null auto_increment primary key, c2 set('2011-11-11', '2023-02-28 
11:23:00', '2024-01', '2025/01/01') default null, c3 set('red', 'yellow', 'blue') not null, c4 set('0', '1', '1.01314')); +NOTICE: CREATE TABLE will create implicit sequence "t_set0004_c1_seq" for serial column "t_set0004.c1" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c2_set" for column "t_set0004.c2" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c3_set" for column "t_set0004.c3" +NOTICE: CREATE TABLE will create implicit set "t_set0004_c4_set" for column "t_set0004.c4" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_set0004_pkey" for table "t_set0004" +insert into t_set0004(c2, c3, c4) values ('2025/01/01', 'blue', '0'); +insert into t_set0004(c2, c3, c4) values ('2011-11-11,2023-02-28 11:23:00', 'red,yellow', '0,1'); +insert into t_set0004(c2, c3, c4) values ('2024-01,2011-11-11,2025/01/01', 'red,blue', '0,1.01314'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00', 'red'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00,2025/01/01,2025/01/01', 'blue,blue,yellow'); +insert into t_set0004(c3) values ('yellow'); +insert into t_set0004(c3) values ('yellow,yellow,yellow,yellow'); +insert into t_set0004(c3) values ('blue,yellow,red,red'); +insert into t_set0004(c3) values ('blue,red'); +insert into t_set0004(c3, c4) values ('red', '1'); +insert into t_set0004(c3, c4) values ('red,red', '1.01314,1.01314'); +insert into t_set0004(c3, c4) values ('red,blue', '0,1,1.01314'); +select cast(c1 as date), cast(c2 as date), cast(c3 as date), cast(c4 as date) from t_set0004 order by 1,2,3,4; +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: invalid input syntax for type timestamp: "0" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: invalid input syntax for type date: "0" +CONTEXT: 
referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "2011-11-11,2023-02-28 11:23:00" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c2 +WARNING: invalid input syntax for type date: "red,yellow" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: date/time field value out of range: "0,1" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "2011-11-11,2024-01,2025/01/01" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c2 +WARNING: invalid input syntax for type date: "red,blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: invalid input syntax for type date: "0,1.01314" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "yellow,blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "yellow" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "yellow" +CONTEXT: referenced column: date +SQL 
function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red,yellow,blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red,blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: invalid input syntax for type timestamp: "1" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: invalid input syntax for type date: "1" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: Out of range value for date +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type date: "red,blue" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c3 +WARNING: invalid input syntax for type date: "0,1,1.01314" +CONTEXT: referenced column: date +SQL function "set_date" statement 1 +referenced column: c4 + c1 | c2 | c3 | c4 +----+------------+----+------------ + | | | + | | | + | | | + | | | + | | | + | | | + | | | + | | | + | | | 2001-01-31 + | 2023-02-28 | | + | 2023-02-28 | | + | 2025-01-01 | | +(12 rows) + +select cast(c1 as datetime), cast(c2 as datetime), cast(c3 as datetime), cast(c4 as datetime) from t_set0004 order by 1,2,3,4; +WARNING: timestamp out of range +CONTEXT: referenced 
column: c1 +WARNING: invalid input syntax for type timestamp: "blue" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type timestamp: "0" +CONTEXT: referenced column: c4 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "2011-11-11,2023-02-28 11:23:00" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type timestamp: "red,yellow" +CONTEXT: referenced column: c3 +WARNING: date/time field value out of range: "0,1" +CONTEXT: referenced column: c4 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "2011-11-11,2024-01,2025/01/01" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type timestamp: "red,blue" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type timestamp: "0,1.01314" +CONTEXT: referenced column: c4 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "yellow,blue" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "yellow" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "yellow" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red,yellow,blue" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red,blue" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red" 
+CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type timestamp: "1" +CONTEXT: referenced column: c4 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red" +CONTEXT: referenced column: c3 +WARNING: timestamp out of range +CONTEXT: referenced column: c1 +WARNING: invalid input syntax for type timestamp: "red,blue" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type timestamp: "0,1,1.01314" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 +----+---------------------+----+--------------------- + | | | + | | | + | | | + | | | + | | | + | | | + | | | + | | | + | | | 2001-01-31 04:00:00 + | 2023-02-28 11:23:00 | | + | 2023-02-28 11:23:00 | | + | 2025-01-01 00:00:00 | | +(12 rows) + diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index f42a4877c..17188b067 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -92,7 +92,7 @@ extern void adjust_time_range(pg_tm *tm, fsec_t &fsec, bool &warnings); extern bool time_in_with_flag(char *str, unsigned int date_flag, TimeADT* time_adt, bool vertify_time = false); extern bool time_in_with_sql_mode(char *str, TimeADT *result, unsigned int date_flag, bool vertify_time = false); extern bool date_add_interval(DateADT date, Interval *span, DateADT *result); -extern Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_error_type); +extern Datum date_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorType* time_error_type); extern "C" Datum time_float(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum date_enum(PG_FUNCTION_ARGS); extern "C" DLL_PUBLIC Datum timestamp_enum(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/include/plugin_utils/timestamp.h b/contrib/dolphin/include/plugin_utils/timestamp.h index 9d0edf31e..83f4f4e9e 100644 --- 
a/contrib/dolphin/include/plugin_utils/timestamp.h +++ b/contrib/dolphin/include/plugin_utils/timestamp.h @@ -149,7 +149,7 @@ typedef enum { TIME_INCORRECT } TimeErrorType; -extern Datum timestamp_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type); +extern Datum timestamp_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorType* time_error_type); extern Datum timestamptz_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type); extern "C" DLL_PUBLIC Datum int64_b_format_datetime(PG_FUNCTION_ARGS); diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index 40ca303c7..2559e7412 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -90,7 +90,7 @@ static const doConvert convertFunctions[convertFunctionsCount] = {&String2Others #define CAST_ENUM_IDX 22 #define ENUM_CAST_IDX 19 #define CAST_SIGNED_IDX 16 -#define NUM_CAST_TIME_IDX 11 +#define NUM_CAST_TIME_IDX 12 static const char* castFunction[CAST_FUNCTION_ROW][CAST_FUNCTION_COLUMN] = {{"i1_cast_ui1", "i1_cast_ui2", "i1_cast_ui4", "i1_cast_ui8"}, {"i2_cast_ui1", "i2_cast_ui2", "i2_cast_ui4", "i2_cast_ui8"}, @@ -126,12 +126,22 @@ static const char* enumCastFunction[ENUM_CAST_IDX] = {"enum_bit", "enum_int1", " static const char* numCastTimeFunction[NUM_CAST_TIME_IDX] = {"int8_cast_time", "int16_cast_time", "int32_cast_time", "int64_cast_time", "uint8_cast_time", "uint16_cast_time", "uint32_cast_time", "uint64_cast_time", "float4_cast_time", - "float8_cast_time", "numeric_cast_time"}; + "float8_cast_time", "numeric_cast_time", + "text_time_explicit"}; static const char* numCastDateFunction[NUM_CAST_TIME_IDX] = {"int8_cast_date", "int16_cast_date", "int32_cast_date", "int64_cast_date", "uint8_cast_date", "uint16_cast_date", - "uint32_cast_date", "uint64_cast_date", "float4_cast_date", - "float8_cast_date", "numeric_cast_date"}; + 
"uint32_cast_date", "uint64_cast_date", + "float4_cast_date", "float8_cast_date", + "numeric_cast_date", "text_date_explicit"}; + +static const char* numCastDateTimeFunction[NUM_CAST_TIME_IDX] = {"int8_cast_datetime", "int16_cast_datetime", + "int32_cast_datetime", "int64_cast_datetime", + "uint8_cast_datetime", "uint16_cast_datetime", + "uint32_cast_datetime", "uint64_cast_datetime", + "float4_cast_datetime", "float8_cast_datetime", + "numeric_cast_datetime", "timestamp_explicit"}; + typedef enum { @@ -216,7 +226,8 @@ typedef enum { N_UINT8, N_FLOAT4, N_FLOAT8, - N_NUMERIC + N_NUMERIC, + N_TEXT } NumCastIdx; #endif /* @@ -3366,6 +3377,8 @@ int findNumTimeFunctionIdx(Oid typeId) return N_FLOAT8; case NUMERICOID: return N_NUMERIC; + case TEXTOID: + return N_TEXT; default: break; } @@ -3398,6 +3411,13 @@ Oid findNumDateExplicitCastFunction(Oid sourceTypeId, Oid funcid) return (cast_oid != InvalidOid) ? cast_oid : funcid; } +Oid findNumDateTimeExplicitCastFunction(Oid sourceTypeId, Oid funcid) +{ + int idx = findNumTimeFunctionIdx(sourceTypeId); + Oid cast_oid = (idx == INVALID_IDX) ? InvalidOid : + get_func_oid(numCastDateTimeFunction[idx], PG_CATALOG_NAMESPACE, NULL); + return (cast_oid != InvalidOid) ? 
cast_oid : funcid; +} int findEnumFunctionIdx(Oid typeId) { @@ -3571,6 +3591,8 @@ void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId *funcId = findNumTimeExplicitCastFunction(sourceTypeId, defaultFuncId); } else if (targetTypeId == DATEOID) { *funcId = findNumDateExplicitCastFunction(sourceTypeId, defaultFuncId); + } else if (targetTypeId == TIMESTAMPOID) { + *funcId = findNumDateTimeExplicitCastFunction(sourceTypeId, defaultFuncId); } else { *funcId = findUnsignedExplicitCastFunction(targetTypeId, sourceTypeId, defaultFuncId); diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index dde936d42..ff7a36451 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -66,7 +66,7 @@ bool check_pg_tm_time_part(pg_tm *tm, fsec_t fsec); extern const char* extract_numericstr(const char* str); extern "C" DLL_PUBLIC Datum uint8out(PG_FUNCTION_ARGS); static char* adjust_b_format_time(char *str, int *timeSign, int *D, bool *hasD); -int DatetimeDate(char *str, pg_tm *tm, bool is_date_sconst = false); +int DatetimeDate(char *str, pg_tm *tm, int time_cast_type); static float8 getPartFromTm(pg_tm* tm, fsec_t fsec, int part); PG_FUNCTION_INFO_V1_PUBLIC(int8_b_format_time); @@ -275,6 +275,8 @@ extern "C" DLL_PUBLIC Datum date_int(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(date_cast); extern "C" DLL_PUBLIC Datum date_cast(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(text_date_explicit); +extern "C" DLL_PUBLIC Datum text_date_explicit(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1_PUBLIC(time_cast); extern "C" DLL_PUBLIC Datum time_cast(PG_FUNCTION_ARGS); @@ -536,7 +538,8 @@ Datum date_in(PG_FUNCTION_ARGS) { Datum result; TimeErrorType time_error_type = TIME_CORRECT; - result = date_internal(fcinfo, false, &time_error_type); + char* str = PG_GETARG_CSTRING(0); + result = date_internal(fcinfo, str, TIME_IN, &time_error_type); if ((fcinfo->ccontext == COERCION_IMPLICIT 
|| fcinfo->ccontext == COERCION_EXPLICIT) && time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); @@ -547,13 +550,30 @@ Datum date_in(PG_FUNCTION_ARGS) Datum date_cast(PG_FUNCTION_ARGS) { TimeErrorType time_error_type = TIME_CORRECT; - return date_internal(fcinfo, true, &time_error_type); + char* str = PG_GETARG_CSTRING(0); + return date_internal(fcinfo, str, TIME_CAST, &time_error_type); +} + +Datum text_date_explicit(PG_FUNCTION_ARGS) +{ + Datum result; + TimeErrorType time_error_type = TIME_CORRECT; + char* str = fcinfo->argTypes[0] ? + parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]) : + PG_GETARG_CSTRING(0); + result = date_internal(fcinfo, str, TEXT_TIME_EXPLICIT, &time_error_type); + if (time_error_type == TIME_INCORRECT) { + PG_RETURN_NULL(); + } + return result; } -Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_error_type) +Datum date_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorType* time_error_type) #endif { +#ifndef DOLPHIN char* str = PG_GETARG_CSTRING(0); +#endif DateADT date; int dterr; fsec_t fsec; @@ -567,7 +587,6 @@ Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_e char* date_fmt = NULL; #ifdef DOLPHIN errno_t rc = EOK; - rc = memset_s(&tt, sizeof(tt), 0, sizeof(tt)); securec_check(rc, "\0", "\0"); #endif @@ -588,62 +607,62 @@ Datum date_internal(PG_FUNCTION_ARGS, bool is_date_sconst, TimeErrorType* time_e } #else } else { - /* - * default pg date formatting parsing. - */ - dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); - if (dterr != 0) { - DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, is_date_sconst); + int invalid_tz; + bool res = cstring_to_tm(str, tm, fsec, &tzp, &invalid_tz); + if (!res) { /* - * if reporting warning in DateTimeParseError, return 1970-01-01 + * default pg date formatting parsing. 
*/ -#ifdef DOLPHIN - *time_error_type = TIME_INCORRECT; -#endif - PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); - } - if (dterr == 0) { - if (ftype[0] == DTK_NUMBER && nf == 1) { - dterr = DatetimeDate(field[0], tm, is_date_sconst); - dtype = DTK_DATE; - } else { - dterr = DecodeDateTimeForBDatabase(field, ftype, nf, &dtype, tm, &fsec, &tzp); + dterr = ParseDateTime(str, workbuf, sizeof(workbuf), field, ftype, MAXDATEFIELDS, &nf); + if (dterr != 0) { + DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, time_cast_type == TIME_CAST); + /* + * if reporting warning in DateTimeParseError, return 1970-01-01 + */ + *time_error_type = TIME_INCORRECT; + PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); } - } - if (dterr != 0) { - DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, is_date_sconst); -#ifdef DOLPHIN - *time_error_type = TIME_INCORRECT; -#endif - PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); - } - switch (dtype) { - case DTK_DATE: - break; + if (dterr == 0) { + if (ftype[0] == DTK_NUMBER && nf == 1) { + dterr = DatetimeDate(field[0], tm, time_cast_type); + dtype = DTK_DATE; + } else { + dterr = DecodeDateTimeForBDatabase(field, ftype, nf, &dtype, tm, &fsec, &tzp); + } + } + if (dterr != 0) { + DateTimeParseErrorWithFlag(dterr, str, "date", fcinfo->can_ignore, time_cast_type == TIME_CAST); + *time_error_type = TIME_INCORRECT; + PG_RETURN_DATEADT(DATE_ALL_ZERO_VALUE); + } + switch (dtype) { + case DTK_DATE: + break; - case DTK_CURRENT: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("date/time value \"current\" is no longer supported"))); + case DTK_CURRENT: + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("date/time value \"current\" is no longer supported"))); - GetCurrentDateTime(tm); - break; + GetCurrentDateTime(tm); + break; - case DTK_EPOCH: - GetEpochTime(tm); - break; + case DTK_EPOCH: + GetEpochTime(tm); + break; - case DTK_LATE: - DATE_NOEND(date); - PG_RETURN_DATEADT(date); + case DTK_LATE: + 
DATE_NOEND(date); + PG_RETURN_DATEADT(date); - case DTK_EARLY: - DATE_NOBEGIN(date); - PG_RETURN_DATEADT(date); + case DTK_EARLY: + DATE_NOBEGIN(date); + PG_RETURN_DATEADT(date); - default: - DateTimeParseError(DTERR_BAD_FORMAT, str, "date"); - break; + default: + DateTimeParseError(DTERR_BAD_FORMAT, str, "date"); + break; + } } } #endif @@ -698,17 +717,31 @@ Datum input_date_in(char* str, bool can_ignore) #ifdef DOLPHIN extern "C" DLL_PUBLIC Datum timestamp_cast(PG_FUNCTION_ARGS); +extern "C" DLL_PUBLIC Datum timestamp_explicit(PG_FUNCTION_ARGS); -int DatetimeDate(char *str, pg_tm *tm, bool is_date_sconst) +int DatetimeDate(char *str, pg_tm *tm, int time_cast_type) { fsec_t fsec; Datum datetime; - if (is_date_sconst) { - datetime = DirectFunctionCall3(timestamp_cast, CStringGetDatum(str), - ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); - } else { - datetime = DirectFunctionCall3(timestamp_in, CStringGetDatum(str), - ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + bool isRetNull = false; + switch (time_cast_type) { + case TIME_CAST: + datetime = DirectFunctionCall3(timestamp_cast, CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + break; + + case TEXT_TIME_EXPLICIT: + datetime = DirectCall3(&isRetNull, timestamp_explicit, InvalidOid, CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + return ERRCODE_DATETIME_VALUE_OUT_OF_RANGE; + } + break; + + default: + datetime = DirectFunctionCall3(timestamp_in, CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + break; } if (timestamp2tm(datetime, NULL, tm, &fsec, NULL, NULL) != 0) { diff --git a/contrib/dolphin/plugin_utils/adt/timestamp.cpp b/contrib/dolphin/plugin_utils/adt/timestamp.cpp index c550a60ad..1d135ecdb 100644 --- a/contrib/dolphin/plugin_utils/adt/timestamp.cpp +++ b/contrib/dolphin/plugin_utils/adt/timestamp.cpp @@ -350,6 +350,32 @@ extern "C" DLL_PUBLIC Datum convert_datetime_uint64(PG_FUNCTION_ARGS); 
PG_FUNCTION_INFO_V1_PUBLIC(convert_timestamptz_uint64); extern "C" DLL_PUBLIC Datum convert_timestamptz_uint64(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(int8_cast_datetime); +extern "C" DLL_PUBLIC Datum int8_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int16_cast_datetime); +extern "C" DLL_PUBLIC Datum int16_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int32_cast_datetime); +extern "C" DLL_PUBLIC Datum int32_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(int64_cast_datetime); +extern "C" DLL_PUBLIC Datum int64_cast_datetime(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(uint8_cast_datetime); +extern "C" DLL_PUBLIC Datum uint8_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint16_cast_datetime); +extern "C" DLL_PUBLIC Datum uint16_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint32_cast_datetime); +extern "C" DLL_PUBLIC Datum uint32_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(uint64_cast_datetime); +extern "C" DLL_PUBLIC Datum uint64_cast_datetime(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1_PUBLIC(float4_cast_datetime); +extern "C" DLL_PUBLIC Datum float4_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(float8_cast_datetime); +extern "C" DLL_PUBLIC Datum float8_cast_datetime(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1_PUBLIC(numeric_cast_datetime); +extern "C" DLL_PUBLIC Datum numeric_cast_datetime(PG_FUNCTION_ARGS); + #endif /* b format datetime and timestamp type */ @@ -553,8 +579,9 @@ bool TimestampTypeCheck(char* str, bool can_ignore, struct pg_tm* tm, Timestamp Datum timestamp_in(PG_FUNCTION_ARGS) #ifdef DOLPHIN { + char* str = PG_GETARG_CSTRING(0); TimeErrorType time_error_type = TIME_CORRECT; - Datum datum_internal = timestamp_internal(fcinfo, TIME_IN, &time_error_type); + Datum datum_internal = timestamp_internal(fcinfo, str, TIME_IN, &time_error_type); if ((fcinfo->ccontext == COERCION_IMPLICIT || fcinfo->ccontext == COERCION_EXPLICIT) && 
time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); @@ -564,24 +591,30 @@ Datum timestamp_in(PG_FUNCTION_ARGS) Datum timestamp_cast(PG_FUNCTION_ARGS) { + char* str = PG_GETARG_CSTRING(0); TimeErrorType time_error_type = TIME_CORRECT; - return timestamp_internal(fcinfo, TIME_CAST, &time_error_type); + return timestamp_internal(fcinfo, str, TIME_CAST, &time_error_type); } Datum timestamp_explicit(PG_FUNCTION_ARGS) { + char* input_str = fcinfo->argTypes[0] ? + parser_function_input(PG_GETARG_DATUM(0), fcinfo->argTypes[0]) : + PG_GETARG_CSTRING(0); TimeErrorType time_error_type = TIME_CORRECT; - Datum datum_internal = timestamp_internal(fcinfo, TEXT_TIME_EXPLICIT, &time_error_type); + Datum datum_internal = timestamp_internal(fcinfo, input_str, TEXT_TIME_EXPLICIT, &time_error_type); if (time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); } return datum_internal; } -Datum timestamp_internal(PG_FUNCTION_ARGS, int time_cast_type, TimeErrorType* time_error_type) +Datum timestamp_internal(PG_FUNCTION_ARGS, char* str, int time_cast_type, TimeErrorType* time_error_type) #endif { +#ifndef DOLPHIN char* str = PG_GETARG_CSTRING(0); +#endif #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); @@ -1082,11 +1115,11 @@ static int64 integer_b_format_timestamp(bool hasTz, int64 ts, bool can_ignore) #endif #ifdef DOLPHIN -Datum timestamp_to_datum(PG_FUNCTION_ARGS, bool hasTz, int64 ts) +Datum timestamp_to_datum(PG_FUNCTION_ARGS, bool hasTz, int64 ts, bool is_explicit = false) { TimeErrorType time_error_type = TIME_CORRECT; int64 result = integer_b_format_timestamp(hasTz, ts, fcinfo->can_ignore, &time_error_type); - if (fcinfo->ccontext == COERCION_IMPLICIT && time_error_type == TIME_INCORRECT && ENABLE_B_CMPT_MODE) { + if (is_explicit && time_error_type == TIME_INCORRECT) { PG_RETURN_NULL(); } PG_RETURN_TIMESTAMP(result); @@ -11871,6 +11904,84 @@ Datum dolphin_timestamptznot(PG_FUNCTION_ARGS) PG_RETURN_UINT64(~((uint64)timestamp2int(tm))); } + +Datum 
int8_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT8(0), true); +} + +Datum int16_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT16(0), true); +} + +Datum int32_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT32(0), true); +} + +Datum int64_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT64(0), true); +} + + +Datum uint8_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT8(0), true); +} + +Datum uint16_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT16(0), true); +} + +Datum uint32_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT32(0), true); +} + +Datum uint64_cast_datetime(PG_FUNCTION_ARGS) +{ + return timestamp_to_datum(fcinfo, false, (int64)PG_GETARG_INT64(0), true); +} + +Datum str_cast_datetime(PG_FUNCTION_ARGS, char *str) +{ + char buf[MAXDATELEN + 1]; + fillZeroBeforeNumericTimestamp(str, buf); + bool isRetNull = false; + Datum result = DirectCall3(&isRetNull, timestamp_explicit, InvalidOid, CStringGetDatum(buf), + ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)); + if (isRetNull) { + PG_RETURN_NULL(); + } else { + return result; + } +} + +Datum float4_cast_datetime(PG_FUNCTION_ARGS) +{ + float8 n = (float8)PG_GETARG_FLOAT4(0); + char *str = DatumGetCString(DirectFunctionCall1(float8out, Float8GetDatum(n))); + return str_cast_datetime(fcinfo, str); +} + + +Datum float8_cast_datetime(PG_FUNCTION_ARGS) +{ + float8 n = PG_GETARG_FLOAT8(0); + char *str = DatumGetCString(DirectFunctionCall1(float8out, Float8GetDatum(n))); + return str_cast_datetime(fcinfo, str); +} + +Datum numeric_cast_datetime(PG_FUNCTION_ARGS) +{ + Numeric n = PG_GETARG_NUMERIC(0); + char *str = DatumGetCString(DirectFunctionCall1(numeric_out, 
NumericGetDatum(n))); + return str_cast_datetime(fcinfo, str); +} + #endif #endif diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 49f4ccb82..86f231359 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -385,14 +385,28 @@ DROP FUNCTION IF EXISTS pg_catalog.bool_date(boolean); DROP FUNCTION IF EXISTS pg_catalog.int8_cast_date(int1); DROP FUNCTION IF EXISTS pg_catalog.int16_cast_date(int2); DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(int4); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(int8); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint1); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint2); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint4); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(uint8); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(float4); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(float8); -DROP FUNCTION IF EXISTS pg_catalog.int32_cast_date(numeric); +DROP FUNCTION IF EXISTS pg_catalog.int64_cast_date(int8); +DROP FUNCTION IF EXISTS pg_catalog.uint8_cast_date(uint1); +DROP FUNCTION IF EXISTS pg_catalog.uint16_cast_date(uint2); +DROP FUNCTION IF EXISTS pg_catalog.uint32_cast_date(uint4); +DROP FUNCTION IF EXISTS pg_catalog.uint64_cast_date(uint8); +DROP FUNCTION IF EXISTS pg_catalog.float4_cast_date(float4); +DROP FUNCTION IF EXISTS pg_catalog.float8_cast_date(float8); +DROP FUNCTION IF EXISTS pg_catalog.numeric_cast_date(numeric); +DROP FUNCTION IF EXISTS pg_catalog.text_date_explicit(TEXT); + +DROP FUNCTION IF EXISTS pg_catalog.int8_cast_datetime(int1); +DROP FUNCTION IF EXISTS pg_catalog.int16_cast_datetime(int2); +DROP FUNCTION IF EXISTS pg_catalog.int32_cast_datetime(int4); +DROP FUNCTION IF EXISTS pg_catalog.int64_cast_datetime(int8); +DROP FUNCTION IF EXISTS pg_catalog.uint8_cast_datetime(uint1); +DROP FUNCTION IF EXISTS 
pg_catalog.uint16_cast_datetime(uint2); +DROP FUNCTION IF EXISTS pg_catalog.uint32_cast_datetime(uint4); +DROP FUNCTION IF EXISTS pg_catalog.uint64_cast_datetime(uint8); +DROP FUNCTION IF EXISTS pg_catalog.float4_cast_datetime(float4); +DROP FUNCTION IF EXISTS pg_catalog.float8_cast_datetime(float8); +DROP FUNCTION IF EXISTS pg_catalog.numeric_cast_datetime(numeric); +DROP FUNCTION IF EXISTS pg_catalog.timestamp_explicit(TEXT); DROP FUNCTION IF EXISTS pg_catalog.ln(year); DROP FUNCTION IF EXISTS pg_catalog.ln(json); diff --git a/contrib/dolphin/sql/timestamp_test.sql b/contrib/dolphin/sql/timestamp_test.sql index 08e2db656..d553bcf2b 100644 --- a/contrib/dolphin/sql/timestamp_test.sql +++ b/contrib/dolphin/sql/timestamp_test.sql @@ -50,3 +50,22 @@ select timestamp '2022-1-12 12:23:23' or timestamp '00000000000000'; select datetime '2023-1-12 12:23:23' or datetime '00000000000000'; select timestamp '2022-1-12 12:23:23' or datetime '00000000000000'; select datetime '2022-1-12 12:23:23' or timestamp '00000000000000'; + +reset dolphin.sql_mode; +create table t_set0004(c1 int not null auto_increment primary key, c2 set('2011-11-11', '2023-02-28 11:23:00', '2024-01', '2025/01/01') default null, c3 set('red', 'yellow', 'blue') not null, c4 set('0', '1', '1.01314')); +insert into t_set0004(c2, c3, c4) values ('2025/01/01', 'blue', '0'); +insert into t_set0004(c2, c3, c4) values ('2011-11-11,2023-02-28 11:23:00', 'red,yellow', '0,1'); +insert into t_set0004(c2, c3, c4) values ('2024-01,2011-11-11,2025/01/01', 'red,blue', '0,1.01314'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00', 'red'); +insert into t_set0004(c2, c3) values ('2023-02-28 11:23:00,2025/01/01,2025/01/01', 'blue,blue,yellow'); +insert into t_set0004(c3) values ('yellow'); +insert into t_set0004(c3) values ('yellow,yellow,yellow,yellow'); +insert into t_set0004(c3) values ('blue,yellow,red,red'); +insert into t_set0004(c3) values ('blue,red'); +insert into t_set0004(c3, c4) values ('red', '1'); 
+insert into t_set0004(c3, c4) values ('red,red', '1.01314,1.01314'); +insert into t_set0004(c3, c4) values ('red,blue', '0,1,1.01314'); + +select cast(c1 as date), cast(c2 as date), cast(c3 as date), cast(c4 as date) from t_set0004 order by 1,2,3,4; +select cast(c1 as datetime), cast(c2 as datetime), cast(c3 as datetime), cast(c4 as datetime) from t_set0004 order by 1,2,3,4; + diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 472a41a88..84db35fa9 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1051,6 +1051,35 @@ CREATE OR REPLACE FUNCTION pg_catalog.float8_cast_date(float8) RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float8_cast_date'; CREATE OR REPLACE FUNCTION pg_catalog.numeric_cast_date(numeric) RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_cast_date'; +CREATE OR REPLACE FUNCTION pg_catalog.text_date_explicit(TEXT) +RETURNS date LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'text_date_explicit'; + +CREATE OR REPLACE FUNCTION pg_catalog.int8_cast_datetime(int1) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int8_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.int16_cast_datetime(int2) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int16_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.int32_cast_datetime(int4) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int32_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.int64_cast_datetime(int8) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'int64_cast_datetime'; + +CREATE OR REPLACE FUNCTION pg_catalog.uint8_cast_datetime(uint1) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint8_cast_datetime'; +CREATE OR REPLACE 
FUNCTION pg_catalog.uint16_cast_datetime(uint2) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint16_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.uint32_cast_datetime(uint4) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint32_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.uint64_cast_datetime(uint8) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'uint64_cast_datetime'; + +CREATE OR REPLACE FUNCTION pg_catalog.float4_cast_datetime(float4) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float4_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.float8_cast_datetime(float8) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'float8_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.numeric_cast_datetime(numeric) +RETURNS timestamp without time zone LANGUAGE C STABLE STRICT as '$libdir/dolphin', 'numeric_cast_datetime'; +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_explicit(TEXT) +RETURNS timestamp without time zone LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'timestamp_explicit'; CREATE OR REPLACE FUNCTION pg_catalog.bool_date(boolean) RETURNS date LANGUAGE SQL IMMUTABLE STRICT as 'select $1::integer::date'; CREATE CAST (boolean AS date) WITH FUNCTION bool_date(boolean) AS ASSIGNMENT; -- Gitee From 4e6150418430e73b18b4eb6794d810f9fc1c41be Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 18 Jan 2024 19:24:11 +0800 Subject: [PATCH 204/434] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E5=85=BC=E5=AE=B9B=E5=BA=93=E4=BD=BF=E7=94=A8cast/convert?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E5=AF=B9binary=E5=88=97=E8=BD=ACunsigned?= =?UTF-8?q?=E6=97=B6=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/expected/test_binary.out | 64 +++ contrib/dolphin/expected/uint_cast2.out | 16 +- 
contrib/dolphin/expected/uint_cast3.out | 434 ++++++++++++++++++ .../dolphin/include/plugin_utils/varlena.h | 1 + contrib/dolphin/plugin_utils/adt/float.cpp | 19 +- .../dolphin/plugin_utils/adt/unsigned_int.cpp | 12 +- contrib/dolphin/plugin_utils/adt/varlena.cpp | 18 +- contrib/dolphin/sql/uint_cast3.sql | 73 +++ 8 files changed, 623 insertions(+), 14 deletions(-) diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index e62ebb6eb..43fa5b094 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -832,6 +832,14 @@ set dolphin.b_compatibility_mode to on; create table binary_operator(c1 binary(10), c2 numeric(10, 2), c3 time(6), c4 uint1, c5 uint2, c6 uint4, c7 uint8); insert into binary_operator values('34', '234.5', '234.5', 234, 234, 234, 234), ('1234', '234.5', '234.5', 234, 234, 234, 234); select c1 < c2, c1 > c2, c1 <= c2, c1 >= c2 from binary_operator; +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" ?column? | ?column? | ?column? | ?column? 
----------+----------+----------+---------- t | f | t | f @@ -839,6 +847,30 @@ select c1 < c2, c1 > c2, c1 <= c2, c1 >= c2 from binary_operator; (2 rows) select c1 < c3, c1 > c3, c1 <= c3, c1 >= c3 from binary_operator; +WARNING: Data truncated for input data: "34" +CONTEXT: referenced column: float8lt +SQL function "binary_time_lt" statement 1 +WARNING: Data truncated for input data: "34" +CONTEXT: referenced column: float8gt +SQL function "binary_time_gt" statement 1 +WARNING: Data truncated for input data: "34" +CONTEXT: referenced column: float8le +SQL function "binary_time_le" statement 1 +WARNING: Data truncated for input data: "34" +CONTEXT: referenced column: float8ge +SQL function "binary_time_ge" statement 1 +WARNING: Data truncated for input data: "1234" +CONTEXT: referenced column: float8lt +SQL function "binary_time_lt" statement 1 +WARNING: Data truncated for input data: "1234" +CONTEXT: referenced column: float8gt +SQL function "binary_time_gt" statement 1 +WARNING: Data truncated for input data: "1234" +CONTEXT: referenced column: float8le +SQL function "binary_time_le" statement 1 +WARNING: Data truncated for input data: "1234" +CONTEXT: referenced column: float8ge +SQL function "binary_time_ge" statement 1 ?column? | ?column? | ?column? | ?column? ----------+----------+----------+---------- t | f | t | f @@ -846,6 +878,14 @@ select c1 < c3, c1 > c3, c1 <= c3, c1 >= c3 from binary_operator; (2 rows) select c1 < c4, c1 > c4, c1 <= c4, c1 >= c4 from binary_operator; +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" ?column? | ?column? | ?column? | ?column? 
----------+----------+----------+---------- t | f | t | f @@ -853,6 +893,14 @@ select c1 < c4, c1 > c4, c1 <= c4, c1 >= c4 from binary_operator; (2 rows) select c1 < c5, c1 > c5, c1 <= c5, c1 >= c5 from binary_operator; +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" ?column? | ?column? | ?column? | ?column? ----------+----------+----------+---------- t | f | t | f @@ -860,6 +908,14 @@ select c1 < c5, c1 > c5, c1 <= c5, c1 >= c5 from binary_operator; (2 rows) select c1 < c6, c1 > c6, c1 <= c6, c1 >= c6 from binary_operator; +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" ?column? | ?column? | ?column? | ?column? ----------+----------+----------+---------- t | f | t | f @@ -867,6 +923,14 @@ select c1 < c6, c1 > c6, c1 <= c6, c1 >= c6 from binary_operator; (2 rows) select c1 < c7, c1 > c7, c1 <= c7, c1 >= c7 from binary_operator; +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "34" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" +WARNING: Data truncated for input data: "1234" ?column? | ?column? | ?column? | ?column? 
----------+----------+----------+---------- t | f | t | f diff --git a/contrib/dolphin/expected/uint_cast2.out b/contrib/dolphin/expected/uint_cast2.out index 007a9d728..6eb95ccf1 100644 --- a/contrib/dolphin/expected/uint_cast2.out +++ b/contrib/dolphin/expected/uint_cast2.out @@ -114,7 +114,7 @@ insert into t1 values('-1'::varchar(10)); ERROR: value "-1" is out of range for type bigint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); -ERROR: bigint unsigned out of range +ERROR: Data truncated for input data: "-1" CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); ERROR: bigint unsigned out of range @@ -154,6 +154,8 @@ insert into t1 values('-1'::varchar(10)); WARNING: value "-1" is out of range for type bigint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); +WARNING: Data truncated for input data: "-1" +CONTEXT: referenced column: a WARNING: bigint unsigned out of range CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); @@ -314,7 +316,7 @@ insert into t1 values('-1'::varchar(10)); ERROR: value "-1" is out of range for type int unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); -ERROR: int unsigned out of range +ERROR: Data truncated for input data: "-1" CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); ERROR: int unsigned out of range @@ -354,6 +356,8 @@ insert into t1 values('-1'::varchar(10)); WARNING: value "-1" is out of range for type int unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); +WARNING: Data truncated for input data: "-1" +CONTEXT: referenced column: a WARNING: int unsigned out of range CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); @@ -532,7 +536,7 @@ insert into t1 values('-1'::varchar(10)); ERROR: value "-1" is out of range for type smallint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); -ERROR: 
smallint unsigned out of range +ERROR: Data truncated for input data: "-1" CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); ERROR: smallint unsigned out of range @@ -572,6 +576,8 @@ insert into t1 values('-1'::varchar(10)); WARNING: value "-1" is out of range for type smallint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); +WARNING: Data truncated for input data: "-1" +CONTEXT: referenced column: a WARNING: smallint unsigned out of range CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); @@ -756,7 +762,7 @@ insert into t1 values('-1'::varchar(10)); ERROR: value "-1" is out of range for type tinyint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); -ERROR: tinyint unsigned out of range +ERROR: Data truncated for input data: "-1" CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); ERROR: tinyint unsigned out of range @@ -796,6 +802,8 @@ insert into t1 values('-1'::varchar(10)); WARNING: value "-1" is out of range for type tinyint unsigned CONTEXT: referenced column: a insert into t1 values('-1'::binary(10)); +WARNING: Data truncated for input data: "-1" +CONTEXT: referenced column: a WARNING: tinyint unsigned out of range CONTEXT: referenced column: a insert into t1 values('-1'::varbinary(10)); diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index 6d983cf4d..e5e94627d 100644 --- a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -550,6 +550,440 @@ CONTEXT: referenced column: uint8 18446744073709551615 (1 row) +-- test: binary to unsigned +--- é…ç½®å‚æ•° +set dolphin.b_compatibility_mode=on; +set b_format_behavior_compat_options=enable_set_variables; +set bytea_output=escape; +--- 建表 +drop table if exists t_binary0001 cascade; +NOTICE: table "t_binary0001" does not exist, skipping +create table t_binary0001( +c1 int not null, +c2 binary, +c3 
binary(10), +c4 binary(255), +c5 varbinary(1), +c6 varbinary(10), +c7 varbinary(255)) charset utf8mb3; +WARNING: utf8mb3 is not a valid encoding name. default value set +--- æ’å…¥æ•°æ® +set @v1='abcdefghijklmnopqrstuvwxyz'; +set @v2='a熊猫竹竹爱åƒç«¹å­'; +set @v3=hex(@v2); +insert into t_binary0001 values ( +1, substr(@v1,1,1), substr(@v1,1,10), repeat(@v1, 9), +substr(@v1,1,1), substr(@v1,1,10), repeat(@v1, 9)); +insert into t_binary0001 values ( +2, substr(@v2,1,1), substr(@v2,1,4), repeat(@v2, 9), +substr(@v2,1,1), substr(@v2,1,4), repeat(@v2, 9)); +insert into t_binary0001 values ( +2, substr(@v3,1,1), substr(@v3,1,4), substr(repeat(@v3, 10),1,255), +substr(@v3,1,1), substr(@v3,1,4), substr(repeat(@v3, 10),1,255)); +--- 使用cast/convertå‡½æ•°è½¬æ¢ +select c1, cast(c2 as unsigned), cast(c3 as unsigned), cast(c4 as unsigned), +cast(c5 as unsigned) from t_binary0001 order by 1,2,3,4,5; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced 
column: c5 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 +(3 rows) + +select c1, convert(c2, unsigned), convert(c3, unsigned), convert(c4, unsigned), +convert(c5, unsigned), convert(c6, unsigned), convert(c7, unsigned) from +t_binary0001 order by 1,2,3,4,5,6,7; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: 
referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c7 + c1 | c2 | c3 | c4 | c5 | c6 | c7 +----+----+----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 | 61 | 61 +(3 rows) + +--- 测试uint1/uint2/uint4/uint8,预期表现和unsigned一致 +select c1, cast(c2 as uint1), cast(c3 as uint1), cast(c4 as uint1), +cast(c5 as uint1) from t_binary0001 order by 1,2,3,4,5; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for 
type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 +(3 rows) + +select c1, convert(c2, uint1), convert(c3, uint1), convert(c4, uint1), +convert(c5, uint1), convert(c6, uint1), convert(c7, uint1) from +t_binary0001 order by 1,2,3,4,5,6,7; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: 
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: 
"61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c7 + c1 | c2 | c3 | c4 | c5 | c6 | c7 +----+----+----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 | 61 | 61 +(3 rows) + +select c1, cast(c2 as uint2), cast(c3 as uint2), cast(c4 as uint2), +cast(c5 as uint2) from t_binary0001 order by 1,2,3,4,5; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: 
"a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 +(3 rows) + +select c1, convert(c2, uint2), convert(c3, uint2), convert(c4, uint2), +convert(c5, uint2), convert(c6, uint2), convert(c7, uint2) from +t_binary0001 order by 1,2,3,4,5,6,7; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: 
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c7 + c1 | c2 | c3 | c4 | c5 | c6 | c7 
+----+----+----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 | 61 | 61 +(3 rows) + +select c1, cast(c2 as uint4), cast(c3 as uint4), cast(c4 as uint4), +cast(c5 as uint4) from t_binary0001 order by 1,2,3,4,5; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 +(3 rows) + +select c1, convert(c2, uint4), convert(c3, uint4), 
convert(c4, uint4), +convert(c5, uint4), convert(c6, uint4), convert(c7, uint4) from +t_binary0001 order by 1,2,3,4,5,6,7; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c7 +WARNING: invalid input 
syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c7 + c1 | c2 | c3 | c4 | c5 | c6 | c7 +----+----+----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 | 61 | 61 +(3 rows) + +select c1, cast(c2 as uint8), cast(c3 as uint8), cast(c4 as uint8), +cast(c5 as uint8) from t_binary0001 order by 1,2,3,4,5; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input 
syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 + c1 | c2 | c3 | c4 | c5 +----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 +(3 rows) + +select c1, convert(c2, uint8), convert(c3, uint8), convert(c4, uint8), +convert(c5, uint8), convert(c6, uint8), convert(c7, uint8) from +t_binary0001 order by 1,2,3,4,5,6,7; +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "abcdefghij" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: 
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c2 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "a" +CONTEXT: referenced column: c5 +WARNING: invalid input syntax for type double precision: "a熊猫竹" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" +CONTEXT: referenced column: c7 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c3 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c4 +WARNING: invalid input syntax for type double precision: "61e7" +CONTEXT: referenced column: c6 +WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" +CONTEXT: referenced column: c7 + c1 | c2 | c3 | c4 | c5 | c6 | c7 
+----+----+----+----+----+----+---- + 1 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 0 | 0 | 0 | 0 | 0 | 0 + 2 | 6 | 61 | 61 | 6 | 61 | 61 +(3 rows) + +--- 建表并æ’å…¥æ•°æ® +create table t_binary(a binary(255)); +create table t_unsigned(a int8 unsigned); +insert into t_binary values('2e1'); +insert into t_binary values('1ab'); +insert into t_binary values('0x1ab'); +insert into t_binary values('123'); +--- 测试转æ¢ç»“æžœ +select cast(a as unsigned) from t_binary; +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a + a +----- + 2 + 1 + 0 + 123 +(4 rows) + +insert ignore into t_unsigned select a from t_binary; +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +select * from t_unsigned; + a +----- + 20 + 1 + 0 + 123 +(4 rows) + +--- æ¸…ç† +drop table t_unsigned cascade; +drop table t_binary cascade; +drop table t_binary0001 cascade; SET dolphin.sql_mode = ''; select cast('-0' as unsigned); uint8 diff --git a/contrib/dolphin/include/plugin_utils/varlena.h b/contrib/dolphin/include/plugin_utils/varlena.h index 43fde2d48..e3c93ce13 100644 --- a/contrib/dolphin/include/plugin_utils/varlena.h +++ b/contrib/dolphin/include/plugin_utils/varlena.h @@ -6,6 +6,7 @@ #ifdef DOLPHIN extern Datum bit_blob(VarBit* input); +double float8in_internal(char* str, char** endptr_p, bool* hasError, CoercionContext ccontext); #endif #endif \ No newline at end of file diff --git 
a/contrib/dolphin/plugin_utils/adt/float.cpp b/contrib/dolphin/plugin_utils/adt/float.cpp index e1a053c19..aedc6f1e5 100644 --- a/contrib/dolphin/plugin_utils/adt/float.cpp +++ b/contrib/dolphin/plugin_utils/adt/float.cpp @@ -51,6 +51,7 @@ static const uint32 nan[2] = {0xffffffff, 0x7fffffff}; /* not sure what the following should be, but better to make it over-sufficient */ #define MAXFLOATWIDTH 64 #define MAXDOUBLEWIDTH 128 +#define TENBASE 10 /* * check to see if a float4/8 val has underflowed or overflowed @@ -67,7 +68,11 @@ static const uint32 nan[2] = {0xffffffff, 0x7fffffff}; /* ========== USER I/O ROUTINES ========== */ static int float4_cmp_internal(float4 a, float4 b); +#ifdef DOLPHIN +double float8in_internal(char* str, char** s, bool* hasError, CoercionContext ccontext); +#else double float8in_internal(char* str, char** s, bool* hasError); +#endif #ifndef HAVE_CBRT /* @@ -97,8 +102,11 @@ static float8 get_log_result(float8 arg, bool is_log10); * place is less well standardized; pre-C99 systems tend not to have C99's * INFINITY and NAN macros. We centralize our workarounds for this here. 
*/ - +#ifdef DOLPHIN +double float8in_internal(char* str, char** endptr_p, bool* hasError, CoercionContext ccontext) +#else double float8in_internal(char* str, char** endptr_p, bool* hasError) +#endif { double val; char* endptr = NULL; @@ -120,7 +128,14 @@ double float8in_internal(char* str, char** endptr_p, bool* hasError) } errno = 0; - val = strtod(str, &endptr); +#ifdef DOLPHIN + val = (double)strtoll(str, &endptr, TENBASE); + if (ccontext != COERCION_EXPLICIT && (*endptr != 'X' && *endptr != 'x')) { +#endif + val = strtod(str, &endptr); +#ifdef DOLPHIN + } +#endif if (endptr == str || errno != 0) { int save_errno = errno; diff --git a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp index d5bdaac25..d81e9983c 100644 --- a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp +++ b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp @@ -5169,32 +5169,36 @@ PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_ui1); extern "C" DLL_PUBLIC Datum varlena_cast_ui1(PG_FUNCTION_ARGS); Datum varlena_cast_ui1(PG_FUNCTION_ARGS) { + fcinfo->ccontext = COERCION_EXPLICIT; Datum val = Varlena2Float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui1, val); + return DirectFunctionCall1Coll(f8_cast_ui1, InvalidOid, val, fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_ui2); extern "C" DLL_PUBLIC Datum varlena_cast_ui2(PG_FUNCTION_ARGS); Datum varlena_cast_ui2(PG_FUNCTION_ARGS) { + fcinfo->ccontext = COERCION_EXPLICIT; Datum val = Varlena2Float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui2, val); + return DirectFunctionCall1Coll(f8_cast_ui2, InvalidOid, val, fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_ui4); extern "C" DLL_PUBLIC Datum varlena_cast_ui4(PG_FUNCTION_ARGS); Datum varlena_cast_ui4(PG_FUNCTION_ARGS) { + fcinfo->ccontext = COERCION_EXPLICIT; Datum val = Varlena2Float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui4, val); + return DirectFunctionCall1Coll(f8_cast_ui4, InvalidOid, val, 
fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_ui8); extern "C" DLL_PUBLIC Datum varlena_cast_ui8(PG_FUNCTION_ARGS); Datum varlena_cast_ui8(PG_FUNCTION_ARGS) { + fcinfo->ccontext = COERCION_EXPLICIT; Datum val = Varlena2Float8(fcinfo); - return DirectFunctionCall1(f8_cast_ui8, val); + return DirectFunctionCall1Coll(f8_cast_ui8, InvalidOid, val, fcinfo->can_ignore); } PG_FUNCTION_INFO_V1_PUBLIC(dolphin_float4not); diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 1332f45ac..25fc2cfb3 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10741,7 +10741,7 @@ Datum blob_any_value(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(vlena); } -static char* AnyElementGetCString(Oid anyOid, Datum anyDatum) +static char* AnyElementGetCString(Oid anyOid, Datum anyDatum, bool* hasError = nullptr) { if (!OidIsValid(anyOid)) { return DatumGetCString(DirectFunctionCall1(textout, anyDatum)); @@ -10752,6 +10752,11 @@ static char* AnyElementGetCString(Oid anyOid, Datum anyDatum) getTypeOutputInfo(anyOid, &typeOutput, &typIsVarlena); if (typIsVarlena) { data = DatumGetCString(DirectFunctionCall1(textout, anyDatum)); + int reallen = VARSIZE_ANY_EXHDR(DatumGetByteaPP(anyDatum)); + int datalen = strlen(data); + if (hasError && datalen < reallen) { + *hasError = true; + } } else { data = DatumGetCString(OidOutputFunctionCall(typeOutput, anyDatum)); } @@ -10761,15 +10766,20 @@ static char* AnyElementGetCString(Oid anyOid, Datum anyDatum) Datum Varlena2Float8(PG_FUNCTION_ARGS) { char* data = NULL; - data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0)); + bool hasLenError = false; + data = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0), &hasLenError); + bool hasError = false; char* endptr = NULL; - - double result = float8in_internal(data, &endptr, &hasError); + double result = float8in_internal(data, &endptr, &hasError, fcinfo->ccontext); if 
(hasError || (endptr != NULL && *endptr != '\0')) { ereport((!fcinfo->can_ignore && SQL_MODE_STRICT()) ? ERROR : WARNING, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type double precision: \"%s\"", data))); + } else if (hasLenError) { + ereport((!fcinfo->can_ignore && SQL_MODE_STRICT()) ? ERROR : WARNING, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("Data truncated for input data: \"%s\"", data))); } pfree_ext(data); PG_RETURN_FLOAT8(result); diff --git a/contrib/dolphin/sql/uint_cast3.sql b/contrib/dolphin/sql/uint_cast3.sql index 81e8a82b6..829011356 100644 --- a/contrib/dolphin/sql/uint_cast3.sql +++ b/contrib/dolphin/sql/uint_cast3.sql @@ -103,6 +103,79 @@ select '65536'::text::uint2; select '4294967296'::text::uint4; select '18446744073709551616'::text::uint8; +-- test: binary to unsigned +--- é…ç½®å‚æ•° +set dolphin.b_compatibility_mode=on; +set b_format_behavior_compat_options=enable_set_variables; +set bytea_output=escape; +--- 建表 +drop table if exists t_binary0001 cascade; +create table t_binary0001( +c1 int not null, +c2 binary, +c3 binary(10), +c4 binary(255), +c5 varbinary(1), +c6 varbinary(10), +c7 varbinary(255)) charset utf8mb3; +--- æ’å…¥æ•°æ® +set @v1='abcdefghijklmnopqrstuvwxyz'; +set @v2='a熊猫竹竹爱åƒç«¹å­'; +set @v3=hex(@v2); +insert into t_binary0001 values ( +1, substr(@v1,1,1), substr(@v1,1,10), repeat(@v1, 9), +substr(@v1,1,1), substr(@v1,1,10), repeat(@v1, 9)); +insert into t_binary0001 values ( +2, substr(@v2,1,1), substr(@v2,1,4), repeat(@v2, 9), +substr(@v2,1,1), substr(@v2,1,4), repeat(@v2, 9)); +insert into t_binary0001 values ( +2, substr(@v3,1,1), substr(@v3,1,4), substr(repeat(@v3, 10),1,255), +substr(@v3,1,1), substr(@v3,1,4), substr(repeat(@v3, 10),1,255)); +--- 使用cast/convertå‡½æ•°è½¬æ¢ +select c1, cast(c2 as unsigned), cast(c3 as unsigned), cast(c4 as unsigned), +cast(c5 as unsigned) from t_binary0001 order by 1,2,3,4,5; +select c1, convert(c2, unsigned), convert(c3, unsigned), 
convert(c4, unsigned), +convert(c5, unsigned), convert(c6, unsigned), convert(c7, unsigned) from +t_binary0001 order by 1,2,3,4,5,6,7; +--- 测试uint1/uint2/uint4/uint8,预期表现和unsigned一致 +select c1, cast(c2 as uint1), cast(c3 as uint1), cast(c4 as uint1), +cast(c5 as uint1) from t_binary0001 order by 1,2,3,4,5; +select c1, convert(c2, uint1), convert(c3, uint1), convert(c4, uint1), +convert(c5, uint1), convert(c6, uint1), convert(c7, uint1) from +t_binary0001 order by 1,2,3,4,5,6,7; +select c1, cast(c2 as uint2), cast(c3 as uint2), cast(c4 as uint2), +cast(c5 as uint2) from t_binary0001 order by 1,2,3,4,5; +select c1, convert(c2, uint2), convert(c3, uint2), convert(c4, uint2), +convert(c5, uint2), convert(c6, uint2), convert(c7, uint2) from +t_binary0001 order by 1,2,3,4,5,6,7; +select c1, cast(c2 as uint4), cast(c3 as uint4), cast(c4 as uint4), +cast(c5 as uint4) from t_binary0001 order by 1,2,3,4,5; +select c1, convert(c2, uint4), convert(c3, uint4), convert(c4, uint4), +convert(c5, uint4), convert(c6, uint4), convert(c7, uint4) from +t_binary0001 order by 1,2,3,4,5,6,7; +select c1, cast(c2 as uint8), cast(c3 as uint8), cast(c4 as uint8), +cast(c5 as uint8) from t_binary0001 order by 1,2,3,4,5; +select c1, convert(c2, uint8), convert(c3, uint8), convert(c4, uint8), +convert(c5, uint8), convert(c6, uint8), convert(c7, uint8) from +t_binary0001 order by 1,2,3,4,5,6,7; + +--- 建表并æ’å…¥æ•°æ® +create table t_binary(a binary(255)); +create table t_unsigned(a int8 unsigned); +insert into t_binary values('2e1'); +insert into t_binary values('1ab'); +insert into t_binary values('0x1ab'); +insert into t_binary values('123'); +--- 测试转æ¢ç»“æžœ +select cast(a as unsigned) from t_binary; +insert ignore into t_unsigned select a from t_binary; +select * from t_unsigned; + +--- æ¸…ç† +drop table t_unsigned cascade; +drop table t_binary cascade; +drop table t_binary0001 cascade; + SET dolphin.sql_mode = ''; select cast('-0' as unsigned); create table t_uint(a uint1, b uint2, c uint4, d 
uint8); -- Gitee From a6004b35d96d5d8a15ccfa93680f1aece22d49b3 Mon Sep 17 00:00:00 2001 From: wangpingyun <2418191738@qq.com> Date: Mon, 22 Jan 2024 15:52:02 +0800 Subject: [PATCH 205/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dfloor=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E4=B8=8D=E6=94=AF=E6=8C=81year=E5=92=8Cjson=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/math_func.out | 62 +++++++++++++++++++ .../rollback_script/dolphin--3.0--2.0.sql | 3 + .../dolphin/sql/builtin_funcs/math_func.sql | 39 ++++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 10 +++ 4 files changed, 114 insertions(+) diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out b/contrib/dolphin/expected/builtin_funcs/math_func.out index 263abdf16..5ea937cbc 100644 --- a/contrib/dolphin/expected/builtin_funcs/math_func.out +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -342,6 +342,68 @@ CONTEXT: referenced column: ln 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0 | 0.0000000000000000 | 0.0000000000000000 | 1.9459101490553133 | 0.0000000000000000 | 16.822687342557736 | 12.160290452875046 | 12.160292261823946 | 30.638197909965809 | 30.638197909965832 | 30.638197909965809 | 30.638197909965832 | 7.61233683716775 | 0.2070141693843261 | 0.2070141693843261 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.2070141693843261 | 0.0000000000000000 | 1.6094379124341004 | (1 row) +-- floor math function +select +floor(`int1`), +floor(`uint1`), +floor(`int2`), +floor(`uint2`), +floor(`int4`), +floor(`uint4`), +floor(`int8`), +floor(`uint8`), +floor(`float4`), +floor(`float8`), +floor(`numeric`), +floor(`bit1`), +floor(`bit64`), +floor(`boolean`), +floor(`date`), +floor(`time`), +floor(`time(4)`), +floor(`datetime`), +floor(`datetime(4)`), +floor(`timestamp`), 
+floor(`timestamp(4)`), +floor(`year`), +floor(`char`), +floor(`varchar`), +floor(`binary`), +floor(`varbinary`), +floor(`tinyblob`), +floor(`blob`), +floor(`mediumblob`), +floor(`longblob`), +floor(`text`), +floor(`enum_t`), +floor(`set_t`), +floor(`json`) +from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor +WARNING: invalid input syntax for type double precision: "{"a": 1, "b": 2}" +CONTEXT: referenced column: floor + floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor | floor +-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+----------+--------+--------+----------------+----------------+----------------+----------------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+-------+------- + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 7 | 1 | 20230205 | 191050 | 191050 | 20230205191050 | 20230205191050 | 20230205191050 | 
20230205191050 | 2023 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5 | 0 +(1 row) + select exp(709); exp ----------------------- diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 86f231359..ae1bc077a 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -747,3 +747,6 @@ DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_le(arg1 longblob, arg2 me DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_gt(arg1 longblob, arg2 mediumblob); DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_ge(arg1 longblob, arg2 mediumblob); DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_cmp(longblob, longblob); + +DROP FUNCTION IF EXISTS pg_catalog.floor(year); +DROP FUNCTION IF EXISTS pg_catalog.floor(json); \ No newline at end of file diff --git a/contrib/dolphin/sql/builtin_funcs/math_func.sql b/contrib/dolphin/sql/builtin_funcs/math_func.sql index 03ef27d62..847ca0fcf 100644 --- a/contrib/dolphin/sql/builtin_funcs/math_func.sql +++ b/contrib/dolphin/sql/builtin_funcs/math_func.sql @@ -127,6 +127,45 @@ ln(`enum_t`), ln(`set_t`), ln(`json`) from test_type_table; + +-- floor math function +select +floor(`int1`), +floor(`uint1`), +floor(`int2`), +floor(`uint2`), +floor(`int4`), +floor(`uint4`), +floor(`int8`), +floor(`uint8`), +floor(`float4`), +floor(`float8`), +floor(`numeric`), +floor(`bit1`), +floor(`bit64`), +floor(`boolean`), +floor(`date`), +floor(`time`), +floor(`time(4)`), +floor(`datetime`), +floor(`datetime(4)`), +floor(`timestamp`), +floor(`timestamp(4)`), +floor(`year`), +floor(`char`), +floor(`varchar`), +floor(`binary`), +floor(`varbinary`), +floor(`tinyblob`), +floor(`blob`), +floor(`mediumblob`), +floor(`longblob`), +floor(`text`), +floor(`enum_t`), +floor(`set_t`), +floor(`json`) +from test_type_table; + select exp(709); select exp(710); select exp(-1000); diff --git 
a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 84db35fa9..2186dd586 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1098,3 +1098,13 @@ CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(boolean, TEXT) RETURNS TEXT LA CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(longblob, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(anyenum, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(json, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; + +DROP FUNCTION IF EXISTS pg_catalog.floor(year); +CREATE OR REPLACE FUNCTION pg_catalog.floor(year) +RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as +'select pg_catalog.floor(cast($1 as double precision))'; + +DROP FUNCTION IF EXISTS pg_catalog.floor(json); +CREATE OR REPLACE FUNCTION pg_catalog.floor(json) +RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as +'select pg_catalog.floor(cast($1 as double precision))'; \ No newline at end of file -- Gitee From 5ebc56822b6e25950ea14c15c1b39a5c5e52715d Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Wed, 10 Jan 2024 16:38:08 +0800 Subject: [PATCH 206/434] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E5=87=BD=E6=95=B0?= =?UTF-8?q?=EF=BC=9Ahour(YEAR)=E3=80=81minute(YEAR)=E3=80=81second(YEAR)?= =?UTF-8?q?=E3=80=81year(YEAR)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dolphin/expected/time_function_test.out | 47 +++++++++++ contrib/dolphin/include/plugin_utils/date.h | 7 +- contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_utils/adt/date.cpp | 81 ++++++++++--------- 
.../rollback_script/dolphin--3.0--2.0.sql | 7 +- contrib/dolphin/sql/time_function_test.sql | 12 +++ .../upgrade_script/dolphin--2.0--3.0.sql | 7 +- 7 files changed, 121 insertions(+), 42 deletions(-) create mode 100644 contrib/dolphin/expected/time_function_test.out create mode 100644 contrib/dolphin/sql/time_function_test.sql diff --git a/contrib/dolphin/expected/time_function_test.out b/contrib/dolphin/expected/time_function_test.out new file mode 100644 index 000000000..9b40f2544 --- /dev/null +++ b/contrib/dolphin/expected/time_function_test.out @@ -0,0 +1,47 @@ +create schema time_function_test; +set search_path to time_function_test; + +create table test_year(year year); +insert into test_year values (1901), (2000), (2023); +select SECOND(year) from test_year; + second +-------- + 1 + 0 + 23 +(3 rows) + +select MINUTE(year) from test_year; + minute +-------- + 19 + 20 + 20 +(3 rows) + +select HOUR(year) from test_year; + hour +------ + 0 + 0 + 0 +(3 rows) + +select YEAR(year) from test_year; +WARNING: invalid input syntax for type timestamp: "00:19:01" +CONTEXT: referenced column: year +WARNING: invalid input syntax for type timestamp: "00:20:00" +CONTEXT: referenced column: year +WARNING: invalid input syntax for type timestamp: "00:20:23" +CONTEXT: referenced column: year + year +------ + + + +(3 rows) + + +reset search_path; +drop schema time_function_test cascade ; +NOTICE: drop cascades to table time_function_test.test_year diff --git a/contrib/dolphin/include/plugin_utils/date.h b/contrib/dolphin/include/plugin_utils/date.h index 17188b067..bfbbb73fc 100644 --- a/contrib/dolphin/include/plugin_utils/date.h +++ b/contrib/dolphin/include/plugin_utils/date.h @@ -63,6 +63,11 @@ #define MAX_MICRO_SECOND 1000000 #define TIME_MAX_NANO_SECOND 99999999 #define TIME_NANO_SECOND_TO_MICRO_SECOND_RADIX 1000 +struct LongLongTm { + pg_tm result_tm; /* yyyy-mm-dd hh:MM:ss timestamp */ + fsec_t fsec; /* ms of result_tm */ + int timeSign; /* timeSign */ +}; #endif /* 
for b compatibility type*/ @@ -101,7 +106,7 @@ extern bool time_add_nanoseconds_with_round(char* input_str, pg_tm *tm, long lon extern long long align_to_nanoseconds(long long src); extern bool check_time_mmssff_range(pg_tm *tm, long long microseconds); extern bool longlong_to_tm(long long nr, TimeADT* time, pg_tm* result_tm, fsec_t* fsec, int32* timeSign); -bool check_time_min_value(char* input_str, long long nr, bool can_ignore); +extern bool longlong_to_tm(long long nr, TimeADT* time, LongLongTm* tm); /* same as longlong_to_tm */ typedef struct DateTimeFormat { diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 0ed92900e..cb067134c 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -21,7 +21,7 @@ test: test_procedure_ddl_import_and_export test_function_ddl_import_and_export t # must be single test group, cause other connection will affect the result test: test_shows -test: test_shows_1 test_shows_2 +test: test_shows_1 test_shows_2 time_function_test # must be single test group, cause other connection will affect the result test: test_shows_3 diff --git a/contrib/dolphin/plugin_utils/adt/date.cpp b/contrib/dolphin/plugin_utils/adt/date.cpp index ff7a36451..e0b0276ba 100644 --- a/contrib/dolphin/plugin_utils/adt/date.cpp +++ b/contrib/dolphin/plugin_utils/adt/date.cpp @@ -5576,6 +5576,32 @@ Datum time_to_sec(PG_FUNCTION_ARGS) PG_RETURN_INT32(result); } +/** + * covert unmber to time + * @param input_str: use for print + * @param nr: convert time input + * @param can_ignore: Indicates whether to report an error when an error occurs. + * @param time: timeStamp destination + * @param tm: long long tm destination + * @return: succss or not + */ +bool number_to_time(char* input_str, long long nr, bool can_ignore, TimeADT* time, LongLongTm* tm) +{ + int errlevel = (SQL_MODE_STRICT() && !can_ignore) ? 
ERROR : WARNING; + if (nr < -TIME_MAX_VALUE) { + ereport(errlevel, (errmsg("Truncated incorrect time value: \"%s\"", input_str))); + return false; + } + return longlong_to_tm(nr, time, tm); +} + +bool number_to_time(long long nr, bool can_ignore, TimeADT* time, LongLongTm* tm) +{ + char time_str[MAX_LONGLONG_TO_CHAR_LENGTH] = {0}; + errno_t errorno = sprintf_s(time_str, sizeof(time_str), "%lld", nr); + securec_check_ss(errorno, "\0", "\0"); + return number_to_time(time_str, nr, can_ignore, time, tm); +} /* int64_time_to_sec * @param time, type is int @@ -5583,32 +5609,20 @@ Datum time_to_sec(PG_FUNCTION_ARGS) */ Datum int64_time_to_sec(PG_FUNCTION_ARGS) { - TimeADT time; int64 input_time = PG_GETARG_INT64(0); - pg_tm result_tt = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - pg_tm* result_tm = &result_tt; - fsec_t fsec; - int32 timeSign = 1; + TimeADT time; int32 result; - - errno_t errorno = EOK; - char time_str[MAX_LONGLONG_TO_CHAR_LENGTH] = {0}; - errorno = sprintf_s(time_str, sizeof(time_str), "%lld", input_time); - securec_check_ss(errorno, "\0", "\0"); - - if (!check_time_min_value(time_str, input_time, fcinfo->can_ignore)) { - PG_RETURN_NULL(); - } - if (!longlong_to_tm(input_time, &time, result_tm, &fsec, &timeSign)) { + LongLongTm longlongTm = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 1}; + + if (!number_to_time(input_time, fcinfo->can_ignore, &time, &longlongTm)) { PG_RETURN_NULL(); } - + pg_tm *result_tm = &longlongTm.result_tm; result = ((result_tm->tm_hour * MINS_PER_HOUR + result_tm->tm_min) * SECS_PER_MINUTE) + result_tm->tm_sec; - result *= timeSign; + result *= longlongTm.timeSign; PG_RETURN_INT32(result); } - /* numeric_time_to_sec * @param time, type is numeric * @return seconds of the given time @@ -5617,43 +5631,34 @@ Datum numeric_time_to_sec(PG_FUNCTION_ARGS) { Numeric num1 = PG_GETARG_NUMERIC(0); lldiv_t div1; - struct pg_tm tt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - pg_tm* result_tm = &tt1; int32 result; TimeADT time; - fsec_t fsec; - int32 timeSign = 
1; - + LongLongTm longlongTm = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 1}; Numeric_to_lldiv(num1, &div1); char* input_str = DatumGetCString(numeric_out_with_zero(fcinfo)); - if (!check_time_min_value(input_str, div1.quot, fcinfo->can_ignore)) { - PG_RETURN_NULL(); - } - if (!longlong_to_tm(div1.quot, &time, result_tm, &fsec, &timeSign)) { + if (!number_to_time(input_str, div1.quot, fcinfo->can_ignore, &time, &longlongTm)) { PG_RETURN_NULL(); } div1.rem = div1.rem < 0 ? -div1.rem : div1.rem; - time_add_nanoseconds_with_round(input_str, result_tm, div1.rem, &fsec, fcinfo->can_ignore); + pg_tm* result_tm = &longlongTm.result_tm; + time_add_nanoseconds_with_round(input_str, result_tm, div1.rem, &longlongTm.fsec, fcinfo->can_ignore); result = ((result_tm->tm_hour * MINS_PER_HOUR + result_tm->tm_min) * SECS_PER_MINUTE) + result_tm->tm_sec; - result *= timeSign; + result *= longlongTm.timeSign; PG_RETURN_INT32(result); } - -bool check_time_min_value(char* input_str, long long nr, bool can_ignore) +/** + * same as longlong_to_tm + */ +bool longlong_to_tm(long long nr, TimeADT* time, LongLongTm* tm) { - int errlevel = (SQL_MODE_STRICT() && !can_ignore) ? 
ERROR : WARNING; - if (nr < -TIME_MAX_VALUE) { - ereport(errlevel, (errmsg("Truncated incorrect time value: \"%s\"", input_str))); - return false; - } - return true; + return longlong_to_tm(nr, time, &tm->result_tm, &tm->fsec, &tm->timeSign); } - + bool longlong_to_tm(long long nr, TimeADT* time, pg_tm* result_tm, fsec_t* fsec, int32* timeSign) { errno_t errorno = EOK; diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index ae1bc077a..77494e6ce 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -749,4 +749,9 @@ DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_ge(arg1 longblob, arg2 me DROP FUNCTION IF EXISTS pg_catalog.longblob_mediumblob_cmp(longblob, longblob); DROP FUNCTION IF EXISTS pg_catalog.floor(year); -DROP FUNCTION IF EXISTS pg_catalog.floor(json); \ No newline at end of file +DROP FUNCTION IF EXISTS pg_catalog.floor(json); + +DROP FUNCTION IF EXISTS pg_catalog.hour(YEAR); +DROP FUNCTION IF EXISTS pg_catalog.minute(YEAR); +DROP FUNCTION IF EXISTS pg_catalog.second(YEAR); +DROP FUNCTION IF EXISTS pg_catalog.year(YEAR); \ No newline at end of file diff --git a/contrib/dolphin/sql/time_function_test.sql b/contrib/dolphin/sql/time_function_test.sql new file mode 100644 index 000000000..f3f525ede --- /dev/null +++ b/contrib/dolphin/sql/time_function_test.sql @@ -0,0 +1,12 @@ +create schema time_function_test; +set search_path to time_function_test; + +create table test_year(year year); +insert into test_year values (1901), (2000), (2023); +select SECOND(year) from test_year; +select MINUTE(year) from test_year; +select HOUR(year) from test_year; +select YEAR(year) from test_year; + +reset search_path; +drop schema time_function_test cascade ; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index 
2186dd586..a43e4282c 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1107,4 +1107,9 @@ RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as DROP FUNCTION IF EXISTS pg_catalog.floor(json); CREATE OR REPLACE FUNCTION pg_catalog.floor(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as -'select pg_catalog.floor(cast($1 as double precision))'; \ No newline at end of file +'select pg_catalog.floor(cast($1 as double precision))'; + +CREATE OR REPLACE FUNCTION pg_catalog.hour (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT hour($1::time)'; +CREATE OR REPLACE FUNCTION pg_catalog.minute (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT minute($1::time)'; +CREATE OR REPLACE FUNCTION pg_catalog.second (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT second($1::time)'; +CREATE OR REPLACE FUNCTION pg_catalog.year (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT year($1::time)'; \ No newline at end of file -- Gitee From c8c572f834dd88802aa55d0d548be9515ba582c4 Mon Sep 17 00:00:00 2001 From: wuyuechuan Date: Fri, 19 Jan 2024 16:13:32 +0800 Subject: [PATCH 207/434] compatibility: repeat function --- .../dolphin/expected/builtin_funcs/repeat.out | 144 ++++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_postgres.cpp | 35 ++++- contrib/dolphin/plugin_utils/adt/a_compat.cpp | 80 ++++++++++ contrib/dolphin/plugin_utils/adt/varlena.cpp | 55 ------- .../rollback_script/dolphin--3.0--2.0.sql | 11 +- contrib/dolphin/sql/builtin_funcs/repeat.sql | 97 ++++++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 11 +- 8 files changed, 369 insertions(+), 66 deletions(-) create mode 100644 contrib/dolphin/expected/builtin_funcs/repeat.out create mode 100644 contrib/dolphin/sql/builtin_funcs/repeat.sql diff --git a/contrib/dolphin/expected/builtin_funcs/repeat.out b/contrib/dolphin/expected/builtin_funcs/repeat.out new 
file mode 100644 index 000000000..8986be63f --- /dev/null +++ b/contrib/dolphin/expected/builtin_funcs/repeat.out @@ -0,0 +1,144 @@ +create schema test_builtin_funcs; +set current_schema to 'test_builtin_funcs'; +set dolphin.sql_mode to ''; -- remove pad_char_to_full_length +set dolphin.b_compatibility_mode = on; +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); +NOTICE: CREATE TABLE will create implicit set "test_type_table_set_t_set" for column "test_type_table.set_t" +insert into test_type_table +values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, '2023-02-05', '19:10:50', '19:10:50.3456', + '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', + '2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', 'a', 'a,c', + json_object('a', 1, 'b', 2)); +set bytea_output TO escape; +\x +select repeat(`int1`, 10), + repeat(`uint1`, 10), + repeat(`int2`, 10), + repeat(`uint2`, 10), + repeat(`int4`, 10), + repeat(`uint4`, 10), + repeat(`int8`, 10), + repeat(`uint8`, 10), + repeat(`float4`, 10), + repeat(`float8`, 10), + repeat(`numeric`, 10), + repeat(`bit1`, 10), + repeat(`bit64`, 10), + 
repeat(`boolean`, 10), + repeat(`date`, 10), + repeat(`time`, 10), + repeat(`time(4)`, 10), + repeat(`datetime`, 10), + repeat(`datetime(4)`, 10), + repeat(`timestamp`, 10), + repeat(`timestamp(4)`, 10), + repeat(`year`, 10), + repeat(`char`, 10), + repeat(`varchar`, 10), + repeat(`binary`, 10), + repeat(`varbinary`, 10), + repeat(`tinyblob`, 10), + repeat(`blob`, 10), + repeat(`mediumblob`, 10), + repeat(`longblob`, 10), + repeat(`text`, 10), + repeat(`enum_t`, 10), + repeat(`set_t`, 10), + repeat(`json`, 10) +from test_type_table; +-[ RECORD 1 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1111111111 +repeat | 1.0000001.0000001.0000001.0000001.0000001.0000001.0000001.0000001.0000001.000000 +repeat | \001\001\001\001\001\001\001\001\001\001 +repeat | \000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007\000\000\000\000\000\000\000\007 +repeat | 1111111111 +repeat | 2023-02-052023-02-052023-02-052023-02-052023-02-052023-02-052023-02-052023-02-052023-02-052023-02-05 +repeat | 19:10:5019:10:5019:10:5019:10:5019:10:5019:10:5019:10:5019:10:5019:10:5019:10:50 +repeat | 19:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.345619:10:50.3456 +repeat | 2023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:502023-02-05 19:10:50 +repeat | 2023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.4562023-02-05 19:10:50.456 +repeat | 2023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 
19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-082023-02-05 19:10:50-08 +repeat | 2023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-082023-02-05 19:10:50.456-08 +repeat | 2023202320232023202320232023202320232023 +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\00
0\000\000\000\000\000\000\000\000\000\0001.23a\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000 +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | 1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a1.23a +repeat | aaaaaaaaaa +repeat | a,ca,ca,ca,ca,ca,ca,ca,ca,ca,c +repeat | {"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2}{"a": 1, "b": 2} + +\x +-- bit test +create table bit_test(bit1 bit(1), bit5 bit(5), bit8 bit(8), bit15 bit(15), bit16 bit(16)); +insert into bit_test values(0,0,0,0,0); +insert into bit_test values(1,1,1,1,1); +insert into bit_test values(1,0x7,0xF,0xF5,0xF6); +select repeat(bit1,5),repeat(bit5,5),repeat(bit8,5),repeat(bit15,5),repeat(bit16,5) from bit_test; + repeat | repeat | repeat | repeat | repeat +----------------------+----------------------+----------------------+------------------------------------------+------------------------------------------ + \000\000\000\000\000 | \000\000\000\000\000 | \000\000\000\000\000 | \000\000\000\000\000\000\000\000\000\000 | \000\000\000\000\000\000\000\000\000\000 + \001\001\001\001\001 | \001\001\001\001\001 | \001\001\001\001\001 | \000\001\000\001\000\001\000\001\000\001 | \000\001\000\001\000\001\000\001\000\001 + \001\001\001\001\001 | \007\007\007\007\007 | \017\017\017\017\017 | \000\365\000\365\000\365\000\365\000\365 | 
\000\366\000\366\000\366\000\366\000\366 +(3 rows) + +drop schema test_builtin_funcs cascade; +NOTICE: drop cascades to 4 other objects +--?.* +drop cascades to type test_type_table_set_t_set +drop cascades to table test_type_table +drop cascades to table bit_test +reset bytea_output; +reset dolphin.sql_mode; +reset dolphin.b_compatibility_mode; +reset current_schema; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index cb067134c..765429fef 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -78,7 +78,7 @@ test: network2 use_dbname show_create view_definer_test insert_set show_create_d test: partition_test1 partition_test2 partition_test3 partition_test4 partition_maxvalue_test -test: builtin_funcs/b_compatibility_time_funcs builtin_funcs/b_compatibility_time_funcs2 builtin_funcs/b_compatibility_time_funcs3 +test: builtin_funcs/b_compatibility_time_funcs builtin_funcs/b_compatibility_time_funcs2 builtin_funcs/b_compatibility_time_funcs3 builtin_funcs/repeat # case sensitive test, do not insert test case */ test: case_sensitive_test/create_env diff --git a/contrib/dolphin/plugin_postgres.cpp b/contrib/dolphin/plugin_postgres.cpp index e291c5268..265a4ad14 100644 --- a/contrib/dolphin/plugin_postgres.cpp +++ b/contrib/dolphin/plugin_postgres.cpp @@ -508,11 +508,14 @@ static void SpiMultiSelectException() */ static bool CheckSqlMode(char** newval, void** extra, GucSource source) { - char* rawstring = NULL; - List* elemlist = NULL; - ListCell* cell = NULL; + char *rawstring = NULL; + List *elemlist = NULL; + ListCell *cell = NULL; int start = 0; + if (strlen(*newval) == 0) { + return true; + } /* Need a modifiable copy of string */ rawstring = pstrdup(*newval); /* Parse string into list of identifiers */ @@ -524,19 +527,18 @@ static bool CheckSqlMode(char** newval, void** extra, GucSource source) return false; } - + bool sql_mode_assign[OPT_SQL_MODE_MAX] = {0}; 
foreach (cell, elemlist) { - const char* item = (const char*)lfirst(cell); - bool nfound = true; + const char *item = (const char *)lfirst(cell); for (start = 0; start < OPT_SQL_MODE_MAX; start++) { if (strcmp(item, sql_mode_options[start].name) == 0) { - nfound = false; + sql_mode_assign[start] = true; break; } } - if (nfound) { + if (start == OPT_SQL_MODE_MAX) { GUC_check_errdetail("invalid sql_mode option \"%s\"", item); pfree(rawstring); list_free(elemlist); @@ -547,6 +549,23 @@ static bool CheckSqlMode(char** newval, void** extra, GucSource source) pfree(rawstring); list_free(elemlist); + /* rewrite dolphin sql_mode */ + StringInfo stringInfo = makeStringInfo(); + bool firstEnter = false; + for (start = 0; start < OPT_SQL_MODE_MAX; start++) { + if (sql_mode_assign[start]) { + if (firstEnter) { + appendStringInfoString(stringInfo, ","); + } + firstEnter = true; + appendStringInfoString(stringInfo, sql_mode_options[start].name); + } + } + /* Obviously, the length of stringInfo must be less than or equal to that of newval. 
*/ + int ret = strcpy_s(*newval, strlen(*newval) + 1, stringInfo->data); + securec_check(ret, "\0", "\0"); + DestroyStringInfo(stringInfo); + return true; } diff --git a/contrib/dolphin/plugin_utils/adt/a_compat.cpp b/contrib/dolphin/plugin_utils/adt/a_compat.cpp index cabba66bc..56e7f01b6 100644 --- a/contrib/dolphin/plugin_utils/adt/a_compat.cpp +++ b/contrib/dolphin/plugin_utils/adt/a_compat.cpp @@ -20,6 +20,7 @@ #include "plugin_mb/pg_wchar.h" #include "plugin_parser/parser.h" #include "plugin_postgres.h" +#include "utils/varbit.h" #include "miscadmin.h" static text* dotrim(const char* string, int stringlen, const char* set, int setlen, bool doltrim, bool dortrim); @@ -1091,3 +1092,82 @@ Datum repeat(PG_FUNCTION_ARGS) } else PG_RETURN_TEXT_P(result); } + +PG_FUNCTION_INFO_V1_PUBLIC(repeat_binary); +extern "C" DLL_PUBLIC Datum repeat_binary(PG_FUNCTION_ARGS); +Datum repeat_binary(PG_FUNCTION_ARGS) +{ + bytea *vlena = PG_GETARG_BYTEA_PP(0); + int32 count = PG_GETARG_INT32(1); + int32 slen = VARSIZE_ANY_EXHDR(vlena); + FUNC_CHECK_HUGE_POINTER(false, vlena, "repeat()"); + int tlen; + /* out of rage check */ + if (unlikely(pg_mul_s32_overflow(count, slen, &tlen)) || unlikely(pg_add_s32_overflow(tlen, VARHDRSZ, &tlen))) { + ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("requested length too large"))); + } + + char *source = VARDATA_ANY(vlena); + bytea *result = (bytea *)palloc(tlen); + /* destination size */ + int32 resultSize = tlen - VARHDRSZ; + SET_VARSIZE(result, resultSize + VARHDRSZ); + char *tmp = VARDATA(result); + int leftLength = resultSize; + while (count--) { + errno_t rc = memcpy_s(tmp, leftLength, source, slen); + securec_check(rc, "\0", "\0"); + tmp += slen; + leftLength -= slen; + } + PG_RETURN_BYTEA_P(result); +} + +PG_FUNCTION_INFO_V1_PUBLIC(repeat_bit); +extern "C" DLL_PUBLIC Datum repeat_bit(PG_FUNCTION_ARGS); + +Datum repeat_bit(PG_FUNCTION_ARGS) +{ + VarBit *s = PG_GETARG_VARBIT_P(0); + char *bitsChar = (char *)VARBITS(s); + 
int len = VARBITLEN(s); + + /* + * When the length of varbit is not a multiple of 8, varbit uses the first several digits of bits8 as the bit flag. + * For example, if the length of var is 13, the actual number of digits is as follows: + * | 11111111 | 11111000 | + * When we convert the varbit value to bytea, we need to shift all the varbits right. + * The length of the shift right operation is BITS_PER_BYTE - length % BITS_PER_BYTE + * | 00001111 | 11111111 | + * Other examples: + * Example 1: length: 18. Shift move: 6 + * | 10101011 | 00110111 | 01000000 | converts to: + * | 00000010 | 10101100 | 11011101 | + * Example 2: length 24(=8*3): do nothing + * | 10000000 | 10000000 | 10000000 | converts to: + * | 10000000 | 10000000 | 10000000 | + * We only need to convert the last varbit whose length is not 8. + * */ + int moveCount = BITS_PER_BYTE - (len % BITS_PER_BYTE); + if (moveCount != BITS_PER_BYTE) { + /* make a copy of varbit */ + VarBit *varBitCopy = PG_GETARG_VARBIT_P_COPY(0); + bits8 *sp = VARBITS(varBitCopy); + bitsChar = (char *)sp; + + int varBitSize = len / BITS_PER_BYTE + 1; + while (moveCount-- != 0) { + bits8 currentRight = 0x00; + bits8 lastRight = 0x00; +#define LOWER_MASK 0x01 + for (int i = 0; i < varBitSize; i++) { + currentRight = sp[i] & LOWER_MASK; + sp[i] >>= 1; + sp[i] |= (lastRight << 7); + lastRight = currentRight; + } + } + } + Datum bit_binary = CStringGetByteaDatum(bitsChar, VARBITBYTES(s)); + return DirectFunctionCall2(repeat_binary, bit_binary, PG_GETARG_INT32(1)); +} diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index adf9738c9..f72aa9313 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10967,61 +10967,6 @@ Datum binary_length(PG_FUNCTION_ARGS) PG_RETURN_INT32(VARSIZE_ANY_EXHDR(vlena)); } -static Datum normal_dolphin_binaryout(PG_FUNCTION_ARGS) -{ - bytea* vlena = PG_GETARG_BYTEA_PP(0); - char* result = NULL; - 
char* rp = NULL; - char* vp = NULL; - int len; - int i; - - len = 1; - vp = VARDATA_ANY(vlena); - for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { - if (*vp == '\0') { - len += 2; - } else { - len++; - } - } - rp = result = (char*)palloc(len); - vp = VARDATA_ANY(vlena); - for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++) { - if (*vp == '\0') { - *rp++ = '\\'; - *rp++ = '0'; - } else { - *rp++ = *vp; - } - } - *rp = '\0'; - - /* free memory if allocated by the toaster */ - PG_FREE_IF_COPY(vlena, 0); - - PG_RETURN_CSTRING(result); -} - -static Datum hex_dolphin_binaryout(PG_FUNCTION_ARGS) -{ - bytea* vlena = PG_GETARG_BYTEA_PP(0); - char* result = NULL; - char* rp = NULL; - - /* Print hex format */ - rp = result = (char*)palloc(VARSIZE_ANY_EXHDR(vlena) * 2 + 2 + 1); - *rp++ = '\\'; - *rp++ = 'x'; - rp += hex_encode(VARDATA_ANY(vlena), VARSIZE_ANY_EXHDR(vlena), rp); - *rp = '\0'; - - /* free memory if allocated by the toaster */ - PG_FREE_IF_COPY(vlena, 0); - - PG_RETURN_CSTRING(result); -} - Datum dolphin_binaryout(PG_FUNCTION_ARGS) { return byteaout(fcinfo); diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index 77494e6ce..936d0c68c 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -754,4 +754,13 @@ DROP FUNCTION IF EXISTS pg_catalog.floor(json); DROP FUNCTION IF EXISTS pg_catalog.hour(YEAR); DROP FUNCTION IF EXISTS pg_catalog.minute(YEAR); DROP FUNCTION IF EXISTS pg_catalog.second(YEAR); -DROP FUNCTION IF EXISTS pg_catalog.year(YEAR); \ No newline at end of file +DROP FUNCTION IF EXISTS pg_catalog.year(YEAR); + +-- repeat function support +DROP FUNCTION IF EXISTS pg_catalog.repeat(anyenum, integer); +DROP FUNCTION IF EXISTS pg_catalog.repeat(boolean, integer); +DROP FUNCTION IF EXISTS pg_catalog.repeat(tinyblob, integer); +DROP FUNCTION IF EXISTS pg_catalog.repeat(json, integer); +DROP FUNCTION IF 
EXISTS pg_catalog.repeat(year, integer); +DROP FUNCTION IF EXISTS pg_catalog.repeat(binary, integer); +DROP FUNCTION IF EXISTS pg_catalog.repeat(bit, integer); \ No newline at end of file diff --git a/contrib/dolphin/sql/builtin_funcs/repeat.sql b/contrib/dolphin/sql/builtin_funcs/repeat.sql new file mode 100644 index 000000000..02e05864e --- /dev/null +++ b/contrib/dolphin/sql/builtin_funcs/repeat.sql @@ -0,0 +1,97 @@ +create schema test_builtin_funcs; +set current_schema to 'test_builtin_funcs'; +set dolphin.sql_mode to ''; -- remove pad_char_to_full_length +set dolphin.b_compatibility_mode = on; + +create table test_type_table +( + `int1` tinyint, + `uint1` tinyint unsigned, + `int2` smallint, + `uint2` smallint unsigned, + `int4` integer, + `uint4` integer unsigned, + `int8` bigint, + `uint8` bigint unsigned, + `float4` float4, + `float8` float8, + `numeric` decimal(20, 6), + `bit1` bit(1), + `bit64` bit(64), + `boolean` boolean, + `date` date, + `time` time, + `time(4)` time(4), + `datetime` datetime, + `datetime(4)` datetime(4) default '2022-11-11 11:11:11', + `timestamp` timestamp, + `timestamp(4)` timestamp(4) default '2022-11-11 11:11:11', + `year` year, + `char` char(100), + `varchar` varchar(100), + `binary` binary(100), + `varbinary` varbinary(100), + `tinyblob` tinyblob, + `blob` blob, + `mediumblob` mediumblob, + `longblob` longblob, + `text` text, + `enum_t` enum('a', 'b', 'c'), + `set_t` set('a', 'b', 'c'), + `json` json +); + +insert into test_type_table +values (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'1', b'111', true, '2023-02-05', '19:10:50', '19:10:50.3456', + '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', '2023-02-05 19:10:50', '2023-02-05 19:10:50.456', + '2023', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', '1.23a', 'a', 'a,c', + json_object('a', 1, 'b', 2)); +set bytea_output TO escape; +\x +select repeat(`int1`, 10), + repeat(`uint1`, 10), + repeat(`int2`, 10), + repeat(`uint2`, 10), + repeat(`int4`, 10), + 
repeat(`uint4`, 10), + repeat(`int8`, 10), + repeat(`uint8`, 10), + repeat(`float4`, 10), + repeat(`float8`, 10), + repeat(`numeric`, 10), + repeat(`bit1`, 10), + repeat(`bit64`, 10), + repeat(`boolean`, 10), + repeat(`date`, 10), + repeat(`time`, 10), + repeat(`time(4)`, 10), + repeat(`datetime`, 10), + repeat(`datetime(4)`, 10), + repeat(`timestamp`, 10), + repeat(`timestamp(4)`, 10), + repeat(`year`, 10), + repeat(`char`, 10), + repeat(`varchar`, 10), + repeat(`binary`, 10), + repeat(`varbinary`, 10), + repeat(`tinyblob`, 10), + repeat(`blob`, 10), + repeat(`mediumblob`, 10), + repeat(`longblob`, 10), + repeat(`text`, 10), + repeat(`enum_t`, 10), + repeat(`set_t`, 10), + repeat(`json`, 10) +from test_type_table; +\x +-- bit test +create table bit_test(bit1 bit(1), bit5 bit(5), bit8 bit(8), bit15 bit(15), bit16 bit(16)); +insert into bit_test values(0,0,0,0,0); +insert into bit_test values(1,1,1,1,1); +insert into bit_test values(1,0x7,0xF,0xF5,0xF6); +select repeat(bit1,5),repeat(bit5,5),repeat(bit8,5),repeat(bit15,5),repeat(bit16,5) from bit_test; +drop schema test_builtin_funcs cascade; +reset bytea_output; +reset dolphin.sql_mode; +reset dolphin.b_compatibility_mode; +reset current_schema; \ No newline at end of file diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index a43e4282c..460d70f9a 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1112,4 +1112,13 @@ RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as CREATE OR REPLACE FUNCTION pg_catalog.hour (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT hour($1::time)'; CREATE OR REPLACE FUNCTION pg_catalog.minute (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT minute($1::time)'; CREATE OR REPLACE FUNCTION pg_catalog.second (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT second($1::time)'; -CREATE OR REPLACE FUNCTION 
pg_catalog.year (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT year($1::time)'; \ No newline at end of file +CREATE OR REPLACE FUNCTION pg_catalog.year (year) RETURNS float8 LANGUAGE SQL STABLE STRICT as 'SELECT year($1::time)'; +-- repeat function support +CREATE OR REPLACE FUNCTION pg_catalog.repeat(anyenum, integer) RETURNS text LANGUAGE SQL STRICT IMMUTABLE AS 'select repeat($1::text, $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(boolean, integer) RETURNS text LANGUAGE SQL STRICT IMMUTABLE AS 'select repeat($1::text, $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(tinyblob, integer) RETURNS text LANGUAGE SQL STRICT IMMUTABLE AS 'select repeat($1::text, $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(json, integer) RETURNS text LANGUAGE SQL STRICT IMMUTABLE AS 'select repeat($1::text, $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(year, integer) RETURNS text LANGUAGE SQL STRICT IMMUTABLE AS 'select repeat($1::text, $2)'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(binary, integer) RETURNS bytea LANGUAGE C STRICT IMMUTABLE AS '$libdir/dolphin', 'repeat_binary'; +CREATE OR REPLACE FUNCTION pg_catalog.repeat(bit, integer) RETURNS bytea LANGUAGE C STRICT IMMUTABLE AS '$libdir/dolphin', 'repeat_bit'; + -- Gitee From a1d23cc050652187cf2394ba5d6e478988092bf1 Mon Sep 17 00:00:00 2001 From: chenbd Date: Tue, 23 Jan 2024 19:15:44 +0800 Subject: [PATCH 208/434] fix prepare stmt to replace --- contrib/dolphin/Makefile | 2 +- contrib/dolphin/checklist/checklist.md | 1 + .../dolphin/expected/test_mysql_prepare.out | 39 + .../plugin_optimizer/commands/Makefile | 2 +- .../plugin_optimizer/commands/prepare.cpp | 2387 +++++++++++++++++ contrib/dolphin/sql/test_mysql_prepare.sql | 9 + 6 files changed, 2438 insertions(+), 2 deletions(-) create mode 100644 contrib/dolphin/plugin_optimizer/commands/prepare.cpp diff --git a/contrib/dolphin/Makefile b/contrib/dolphin/Makefile index e55dfdd31..f066fd40e 100644 --- a/contrib/dolphin/Makefile +++ 
b/contrib/dolphin/Makefile @@ -68,7 +68,7 @@ OBJS += $(prep)/prepunion.o OBJS += $(optimizer_util)/plancat.o $(optimizer_util)/relnode.o $(optimizer_util)/clauses.o -OBJS += $(commands)/functioncmds.o $(commands)/foreigncmds.o $(commands)/copy.o $(commands)/schemacmds.o $(commands)/typecmds.o $(commands)/user.o $(commands)/alter.o +OBJS += $(commands)/functioncmds.o $(commands)/foreigncmds.o $(commands)/copy.o $(commands)/schemacmds.o $(commands)/typecmds.o $(commands)/user.o $(commands)/alter.o $(commands)/prepare.o OBJS += $(pl)/pl_gram.o $(pl)/pl_scanner.o $(pl)/pl_comp.o $(pl)/pl_handler.o diff --git a/contrib/dolphin/checklist/checklist.md b/contrib/dolphin/checklist/checklist.md index b75588dc3..7a79168b5 100644 --- a/contrib/dolphin/checklist/checklist.md +++ b/contrib/dolphin/checklist/checklist.md @@ -114,6 +114,7 @@ |plugin_optimizer\commands|typecmds.cpp |src\gausskernel\optimizer\commands\typecmds.cpp | |plugin_optimizer\commands|user.cpp |src\gausskernel\optimizer\commands\user.cpp | |plugin_optimizer\commands|alter.cpp |src\gausskernel\optimizer\commands\alter.cpp | +|plugin_optimizer\commands|prepare.cpp |src\gausskernel\optimizer\commands\prepare.cpp | |plugin_optimizer\plan|pgxcplan_single.cpp |src\gausskernel\optimizer\plan\pgxcplan_single.cpp | |plugin_optimizer\plan|planner.cpp |src\gausskernel\optimizer\plan\planner.cpp | |plugin_optimizer\plan|streamwalker.cpp |src\gausskernel\optimizer\plan\streamwalker.cpp | diff --git a/contrib/dolphin/expected/test_mysql_prepare.out b/contrib/dolphin/expected/test_mysql_prepare.out index dfd73d026..eae02f4d6 100644 --- a/contrib/dolphin/expected/test_mysql_prepare.out +++ b/contrib/dolphin/expected/test_mysql_prepare.out @@ -953,6 +953,45 @@ execute s2 using 1; deallocate s0; deallocate s1; deallocate s2; +--prepare replace +prepare s0 as select * from t1_xc_fqs t1 left join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? 
order by t1.id1; +execute s0 using 1; + id1 | id2 | num | id1 | id2 | num +-----+-----+-----+-----+-----+----- + 1 | 1 | 11 | 1 | 2 | 12 + 2 | 2 | 21 | | | + 3 | 3 | 31 | | | + 4 | 4 | 41 | | | + 5 | 5 | 51 | | | +(5 rows) + +prepare s0 as 'select * from t1_xc_fqs t1 right join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? order by t2.id1'; +execute s0 using @a; + id1 | id2 | num | id1 | id2 | num +-----+-----+-----+-----+-----+----- + 1 | 1 | 11 | 1 | 2 | 12 + | | | 2 | 3 | 22 + | | | 3 | 4 | 32 + | | | 4 | 5 | 42 + | | | 5 | 6 | 52 +(5 rows) + +prepare s0 as select * from t1_xc_fqs t1 full join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? order by t1.id1, t2.id1; +execute s0 using 1; + id1 | id2 | num | id1 | id2 | num +-----+-----+-----+-----+-----+----- + | | | 2 | 3 | 22 + | | | 3 | 4 | 32 + | | | 4 | 5 | 42 + | | | 5 | 6 | 52 + 1 | 1 | 11 | 1 | 2 | 12 + 2 | 2 | 21 | | | + 3 | 3 | 31 | | | + 4 | 4 | 41 | | | + 5 | 5 | 51 | | | +(9 rows) + +deallocate s0; reset dolphin.b_compatibility_mode; reset enable_set_variable_b_format; drop schema test_mysql_prepare cascade; diff --git a/contrib/dolphin/plugin_optimizer/commands/Makefile b/contrib/dolphin/plugin_optimizer/commands/Makefile index 04c87f736..f6453af4d 100755 --- a/contrib/dolphin/plugin_optimizer/commands/Makefile +++ b/contrib/dolphin/plugin_optimizer/commands/Makefile @@ -19,7 +19,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif -OBJS = functioncmds.o foreigncmds.o copy.o schemacmds.o typecmds.o user.o alter.o +OBJS = functioncmds.o foreigncmds.o copy.o schemacmds.o typecmds.o user.o alter.o prepare.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/contrib/dolphin/plugin_optimizer/commands/prepare.cpp b/contrib/dolphin/plugin_optimizer/commands/prepare.cpp new file mode 100644 index 000000000..ebc882f1e --- /dev/null +++ b/contrib/dolphin/plugin_optimizer/commands/prepare.cpp @@ -0,0 +1,2387 @@ +/* ------------------------------------------------------------------------- + * + * prepare.cpp + * 
Prepareable SQL statements via PREPARE, EXECUTE and DEALLOCATE + * + * This module also implements storage of prepared statements that are + * accessed via the extended FE/BE query protocol. + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Copyright (c) 2002-2012, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/gausskernel/optimizer/commands/prepare.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" +#include "access/printtup.h" +#include "access/xact.h" +#include "catalog/pg_type.h" +#include "catalog/pg_proc.h" +#include "commands/createas.h" +#include "commands/prepare.h" +#include "executor/lightProxy.h" +#include "miscadmin.h" +#include "nodes/nodeFuncs.h" +#include "opfusion/opfusion.h" +#include "optimizer/bucketpruning.h" +#include "parser/analyze.h" +#include "parser/parse_coerce.h" +#include "parser/parse_collate.h" +#include "parser/parse_expr.h" +#include "parser/parse_type.h" +#include "pgstat.h" +#include "rewrite/rewriteHandler.h" +#include "tcop/pquery.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/dynahash.h" +#include "utils/globalplancache.h" +#include "utils/globalplancore.h" +#include "utils/snapmgr.h" +#include "utils/timestamp.h" +#ifdef PGXC +#include "pgxc/pgxc.h" +#include "nodes/nodes.h" +#include "pgxc/nodemgr.h" +#include "pgxc/execRemote.h" +#include "catalog/pgxc_node.h" +#endif +#include "replication/walreceiver.h" +#include "optimizer/gplanmgr.h" +#ifdef ENABLE_MOT +#include "storage/mot/jit_exec.h" +#endif + +#define CLUSTER_EXPANSION_BASE 2 + +void InitQueryHashTable(void); +static ParamListInfo EvaluateParams(CachedPlanSource* psrc, List* params, const char* queryString, EState* estate); +static Datum build_regtype_array(const Oid* param_types, int num_params); + +extern void destroy_handles(); + +static void CopyPlanForGPCIfNecessary(CachedPlanSource* psrc, Portal 
portal) +{ + MemoryContext tmpCxt = NULL; + bool needCopy = ENABLE_GPC && psrc->gplan; + if (needCopy) { + portal->stmts = CopyLocalStmt(portal->cplan->stmt_list, u_sess->temp_mem_cxt, &tmpCxt); + } +} + +#ifdef ENABLE_MOT +void TryMotJitCodegenQuery(const char* queryString, CachedPlanSource* psrc, Query* query) +{ + // Try to generate LLVM jitted code - first cleanup jit of previous run. + if (psrc->mot_jit_context != NULL) { + if (JitExec::IsJitContextPendingCompile(psrc->mot_jit_context) || + JitExec::IsJitContextDoneCompile(psrc->mot_jit_context)) { + return; + } + + // NOTE: context is cleaned up during end of session, this should not happen, + // maybe a warning should be issued + Assert(false); + ereport(WARNING, (errmsg("Cached Plan Source already has a MOT JIT Context, destroying the residual context"))); + JitExec::DestroyJitContext(psrc->mot_jit_context, true); + psrc->mot_jit_context = NULL; + Assert(psrc->opFusionObj == NULL); + } + + if (query == NULL) { + if (list_length(psrc->query_list) != 1) { + elog(DEBUG2, "Plan source does not have exactly one query"); + return; + } + query = (Query*)linitial(psrc->query_list); + if (query == NULL) { + elog(DEBUG2, "No query object present for MOT JIT"); + return; + } + } + + if ((query->commandType != CMD_SELECT) && (query->commandType != CMD_INSERT) && + (query->commandType != CMD_UPDATE) && (query->commandType != CMD_DELETE)) { + elog(DEBUG2, "Query is not SELECT|INSERT|UPDATE|DELETE"); + return; + } + + if (JitExec::IsMotCodegenPrintEnabled()) { + elog(LOG, "Attempting to generate MOT jitted code for query: %s\n", queryString); + } + + Assert(psrc->opFusionObj == NULL && psrc->mot_jit_context == NULL); + u_sess->mot_cxt.jit_codegen_error = 0; + psrc->mot_jit_context = JitExec::TryJitCodegenQuery(query, queryString); + if (psrc->mot_jit_context != NULL) { + if (JitExec::IsJitContextValid(psrc->mot_jit_context)) { + psrc->is_checked_opfusion = false; + } + } else { + if (JitExec::IsMotCodegenPrintEnabled()) { 
+ elog(LOG, "Failed to generate jitted MOT function for query %s\n", queryString); + } + if (u_sess->mot_cxt.jit_codegen_error == ERRCODE_QUERY_CANCELED) { + // If JIT compilation failed due to cancel request, we need to ereport. JIT source will be in error state, + // but checkedMotJitCodegen will still be false so that the JIT compilation will be triggered on next + // attempt. + Assert(!psrc->checkedMotJitCodegen); + ereport(ERROR, (errcode(ERRCODE_QUERY_CANCELED), errmsg("canceling statement due to user request"))); + } + } +} +#endif + +/* + * Implements the 'PREPARE' utility statement. + */ +void PrepareQuery(PrepareStmt* stmt, const char* queryString) +{ + CachedPlanSource* plansource = NULL; + Oid* argtypes = NULL; + int nargs; + Query* query = NULL; + List* query_list = NIL; + int i; + + /* + * Disallow empty-string statement name (conflicts with protocol-level + * unnamed statement). + */ + if (!stmt->name || stmt->name[0] == '\0') + ereport(ERROR, + (errcode(ERRCODE_INVALID_PSTATEMENT_DEFINITION), errmsg("invalid statement name: must not be empty"))); + + /* + * Create the CachedPlanSource before we do parse analysis, since it needs + * to see the unmodified raw parse tree. + */ + plansource = CreateCachedPlan(stmt->query, + queryString, +#ifdef PGXC + stmt->name, +#endif + CreateCommandTag(stmt->query)); + t_thrd.postgres_cxt.cur_command_tag = transform_node_tag(stmt->query); + + /* Transform list of TypeNames to array of type OIDs */ + nargs = list_length(stmt->argtypes); + + if (nargs) { + ParseState* pstate = NULL; + ListCell* l = NULL; + + /* + * typenameTypeId wants a ParseState to carry the source query string. + * Is it worth refactoring its API to avoid this? 
+ */ + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + + argtypes = (Oid*)palloc(nargs * sizeof(Oid)); + i = 0; + + foreach (l, stmt->argtypes) { + TypeName* tn = (TypeName*)lfirst(l); + Oid toid = typenameTypeId(pstate, tn); + + argtypes[i++] = toid; + } + } + + /* + * Analyze the statement using these parameter types (any parameters + * passed in from above us will not be visible to it), allowing + * information about unknown parameters to be deduced from context. + */ + + query = parse_analyze_varparams(stmt->query, queryString, &argtypes, &nargs); + +#ifdef ENABLE_MOT + /* check cross engine queries */ + StorageEngineType storageEngineType = SE_TYPE_UNSPECIFIED; + CheckTablesStorageEngine(query, &storageEngineType); + SetCurrentTransactionStorageEngine(storageEngineType); + /* set the plan's storage engine */ + plansource->storageEngineType = storageEngineType; + + /* gpc does not support MOT engine */ + if (ENABLE_CN_GPC && plansource->gpc.status.IsSharePlan() && + (storageEngineType == SE_TYPE_MOT || storageEngineType == SE_TYPE_MIXED)) { + plansource->gpc.status.SetKind(GPC_UNSHARED); + } +#endif + + if (ENABLE_CN_GPC && plansource->gpc.status.IsSharePlan() && contains_temp_tables(query->rtable)) { + /* temp table unsupport shared */ + plansource->gpc.status.SetKind(GPC_UNSHARED); + } + + /* + * Check that all parameter types were determined. 
+ */ + for (i = 0; i < nargs; i++) { + Oid argtype = argtypes[i]; + + if (argtype == InvalidOid || argtype == UNKNOWNOID) + ereport(ERROR, + (errcode(ERRCODE_INDETERMINATE_DATATYPE), + errmsg("could not determine data type of parameter $%d", i + 1))); + } + + /* + * grammar only allows OptimizableStmt, so this check should be redundant + */ + switch (query->commandType) { + case CMD_SELECT: + case CMD_INSERT: + case CMD_UPDATE: + case CMD_DELETE: + case CMD_MERGE: + /* OK */ + break; + case CMD_UTILITY: + if (IsA(query->utilityStmt, VariableMultiSetStmt) || + IsA(query->utilityStmt, CopyStmt)) { + break; + } + default: + ereport(ERROR, + (errcode(ERRCODE_INVALID_PSTATEMENT_DEFINITION), errmsg("utility statements cannot be prepared"))); + break; + } + + /* Rewrite the query. The result could be 0, 1, or many queries. */ + query_list = QueryRewrite(query); + + /* Finish filling in the CachedPlanSource */ + CompleteCachedPlan(plansource, + query_list, + NULL, + argtypes, + NULL, + nargs, + NULL, + NULL, + 0, /* default cursor options */ + true, /* fixed result */ + stmt->name); + + /* + * Save the results. + */ + StorePreparedStatement(stmt->name, plansource, true); + +#ifdef ENABLE_MOT + // Try MOT JIT code generation only after the plan source is saved. + if ((plansource->storageEngineType == SE_TYPE_MOT || plansource->storageEngineType == SE_TYPE_UNSPECIFIED) && + !IS_PGXC_COORDINATOR && JitExec::IsMotCodegenEnabled()) { + // MOT JIT code generation + TryMotJitCodegenQuery(queryString, plansource, query); + } +#endif +} + +/* + * ExecuteQuery --- implement the 'EXECUTE' utility statement. + * + * This code also supports CREATE TABLE ... AS EXECUTE. That case is + * indicated by passing a non-null intoClause. The DestReceiver is already + * set up correctly for CREATE TABLE AS, but we still have to make a few + * other adjustments here. + * + * Note: this is one of very few places in the code that needs to deal with + * two query strings at once. 
The passed-in queryString is that of the + * EXECUTE, which we might need for error reporting while processing the + * parameter expressions. The query_string that we copy from the plan + * source is that of the original PREPARE. + */ +void ExecuteQuery(ExecuteStmt* stmt, IntoClause* intoClause, const char* queryString, ParamListInfo params, + DestReceiver* dest, char* completionTag) +{ + PreparedStatement *entry = NULL; + CachedPlan* cplan = NULL; + List* plan_list = NIL; + ParamListInfo paramLI = NULL; + EState* estate = NULL; + Portal portal; + char* query_string = NULL; + int eflags; + long count; + CachedPlanSource* psrc = NULL; + + /* Look it up in the hash table */ + entry = FetchPreparedStatement(stmt->name, true, true); + psrc = entry->plansource; + t_thrd.postgres_cxt.cur_command_tag = transform_node_tag(psrc->raw_parse_tree); + + /* Shouldn't find a non-fixed-result cached plan */ + if (!entry->plansource->fixed_result) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("EXECUTE does not support variable-result cached plans"))); + + /* Evaluate parameters, if any */ + if (entry->plansource->num_params > 0) { + /* + * Need an EState to evaluate parameters; must not delete it till end + * of query, in case parameters are pass-by-reference. Note that the + * passed-in "params" could possibly be referenced in the parameter + * expressions. + */ + estate = CreateExecutorState(); + estate->es_param_list_info = params; + paramLI = EvaluateParams(psrc, stmt->params, queryString, estate); + } + + OpFusion::clearForCplan((OpFusion*)psrc->opFusionObj, psrc); + +#ifdef ENABLE_MOT + /* + * MOT JIT Execution: + * Assist in distinguishing query boundaries in case of range query when client uses batches. 
This allows us to + * know a new query started, and in case a previous execution did not fetch all records (since user is working in + * batch-mode, and can decide to quit fetching in the middle), using this information we can infer this is a new + * scan, and old scan state should be discarded. + */ + if (psrc->mot_jit_context != NULL) { + JitResetScan(psrc->mot_jit_context); + } +#endif + + if (psrc->opFusionObj != NULL) { + Assert(psrc->cplan == NULL); + (void)RevalidateCachedQuery(psrc); + } + + if (psrc->opFusionObj != NULL) { + OpFusion *opFusionObj = (OpFusion *)(psrc->opFusionObj); + if (opFusionObj->IsGlobal()) { + opFusionObj = (OpFusion *)OpFusion::FusionFactory(opFusionObj->m_global->m_type, + u_sess->cache_mem_cxt, psrc, NULL, paramLI); + Assert(opFusionObj != NULL); + } + opFusionObj->setPreparedDestReceiver(dest); + opFusionObj->useOuterParameter(paramLI); + opFusionObj->setCurrentOpFusionObj(opFusionObj); + + CachedPlanSource* cps = opFusionObj->m_global->m_psrc; + bool needBucketId = cps != NULL && cps->gplan; + if (needBucketId) { + setCachedPlanBucketId(cps->gplan, paramLI); + } + + if (OpFusion::process(FUSION_EXECUTE, NULL, completionTag, false, NULL)) { + return; + } + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Bypass process Failed"))); + } + + /* Create a new portal to run the query in */ + portal = CreateNewPortal(); + /* Don't display the portal in pg_cursors, it is for internal use only */ + portal->visible = false; + + /* Copy the plan's saved query string into the portal's memory */ + query_string = MemoryContextStrdup(PortalGetHeapMemory(portal), entry->plansource->query_string); + + if (!intoClause) { + psrc->cursor_options |= CURSOR_OPT_SPQ_OK; + } + + /* Replan if needed, and increment plan refcount for portal */ + if (ENABLE_CACHEDPLAN_MGR) { + cplan = GetWiseCachedPlan(psrc, paramLI, false); + } else { + cplan = GetCachedPlan(psrc, paramLI, false); + } + + plan_list = cplan->stmt_list; + + /* + * Now we can 
define the portal. + * + * DO NOT put any code that could possibly throw an error between the + * above GetCachedPlan call and here. + */ + PortalDefineQuery(portal, NULL, query_string, entry->plansource->commandTag, plan_list, cplan); + portal->nextval_default_expr_type = psrc->nextval_default_expr_type; + + /* incase change shared plan in execute stage */ + CopyPlanForGPCIfNecessary(entry->plansource, portal); + + /* + * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared + * statement is one that produces tuples. Currently we insist that it be + * a plain old SELECT. In future we might consider supporting other + * things such as INSERT ... RETURNING, but there are a couple of issues + * to be settled first, notably how WITH NO DATA should be handled in such + * a case (do we really want to suppress execution?) and how to pass down + * the OID-determining eflags (PortalStart won't handle them in such a + * case, and for that matter it's not clear the executor will either). + * + * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper + * eflags and fetch count are passed to PortalStart/PortalRun. 
+ */ + if (intoClause != NULL) { + PlannedStmt* pstmt = NULL; + + if (list_length(plan_list) != 1) + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("prepared statement is not a SELECT"))); + pstmt = (PlannedStmt*)linitial(plan_list); + if (!IsA(pstmt, PlannedStmt) || pstmt->commandType != CMD_SELECT || pstmt->utilityStmt != NULL) + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("prepared statement is not a SELECT"))); + + /* Set appropriate eflags */ + eflags = GetIntoRelEFlags(intoClause); + + /* And tell PortalRun whether to run to completion or not */ + if (intoClause->skipData) + count = 0; + else + count = FETCH_ALL; + } else { + /* Plain old EXECUTE */ + eflags = 0; + count = FETCH_ALL; + } + + if (OpFusion::IsSqlBypass(psrc, plan_list)) { + psrc->opFusionObj = + OpFusion::FusionFactory(OpFusion::getFusionType(cplan, paramLI, NULL), + u_sess->cache_mem_cxt, psrc, NULL, paramLI); + psrc->is_checked_opfusion = true; + if (psrc->opFusionObj != NULL) { + ((OpFusion*)psrc->opFusionObj)->setPreparedDestReceiver(dest); + ((OpFusion*)psrc->opFusionObj)->useOuterParameter(paramLI); + ((OpFusion*)psrc->opFusionObj)->setCurrentOpFusionObj((OpFusion*)psrc->opFusionObj); + + if (OpFusion::process(FUSION_EXECUTE, NULL, completionTag, false, NULL)) { + return; + } + Assert(0); + } + } + + /* + * Run the portal as appropriate. + */ + PortalStart(portal, paramLI, eflags, GetActiveSnapshot()); + + (void)PortalRun(portal, count, false, dest, dest, completionTag); + + PortalDrop(portal, false); + + if (estate != NULL) + FreeExecutorState(estate); + + /* No need to pfree other memory, MemoryContext will be reset */ +} + +/* + * EvaluateParams: evaluate a list of parameters. + * + * pstmt: statement we are getting parameters for. + * params: list of given parameter expressions (raw parser output!) + * queryString: source text for error messages. + * estate: executor state to use. 
 *
 * Returns a filled-in ParamListInfo -- this can later be passed to
 * CreateQueryDesc(), which allows the executor to make use of the parameters
 * during query execution.
 */
static ParamListInfo EvaluateParams(CachedPlanSource* psrc, List* params, const char* queryString, EState* estate)
{
    Oid* param_types = psrc->param_types;
    int num_params = psrc->num_params;
    int nparams = list_length(params);
    ParseState* pstate = NULL;
    ParamListInfo paramLI;
    List* exprstates = NIL;
    ListCell* l = NULL;
    Oid param_collation;
    int param_charset;
    int i;

    /* Reject a parameter-count mismatch before doing any work. */
    if (nparams != num_params)
        ereport(ERROR,
            (errcode(ERRCODE_SYNTAX_ERROR),
                errmsg("wrong number of parameters for prepared statement \"%s\"", psrc->stmt_name),
                errdetail("Expected %d parameters but got %d.", num_params, nparams)));

    /* Quick exit if no parameters */
    if (num_params == 0)
        return NULL;

    /*
     * We have to run parse analysis for the expressions.  Since the parser is
     * not cool about scribbling on its input, copy first.
     */
    params = (List*)copyObject(params);

    pstate = make_parsestate(NULL);
    pstate->p_sourcetext = queryString;

    param_collation = GetCollationConnection();
    param_charset = GetCharsetConnection();
    i = 0;
    foreach (l, params) {
        Node* expr = (Node*)lfirst(l);
        Oid expected_type_id = param_types[i];
        Oid given_type_id;

        expr = transformExpr(pstate, expr, EXPR_KIND_EXECUTE_PARAMETER);

        /* Cannot contain subselects or aggregates */
        if (pstate->p_hasSubLinks)
            ereport(
                ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot use subquery in EXECUTE parameter")));
        if (pstate->p_hasAggs)
            ereport(
                ERROR, (errcode(ERRCODE_GROUPING_ERROR), errmsg("cannot use aggregate function in EXECUTE parameter")));
        if (pstate->p_hasWindowFuncs)
            ereport(
                ERROR, (errcode(ERRCODE_WINDOWING_ERROR), errmsg("cannot use window function in EXECUTE parameter")));

        given_type_id = exprType(expr);

        /* Coerce each expression to the type recorded at PREPARE time. */
        expr = coerce_to_target_type(
            pstate, expr, given_type_id, expected_type_id, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1);

        if (expr == NULL)
            ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                    errmsg("parameter $%d of type %s cannot be coerced to the expected type %s",
                        i + 1,
                        format_type_be(given_type_id),
                        format_type_be(expected_type_id)),
                    errhint("You will need to rewrite or cast the expression.")));

        /* Take care of collations in the finished expression. */
        assign_expr_collations(pstate, expr);

        /* Try to convert the expression to the target parameter charset. */
        if (OidIsValid(param_collation) && IsSupportCharsetType(expected_type_id)) {
            /* convert charset only, expression will be evaluated below */
            expr = coerce_to_target_charset(expr, param_charset, expected_type_id, -1, param_collation, false);
        }

        lfirst(l) = expr;
        i++;
    }

    /* Prepare the expressions for execution */

    paramLI = (ParamListInfo)palloc(offsetof(ParamListInfoData, params) + num_params * sizeof(ParamExternData));
    /* we have static list of params, so no hooks needed */
    paramLI->paramFetch = NULL;
    paramLI->paramFetchArg = NULL;
    paramLI->parserSetup = NULL;
    paramLI->parserSetupArg = NULL;
    paramLI->params_need_process = false;
    paramLI->numParams = num_params;
    paramLI->uParamInfo = DEFUALT_INFO;
    paramLI->params_lazy_bind = false;
    /*
     * Fast path: an INSERT whose parameters are all Const nodes after
     * coercion can copy the datums directly, skipping executor setup.
     */
    bool isInsertConst = IsA(psrc->raw_parse_tree, InsertStmt);
    foreach (l, params) {
        if (!IsA(lfirst(l), Const)) {
            isInsertConst = false;
            break;
        }
    }
    i = 0;
    if (isInsertConst) {
        foreach (l, params) {
            Const* e = (Const*)lfirst(l);
            ParamExternData* prm = &paramLI->params[i];

            prm->ptype = param_types[i];
            prm->pflags = PARAM_FLAG_CONST;
            prm->value = e->constvalue;
            prm->isnull = e->constisnull;
            prm->tabInfo = NULL;
            i++;
        }
    } else {
        /* General case: evaluate each expression in the per-tuple context. */
        exprstates = ExecPrepareExprList(params, estate);
        foreach (l, exprstates) {
            ExprState* n = (ExprState*)lfirst(l);
            ParamExternData* prm = &paramLI->params[i];

            prm->ptype = param_types[i];
            prm->pflags = PARAM_FLAG_CONST;
            prm->value = ExecEvalExprSwitchContext(n, GetPerTupleExprContext(estate), &prm->isnull);
            prm->tabInfo = NULL;

            i++;
        }
    }

    return paramLI;
}

/*
 * Initialize query hash table upon first use.
+ */ +void InitQueryHashTable(void) +{ + HASHCTL hash_ctl; + errno_t rc = 0; + + rc = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); + securec_check(rc, "\0", "\0"); + + hash_ctl.keysize = NAMEDATALEN; + hash_ctl.entrysize = sizeof(PreparedStatement); + hash_ctl.hcxt = u_sess->cache_mem_cxt; + + PG_TRY(); + { + (void)syscalllockAcquire(&u_sess->pcache_cxt.pstmt_htbl_lock); + u_sess->pcache_cxt.prepared_queries = hash_create("Prepared Queries", 32, &hash_ctl, HASH_ELEM | HASH_CONTEXT); + (void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + } + PG_CATCH(); + { + (void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + PG_RE_THROW(); + } + PG_END_TRY(); + +#ifdef PGXC + if (IS_PGXC_COORDINATOR) { + rc = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); + securec_check(rc, "\0", "\0"); + + hash_ctl.keysize = NAMEDATALEN; + hash_ctl.entrysize = sizeof(DatanodeStatement); + hash_ctl.hcxt = u_sess->cache_mem_cxt; + + u_sess->pcache_cxt.datanode_queries = hash_create("Datanode Queries", 64, &hash_ctl, HASH_ELEM | HASH_CONTEXT); + } +#endif + Assert(u_sess->pcache_cxt.prepared_queries); + + if (!ENABLE_THREAD_POOL) { + Assert(t_thrd.shemem_ptr_cxt.MyBEEntry->my_prepared_queries == NULL); + t_thrd.shemem_ptr_cxt.MyBEEntry->my_prepared_queries = u_sess->pcache_cxt.prepared_queries; + t_thrd.shemem_ptr_cxt.MyBEEntry->my_pstmt_htbl_lock = &u_sess->pcache_cxt.pstmt_htbl_lock; + } +} + +#ifdef DOLPHIN +static PreparedStatement* InsertIntoQueryHashTable(const char* stmt_name, CachedPlanSource* plansource, bool from_sql, bool* found) +#else +static void InsertIntoQueryHashTable(const char* stmt_name, CachedPlanSource* plansource, bool from_sql, bool* found) +#endif +{ + PreparedStatement* entry = NULL; + PG_TRY(); + { + (void)syscalllockAcquire(&u_sess->pcache_cxt.pstmt_htbl_lock); + entry = (PreparedStatement*)hash_search(u_sess->pcache_cxt.prepared_queries, stmt_name, HASH_ENTER, found); + 
(void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + } + PG_CATCH(); + { + (void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + PG_RE_THROW(); + } + PG_END_TRY(); + + if (!(*found)) { + entry->plansource = plansource; + entry->from_sql = from_sql; + entry->prepare_time = GetCurrentStatementStartTimestamp(); + entry->has_prepare_dn_stmt = false; + } + Assert(entry->plansource->magic == CACHEDPLANSOURCE_MAGIC); +#ifdef DOLPHIN + return entry; +#endif +} + +static void DropFromQueryHashTable(const char* stmt_name) +{ + PG_TRY(); + { + (void)syscalllockAcquire(&u_sess->pcache_cxt.pstmt_htbl_lock); + hash_search(u_sess->pcache_cxt.prepared_queries, stmt_name, HASH_REMOVE, NULL); + (void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + } + PG_CATCH(); + { + (void)syscalllockRelease(&u_sess->pcache_cxt.pstmt_htbl_lock); + PG_RE_THROW(); + } + PG_END_TRY(); +} + +#ifdef PGXC + +/* + * Assign the statement name for all the RemoteQueries in the plan tree, so + * they use Datanode statements + */ +int SetRemoteStatementName(Plan* plan, const char* stmt_name, int num_params, Oid* param_types, int n, + bool isBuildingCustomPlan, bool is_plan_shared) +{ + /* If no plan simply return */ + if (plan == NULL) + return 0; + + /* Leave if no parameters */ + if (num_params == 0 || param_types == NULL) + return 0; + + if (IsA(plan, RemoteQuery)) { + RemoteQuery* remotequery = (RemoteQuery*)plan; + DatanodeStatement* entry = NULL; + bool exists = false; + char name[NAMEDATALEN]; + + /* Nothing to do if parameters are already set for this query */ + if (remotequery->rq_num_params != 0 && !is_plan_shared) + return 0; + + if (stmt_name != NULL) { + errno_t rc = strncpy_s(name, NAMEDATALEN, stmt_name, NAMEDATALEN - 1); + securec_check(rc, "\0", "\0"); + + name[NAMEDATALEN - 1] = '\0'; + + /* + * Append modifier. 
If resulting string is going to be truncated, + * truncate better the base string, otherwise we may enter endless + * loop + */ + if (n) { + char modifier[NAMEDATALEN]; + int ss_rc = -1; + ss_rc = sprintf_s(modifier, NAMEDATALEN, "__%d", n); + securec_check_ss(ss_rc, "\0", "\0"); + /* + * if position NAMEDATALEN - strlen(modifier) - 1 is beyond the + * base string this is effectively noop, otherwise it truncates + * the base string + */ + name[NAMEDATALEN - strlen(modifier) - 1] = '\0'; + ss_rc = -1; + ss_rc = strcat_s(name, NAMEDATALEN, modifier); + securec_check_ss(ss_rc, "\0", "\0"); + } + n++; + hash_search(u_sess->pcache_cxt.datanode_queries, name, HASH_FIND, &exists); + + /* If it already exists, that means this plan has just been revalidated. */ + if (!exists) { + entry = (DatanodeStatement*)hash_search(u_sess->pcache_cxt.datanode_queries, name, HASH_ENTER, NULL); + CN_GPC_LOG("entry datanodequery", 0, name); + entry->current_nodes_number = 0; + entry->dns_node_indices = (int*)MemoryContextAllocZero( + u_sess->pcache_cxt.datanode_queries->hcxt, u_sess->pgxc_cxt.NumDataNodes * sizeof(int)); + entry->max_nodes_number = u_sess->pgxc_cxt.NumDataNodes; + } + if (!is_plan_shared) { + remotequery->statement = pstrdup(name); + remotequery->stmt_idx = n - 1; + } +#ifdef USE_ASSERT_CHECKING + else { + /* check same msg */ + Assert (remotequery->stmt_idx == n - 1); + } +#endif + } else if (remotequery->statement) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Passing parameters in PREPARE statement is not supported"))); + if (!is_plan_shared) { + remotequery->rq_num_params = num_params; + remotequery->rq_param_types = param_types; + remotequery->isCustomPlan = isBuildingCustomPlan; + } +#ifdef USE_ASSERT_CHECKING + else { + /* check same param msg */ + Assert (remotequery->rq_num_params == num_params); + for (int i = 0; i < num_params; i++) { + Assert (remotequery->rq_param_types[i] == param_types[i]); + } + } +#endif + } else if (IsA(plan, 
ModifyTable)) { + ModifyTable* mt_plan = (ModifyTable*)plan; + /* For ModifyTable plan recurse into each of the plans underneath */ + ListCell* l = NULL; + foreach (l, mt_plan->plans) { + Plan* temp_plan = (Plan*)lfirst(l); + n = SetRemoteStatementName(temp_plan, stmt_name, num_params, param_types, n, + isBuildingCustomPlan, is_plan_shared); + } + } + + if (innerPlan(plan)) + n = SetRemoteStatementName(innerPlan(plan), stmt_name, num_params, param_types, n, + isBuildingCustomPlan, is_plan_shared); + + if (outerPlan(plan)) + n = SetRemoteStatementName(outerPlan(plan), stmt_name, num_params, param_types, n, + isBuildingCustomPlan, is_plan_shared); + + return n; +} + +DatanodeStatement* light_set_datanode_queries(const char* stmt_name) +{ + DatanodeStatement* entry = NULL; + + /* Initialize the hash table, if necessary */ + if (!u_sess->pcache_cxt.prepared_queries) + InitQueryHashTable(); + else { + Assert(u_sess->pcache_cxt.datanode_queries != NULL); + entry = (DatanodeStatement*)hash_search(u_sess->pcache_cxt.datanode_queries, stmt_name, HASH_FIND, NULL); + } + + /* if not exists, add it */ + if (entry == NULL) { + CN_GPC_LOG("entry lp datanodequery", 0, stmt_name); + entry = (DatanodeStatement*)hash_search(u_sess->pcache_cxt.datanode_queries, stmt_name, HASH_ENTER, NULL); + entry->current_nodes_number = 0; + entry->dns_node_indices = (int*)MemoryContextAllocZero( + u_sess->pcache_cxt.datanode_queries->hcxt, u_sess->pgxc_cxt.NumDataNodes * sizeof(int)); + entry->max_nodes_number = u_sess->pgxc_cxt.NumDataNodes; + } + + return entry; +} +#endif + +void StorePreparedStatementCNGPC(const char *stmt_name, CachedPlanSource *plansource, bool from_sql, bool is_share) +{ + TimestampTz cur_ts = GetCurrentStatementStartTimestamp(); + bool found = false; + + /* Initialize the hash table, if necessary */ + if (unlikely(!u_sess->pcache_cxt.prepared_queries)) + InitQueryHashTable(); + + /* Add entry to hash table */ +#ifdef DOLPHIN + PreparedStatement* entry = + 
InsertIntoQueryHashTable(stmt_name, plansource, from_sql, &found); +#else + InsertIntoQueryHashTable(stmt_name, plansource, from_sql, &found); +#endif + CN_GPC_LOG("entry preparedstatement", plansource, stmt_name); + + /* Shouldn't get a duplicate entry */ +#ifdef DOLPHIN + /* + * we only let sql: 'prepare xx from stmt' to be replaced by new stmt in dolphin + * here is_share = true then from_sql must be false ,so we do not check again. + * means when found a same prepare, and it is not from sql that we need erreport below. + */ + if (from_sql && found && entry != NULL) { + entry->plansource = plansource; + entry->from_sql = from_sql; + entry->prepare_time = GetCurrentStatementStartTimestamp(); + entry->has_prepare_dn_stmt = false; + } else +#endif + if (found) { + if (is_share) { + Assert(plansource->gpc.status.InShareTable()); + CN_GPC_LOG("duplicate prepared statement, sub refcount", plansource, 0); + plansource->gpc.status.SubRefCount(); + } + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_PSTATEMENT), errmsg("prepared statement \"%s\" already exists", stmt_name))); + } + + /* Now it's safe to move the CachedPlanSource to permanent memory */ + if (!is_share) { + Assert((plansource->raw_parse_tree && IsA(plansource->raw_parse_tree, TransactionStmt)) || + !plansource->is_support_gplan || plansource->gpc.status.IsSharePlan()); + plansource->gpc.status.SetLoc(GPC_SHARE_IN_LOCAL_SAVE_PLAN_LIST); + SaveCachedPlan(plansource); + } +} + +/* + * Store all the data pertaining to a query in the hash table using + * the specified key. The passed CachedPlanSource should be "unsaved" + * in case we get an error here; we'll save it once we've created the hash + * table entry. 
+ */ +void StorePreparedStatement(const char* stmt_name, CachedPlanSource* plansource, bool from_sql) +{ + if (ENABLE_DN_GPC) { + if (unlikely(plansource->gpc.status.InShareTable())) + elog(PANIC, "should get shared plan in gpc when StorePreparedStatement"); + /* dn gpc don't save prepare statement on dn */ + u_sess->pcache_cxt.cur_stmt_psrc = plansource; + plansource->gpc.status.SetLoc(GPC_SHARE_IN_LOCAL_SAVE_PLAN_LIST); + SaveCachedPlan(plansource); + return; + } + if (ENABLE_CN_GPC) { + StorePreparedStatementCNGPC(stmt_name, plansource, from_sql, false); + return; + } + PreparedStatement* entry = NULL; + TimestampTz cur_ts = GetCurrentStatementStartTimestamp(); + bool found = false; + + /* Initialize the hash table, if necessary */ + if (unlikely(!u_sess->pcache_cxt.prepared_queries)) + InitQueryHashTable(); + + /* Add entry to hash table */ +#ifdef DOLPHIN + entry =InsertIntoQueryHashTable(stmt_name, plansource, from_sql, &found); +#else + InsertIntoQueryHashTable(stmt_name, plansource, from_sql, &found); +#endif + + /* Shouldn't get a duplicate entry */ +#ifdef DOLPHIN + /* we only let sql: 'prepare xx from stmt' to be replaced by new stmt in dolphin. + * means when found a same prepare, and it is not from sql that we need erreport below. 
+ */ + if (from_sql && found && entry != NULL) { + entry->plansource = plansource; + entry->from_sql = from_sql; + entry->prepare_time = GetCurrentStatementStartTimestamp(); + entry->has_prepare_dn_stmt = false; + } else +#endif + if (found) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_PSTATEMENT), errmsg("prepared statement \"%s\" already exists", stmt_name))); + + /* Now it's safe to move the CachedPlanSource to permanent memory */ + SaveCachedPlan(plansource); +} + +static void FetchPreparedStatementCNGPC(PreparedStatement* entry, const char* stmt_name) +{ + Assert (entry->plansource->magic == CACHEDPLANSOURCE_MAGIC); + bool hasGetLock = false; + /* check if need recreate */ + if (g_instance.plan_cache->CheckRecreateCachePlan(entry->plansource, &hasGetLock)) { + entry->has_prepare_dn_stmt = false; + g_instance.plan_cache->RecreateCachePlan(entry->plansource, entry->stmt_name, entry, NULL, NULL, hasGetLock); + } +#ifdef ENABLE_MULTIPLE_NODES + Assert (entry->plansource->lightProxyObj == NULL); + /* add datanode statment for current sess if is shared plan. + If it's CN light plancache. We will add datanode statment in execute stage. 
*/ + if (entry->plansource->gpc.status.InShareTable() && entry->has_prepare_dn_stmt == false) { + bool is_named_prepare = IS_PGXC_COORDINATOR && !IsConnFromCoord() && + entry->stmt_name && entry->stmt_name[0] != '\0'; + bool is_lp = entry->plansource->single_exec_node != NULL && + entry->plansource->gplan == NULL && entry->plansource->cplan == NULL; + if (is_named_prepare && !is_lp && entry->plansource->gplan) { + int n = 0; + ListCell* lc = NULL; + MemoryContext old_cxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + foreach (lc, entry->plansource->gplan->stmt_list) { + Node* st = NULL; + PlannedStmt* ps = NULL; + st = (Node*)lfirst(lc); + if (IsA(st, PlannedStmt)) { + ps = (PlannedStmt*)st; + n = SetRemoteStatementName(ps->planTree, entry->stmt_name, entry->plansource->num_params, + entry->plansource->param_types, n, false, true); + } + } + CN_GPC_LOG("set datanode statment for shared plan", entry->plansource, stmt_name); + Assert (entry->plansource->gplan->dn_stmt_num == n); + (void)MemoryContextSwitchTo(old_cxt); + } + entry->has_prepare_dn_stmt = true; + } +#endif +} + +/* + * Lookup an existing query in the hash table. If the query does not + * actually exist, throw ereport(ERROR) or return NULL per second parameter. + * + * Note: this does not force the referenced plancache entry to be valid, + * since not all callers care. + */ +PreparedStatement* FetchPreparedStatement(const char* stmt_name, bool throwError, bool need_valid) +{ + if (ENABLE_DN_GPC) { + if (throwError) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_PSTATEMENT), + errmsg("prepared statement \"%s\" does not exist on DN with GPC", stmt_name))); + return NULL; + } + + PreparedStatement *entry = NULL; + + /* + * If the hash table hasn't been initialized, it can't be storing + * anything, therefore it couldn't possibly store our plan. 
+ */ + if (u_sess->pcache_cxt.prepared_queries) { + entry = (PreparedStatement*)hash_search(u_sess->pcache_cxt.prepared_queries, stmt_name, HASH_FIND, NULL); + } else + entry = NULL; + + if (entry == NULL && throwError) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_PSTATEMENT), errmsg("prepared statement \"%s\" does not exist", stmt_name))); + + if (ENABLE_CN_GPC && entry != NULL && need_valid) { + FetchPreparedStatementCNGPC(entry, stmt_name); + } + return entry; +} + +/* + * Before sned a plan with specified name to datanode, Check if it + * is exist on coordinator. + */ +bool HaveActiveCoordinatorPreparedStatement(const char* stmt_name) +{ + bool found = false; + + if (u_sess->pcache_cxt.prepared_queries) { + hash_search(u_sess->pcache_cxt.prepared_queries, stmt_name, HASH_FIND, &found); + } + + return found; +} + +/* + * Given a prepared statement, determine the result tupledesc it will + * produce. Returns NULL if the execution will not return tuples. + * + * Note: the result is created or copied into current memory context. + */ +TupleDesc FetchPreparedStatementResultDesc(PreparedStatement *stmt) +{ + /* + * Since we don't allow prepared statements' result tupdescs to change, + * there's no need to worry about revalidating the cached plan here. + */ + Assert(stmt->plansource->fixed_result); + if (stmt->plansource->resultDesc) + return CreateTupleDescCopy(stmt->plansource->resultDesc); + else + return NULL; +} + +/* + * Given a prepared statement that returns tuples, extract the query + * targetlist. Returns NIL if the statement doesn't have a determinable + * targetlist. + * + * Note: this is pretty ugly, but since it's only used in corner cases like + * Describe Statement on an EXECUTE command, we don't worry too much about + * efficiency. 
+ */ +List* FetchPreparedStatementTargetList(PreparedStatement *stmt) +{ + List* tlist = NIL; + + /* Get the plan's primary targetlist */ + tlist = CachedPlanGetTargetList(stmt->plansource); + + /* Copy into caller's context in case plan gets invalidated */ + return (List*)copyObject(tlist); +} + +/* + * Implements the 'DEALLOCATE' utility statement: deletes the + * specified plan from storage. + */ +void DeallocateQuery(DeallocateStmt* stmt) +{ + if (stmt->name) + DropPreparedStatement(stmt->name, true); + else + DropAllPreparedStatements(); +} + +/* + * Internal version of DEALLOCATE + * + * If showError is false, dropping a nonexistent statement is a no-op. + */ +void DropPreparedStatement(const char* stmt_name, bool showError) +{ + if (ENABLE_DN_GPC) { + /* no prepare statement on dn gpc */ + return ; + } + + PreparedStatement *entry = NULL; + + /* Find the query's hash table entry; raise error if wanted */ + entry = FetchPreparedStatement(stmt_name, showError, false); + ResourceOwner originalOwner = t_thrd.utils_cxt.CurrentResourceOwner; + + + if (NULL == originalOwner) { + /* + * make sure ResourceOwner is not null, since it may acess catalog + * when the pooler tries to create new connections + */ + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "DropPreparedStatement", + THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + } + + + if (entry != NULL) { + /* Release the plancache entry */ + Assert (entry->plansource->magic == CACHEDPLANSOURCE_MAGIC); + if (ENABLE_CN_GPC) + GPCDropLPIfNecessary(entry->stmt_name, true, true, NULL); + if (entry->plansource->gpc.status.InShareTable()) { + CN_GPC_LOG("prepare remove success", 0, entry->plansource->stmt_name); +#ifdef ENABLE_MULTIPLE_NODES + if (entry->plansource->gplan) + GPCCleanDatanodeStatement(entry->plansource->gplan->dn_stmt_num, entry->stmt_name); +#endif + entry->plansource->gpc.status.SubRefCount(); + } else { + CN_GPC_LOG("prepare remove private", entry->plansource, 
entry->stmt_name); + DropCachedPlan(entry->plansource); + CN_GPC_LOG("prepare remove private succ", 0, entry->stmt_name); + } + CN_GPC_LOG("remove prepare statment", 0, entry->stmt_name); + /* Now we can remove the hash table entry */ + DropFromQueryHashTable(entry->stmt_name); + } + + if (NULL == originalOwner && t_thrd.utils_cxt.CurrentResourceOwner) { + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_LOCKS, false, true); + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_AFTER_LOCKS, false, true); + + ResourceOwner tempOwner = t_thrd.utils_cxt.CurrentResourceOwner; + t_thrd.utils_cxt.CurrentResourceOwner = originalOwner; + ResourceOwnerDelete(tempOwner); + } +} + +/* + * Drop all cached statements. + */ +void DropAllPreparedStatements(void) +{ + HASH_SEQ_STATUS seq; + PreparedStatement *entry = NULL; + ResourceOwner originalOwner = t_thrd.utils_cxt.CurrentResourceOwner; + + if (ENABLE_DN_GPC) { + Assert (u_sess->pcache_cxt.prepared_queries == NULL); + CleanSessGPCPtr(u_sess); + return; + } + + /* nothing cached */ + if (!u_sess->pcache_cxt.prepared_queries) + return; + +#define ReleaseTempResourceOwner() \ + do { \ + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); \ + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_LOCKS, false, true); \ + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_AFTER_LOCKS, false, true); \ + if (NULL == originalOwner && t_thrd.utils_cxt.CurrentResourceOwner) { \ + ResourceOwner tempOwner = t_thrd.utils_cxt.CurrentResourceOwner; \ + t_thrd.utils_cxt.CurrentResourceOwner = originalOwner; \ + ResourceOwnerDelete(tempOwner); \ + } \ + } while (0); + + if (NULL == originalOwner) { + /* + * make sure ResourceOwner is not null, since it may acess catalog + * when the 
pooler tries to create new connections + */ + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "DropAllPreparedStatements", + THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + } + + bool failflag_dropcachedplan = false; + ErrorData* edata = NULL; + MemoryContext oldcontext = CurrentMemoryContext; + bool isSharedPlan = false; + + /* walk over cache */ + hash_seq_init(&seq, u_sess->pcache_cxt.prepared_queries); + while ((entry = (PreparedStatement*)hash_seq_search(&seq)) != NULL) { + PG_TRY(); + { + /* Release the plancache entry */ + Assert (entry->plansource->magic == CACHEDPLANSOURCE_MAGIC); + isSharedPlan = entry->plansource->gpc.status.InShareTable(); +#ifdef ENABLE_MULTIPLE_NODES + if (ENABLE_CN_GPC) + GPCDropLPIfNecessary(entry->stmt_name, true, true, NULL); + /* for gpc, in case has error, only send drop preparestatement to dn here, sub refcount later */ + if (isSharedPlan && entry->plansource->gplan != NULL) { + GPCCleanDatanodeStatement(entry->plansource->gplan->dn_stmt_num, entry->stmt_name); + } +#endif + if (!isSharedPlan) { + CN_GPC_LOG("prepare remove private", entry->plansource, entry->stmt_name); + DropCachedPlan(entry->plansource); + CN_GPC_LOG("prepare remove private succ", 0, entry->stmt_name); + } + } + PG_CATCH(); + { + failflag_dropcachedplan = true; + + /* Must reset elog.c's state */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + ereport(LOG, + (errmodule(MOD_EXECUTOR), + errcode(ERRCODE_INTERNAL_ERROR), + errmsg("failed to drop cached plan when drop all prepared statements: %s", edata->message))); + FreeErrorData(edata); + } + PG_END_TRY(); + if (isSharedPlan) { + CN_GPC_LOG("prepare remove ", entry->plansource, entry->plansource->stmt_name); + /* sub refcount savely */ + entry->plansource->gpc.status.SubRefCount(); + } + + /* Now we can remove the hash table entry */ + DropFromQueryHashTable(entry->stmt_name); + } + ReleaseTempResourceOwner(); + CN_GPC_LOG("remove prepare 
statment all", 0, 0); + + if (failflag_dropcachedplan) { + /* destory connections to other node to cleanup all cached statements */ + destroy_handles(); + ereport( + ERROR, (errmodule(MOD_EXECUTOR), errcode(ERRCODE_INTERNAL_ERROR), errmsg("failed to drop cached plan"))); + } +} + +/* + * When pool reloaded on CN, drop prepared statement on dn + * and invalid cached plans. + */ +void HandlePreparedStatementsForReload(void) +{ + HASH_SEQ_STATUS seq; + PreparedStatement *entry = NULL; + ErrorData* edata = NULL; + + /* nothing cached */ + if (!u_sess->pcache_cxt.prepared_queries) + return; + + if (ENABLE_CN_GPC) { + CN_GPC_LOG("Invalid all prepared statements for pool reload", 0, 0); + } + MemoryContext oldcontext = CurrentMemoryContext; + bool has_error = false; + /* walk over cache */ + hash_seq_init(&seq, u_sess->pcache_cxt.prepared_queries); + while ((entry = (PreparedStatement*)hash_seq_search(&seq)) != NULL) { + /* We don't handle these plans which don't include relation */ + if (list_length(entry->plansource->relationOids) == 0) + continue; + PG_TRY(); + { + /* clean CachedPlanSource */ + if (entry->plansource->gpc.status.IsSharePlan()) { + g_instance.plan_cache->RemovePlanSource(entry->plansource, entry->stmt_name); + } else { + DropCachedPlanInternal(entry->plansource); + } + entry->has_prepare_dn_stmt = false; + } + PG_CATCH(); + { + /* Must reset elog.c's state */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + ereport(LOG, + (errmodule(MOD_EXECUTOR), + errcode(ERRCODE_INTERNAL_ERROR), + errmsg("failed to drop internal cached plan when reload prepared statements: %s", edata->message))); + FreeErrorData(edata); + entry->has_prepare_dn_stmt = false; + has_error = true; + } + PG_END_TRY(); + } + + ereport(LOG, + (errmodule(MOD_OPT), errcode(ERRCODE_INTERNAL_ERROR), errmsg("Invalid all prepared statements for reload"))); + + /* invalid all cached plans */ + ResetPlanCache(); + + /* if error occurrs, report error to log jmp 
and destory handles */ + if (has_error) { + ereport(ERROR, + (errmodule(MOD_EXECUTOR), + errcode(ERRCODE_INTERNAL_ERROR), + errmsg("failed to drop internal cached plan when reload prepared statements"))); + } +} + +/* + * When CN retry, clean datanode_queries and invalid cached plans. + */ +void HandlePreparedStatementsForRetry(void) +{ + /* nothing cached */ + if (u_sess->pcache_cxt.prepared_queries == NULL) + return; + + /* + * If we set plansource to be invalid, its light proxy (if exits) will be cleaned in next + * RevalidateCachedQuery, and its generic plan (if exits) will be cleaned in next CheckCachedPlan, + * and its custom plan will be cleaned automatically when generating a new generic/custom plan + * next time. + * Moreover, because of CN retry, prepared statements on dn will be cleaned by destroy_handles in + * AbortTransaction later. + * + * We only need to set plansource invalid here. + */ + ResetPlanCache(); + + if (ENABLE_CN_GPC) { + /* set plansource to invalid like ungpc */ + CN_GPC_LOG("Invalid all prepared statements for retry", 0, 0); + HASH_SEQ_STATUS seq; + PreparedStatement* entry = NULL; + hash_seq_init(&seq, u_sess->pcache_cxt.prepared_queries); + while ((entry = (PreparedStatement*)hash_seq_search(&seq)) != NULL) { + if (entry->plansource->gpc.status.IsSharePlan()) + g_instance.plan_cache->RemovePlanSource(entry->plansource, entry->stmt_name); + } + } + + ereport(DEBUG2, (errmodule(MOD_OPT), errmsg("Invalid all prepared statements for retry"))); +} + +CachedPlanSource* GetCachedPlanSourceFromExplainExecute(const char* stmt_name) +{ + PreparedStatement *entry = NULL; + CachedPlanSource* psrc = NULL; + if (ENABLE_DN_GPC && IsConnFromCoord()) { + psrc = u_sess->pcache_cxt.cur_stmt_psrc; + if (SECUREC_UNLIKELY(psrc == NULL)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_PSTATEMENT), + errmsg("dn gpc's prepared statement does not exist"))); + } + } else { + /* Look it up in the hash table */ + entry = FetchPreparedStatement(stmt_name, true, 
true); + psrc = entry->plansource; + } + Assert(psrc != NULL); + + /* Shouldn't find a non-fixed-result cached plan */ + if (!psrc->fixed_result) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("EXPLAIN EXECUTE does not support variable-result cached plans"))); + } + + return psrc; +} + +/* + * Implements the 'EXPLAIN EXECUTE' utility statement. + * + * "into" is NULL unless we are doing EXPLAIN CREATE TABLE AS EXECUTE, + * in which case executing the query should result in creating that table. + * + * Note: the passed-in queryString is that of the EXPLAIN EXECUTE, + * not the original PREPARE; we get the latter string from the plancache. + */ +void ExplainExecuteQuery( + ExecuteStmt* execstmt, IntoClause* into, ExplainState* es, const char* queryString, ParamListInfo params) +{ + const char* query_string = NULL; + CachedPlan* cplan = NULL; + MemoryContext tmpCxt = NULL; + List* plan_list = NIL; + ListCell* p = NULL; + ParamListInfo paramLI = NULL; + EState* estate = NULL; + + CachedPlanSource* psrc = GetCachedPlanSourceFromExplainExecute(execstmt->name); + + query_string = psrc->query_string; + + /* Evaluate parameters, if any */ + if (psrc->num_params) { + /* + * Need an EState to evaluate parameters; must not delete it till end + * of query, in case parameters are pass-by-reference. Note that the + * passed-in "params" could possibly be referenced in the parameter + * expressions. 
+ */ + estate = CreateExecutorState(); + estate->es_param_list_info = params; + paramLI = EvaluateParams(psrc, execstmt->params, queryString, estate); + } + + /* Replan if needed, and acquire a transient refcount */ + if (u_sess->attr.attr_common.max_datanode_for_plan > 0 && IS_PGXC_COORDINATOR && !IsConnFromCoord() && + paramLI != NULL) { + paramLI->params_need_process = true; + } + + u_sess->attr.attr_sql.explain_allow_multinode = true; + + if (!into) { + psrc->cursor_options |= CURSOR_OPT_SPQ_OK; + } + + if (ENABLE_CACHEDPLAN_MGR) { + cplan = GetWiseCachedPlan(psrc, paramLI, true); + } else { + cplan = GetCachedPlan(psrc, paramLI, true); + } + + /* use shared plan here, add refcount */ + if (cplan->isShared()) + (void)pg_atomic_fetch_add_u32((volatile uint32*)&cplan->global_refcount, 1); + + u_sess->attr.attr_sql.explain_allow_multinode = false; + + if (ENABLE_GPC && psrc->gplan) { + plan_list = CopyLocalStmt(cplan->stmt_list, u_sess->temp_mem_cxt, &tmpCxt); + } else { + plan_list = cplan->stmt_list; + } + + es->is_explain_gplan = false; + if (psrc->cplan == NULL) + es->is_explain_gplan = true; + + /* Explain each query */ + foreach (p, plan_list) { + PlannedStmt* pstmt = (PlannedStmt*)lfirst(p); + int instrument_option = pstmt->instrument_option; + + /* get g_RemoteQueryList by reseting sql_statement. 
*/ + if (u_sess->attr.attr_common.max_datanode_for_plan > 0 && IS_PGXC_COORDINATOR && !IsConnFromCoord() && + es->is_explain_gplan && psrc->gplan_is_fqs) { + GetRemoteQuery(pstmt, queryString); + es->isexplain_execute = true; + } + + if (IsA(pstmt, PlannedStmt)) + ExplainOnePlan(pstmt, into, es, query_string, None_Receiver, paramLI); + else + ExplainOneUtility((Node*)pstmt, into, es, query_string, paramLI); + + pstmt->instrument_option = instrument_option; + + /* No need for CommandCounterIncrement, as ExplainOnePlan did it */ + + /* Separate plans with an appropriate separator */ + if (lnext(p) != NULL) + ExplainSeparatePlans(es); + } + + if (estate != NULL) + FreeExecutorState(estate); + + ReleaseCachedPlan(cplan, true); +} + +/* + * This set returning function reads all the prepared statements and + * returns a set of (name, statement, prepare_time, param_types, from_sql). + */ +Datum pg_prepared_statement(PG_FUNCTION_ARGS) +{ + ReturnSetInfo* rsinfo = (ReturnSetInfo*)fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate* tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not " + "allowed in this context"))); + + /* need to build tuplestore in query context */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* + * build tupdesc for result tuples. 
This must match the definition of the + * pg_prepared_statements view in system_views.sql + */ + tupdesc = CreateTemplateTupleDesc(5, false); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "statement", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "prepare_time", TIMESTAMPTZOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "parameter_types", REGTYPEARRAYOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "from_sql", BOOLOID, -1, 0); + + /* + * We put all the tuples into a tuplestore in one scan of the hashtable. + * This avoids any issue of the hashtable possibly changing between calls. + */ + tupstore = + tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, false, u_sess->attr.attr_memory.work_mem); + + /* generate junk in short-term context */ + MemoryContextSwitchTo(oldcontext); + + /* hash table might be uninitialized */ + if (u_sess->pcache_cxt.prepared_queries) { + HASH_SEQ_STATUS hash_seq; + PreparedStatement *prep_stmt = NULL; + + hash_seq_init(&hash_seq, u_sess->pcache_cxt.prepared_queries); + while ((prep_stmt = (PreparedStatement*)hash_seq_search(&hash_seq)) != NULL) { + Datum values[5]; + bool nulls[5]; + + errno_t rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + values[0] = CStringGetTextDatum(prep_stmt->stmt_name); + char* maskquery = maskPassword(prep_stmt->plansource->query_string); + const char* query = (maskquery == NULL) ? 
prep_stmt->plansource->query_string : maskquery; + values[1] = CStringGetTextDatum(query); + if (query != maskquery) + pfree_ext(maskquery); + values[2] = TimestampTzGetDatum(prep_stmt->prepare_time); + values[3] = build_regtype_array(prep_stmt->plansource->param_types, prep_stmt->plansource->num_params); + values[4] = BoolGetDatum(prep_stmt->from_sql); + + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + } + + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + return (Datum)0; +} + +Datum pg_prepared_statement_global(PG_FUNCTION_ARGS) +{ + if (!superuser() && !isMonitoradmin(GetUserId())) { + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_PROC, "pg_prepared_statements"); + } + + uint64 sessionid = (uint64)PG_GETARG_INT64(0); + ReturnSetInfo *rsinfo = (ReturnSetInfo*)fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate* tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not " + "allowed in this context"))); + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* + * build tupdesc for result tuples. 
This must match the definition of the + * pg_prepared_statements view in system_views.sql + */ + tupdesc = CreateTemplateTupleDesc(7, false); + + TupleDescInitEntry(tupdesc, (AttrNumber)1, "sessionid", INT8OID, -1, 0 ); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "username", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "statement", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "prepare_time", TIMESTAMPTZOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "parameter_types", REGTYPEARRAYOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "from_sql", BOOLOID, -1, 0); + + /* + * We put all the tuples into a tuplestore in one scan of the hashtable. + * This avoids any issue of the hashtable possibly changing between calls. + */ + tupstore = + tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random, false, u_sess->attr.attr_memory.work_mem); + + /* generate junk in short-term context */ + MemoryContextSwitchTo(oldcontext); + + /* total number of tuples to be returned */ + if (ENABLE_THREAD_POOL) { + g_threadPoolControler->GetSessionCtrl()->GetSessionPreparedStatements(tupstore, tupdesc, sessionid); + } else { + GetThreadPreparedStatements(tupstore, tupdesc, sessionid); + } + + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + return (Datum)0; +} + +void GetPreparedStatements(HTAB* htbl, Tuplestorestate* tupStore, TupleDesc tupDesc, uint64 sessionId, char* userName) +{ + HASH_SEQ_STATUS hash_seq; + PreparedStatement *prep_stmt = NULL; + hash_seq_init(&hash_seq, htbl); + while ((prep_stmt = (PreparedStatement*)hash_seq_search(&hash_seq)) != NULL) { + Datum values[7]; + bool nulls[7]; + + errno_t rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + values[0] = 
UInt64GetDatum(sessionId); + values[1] = CStringGetTextDatum(userName); + values[2] = CStringGetTextDatum(prep_stmt->stmt_name); + char* maskquery = maskPassword(prep_stmt->plansource->query_string); + const char* query = (maskquery == NULL) ? prep_stmt->plansource->query_string : maskquery; + values[3] = CStringGetTextDatum(query); + if (query != maskquery) + pfree_ext(maskquery); + values[4] = TimestampTzGetDatum(prep_stmt->prepare_time); + values[5] = build_regtype_array(prep_stmt->plansource->param_types, prep_stmt->plansource->num_params); + values[6] = BoolGetDatum(prep_stmt->from_sql); + + tuplestore_putvalues(tupStore, tupDesc, values, nulls); + } +} + +void GetThreadPreparedStatements(Tuplestorestate* tupStore, TupleDesc tupDesc, uint64 sessionId) +{ + Assert(!ENABLE_THREAD_POOL); + PgBackendStatus *beentry = t_thrd.shemem_ptr_cxt.BackendStatusArray; + char* userName = NULL; + + PG_TRY(); + { + for(int i = 0; i < BackendStatusArray_size; i++){ + HTAB* htbl = beentry->my_prepared_queries; + + if (beentry->my_pstmt_htbl_lock != NULL) + if ((beentry->st_procpid > 0 || beentry -> st_sessionid > 0) && + (beentry->st_sessionid == sessionId || sessionId == 0)) { + Oid userid = beentry->st_userid; + userName = GetUserNameFromId(userid); + if (htbl) { + (void)syscalllockAcquire(beentry->my_pstmt_htbl_lock); + GetPreparedStatements(htbl, tupStore, tupDesc, beentry->st_sessionid, userName); + (void)syscalllockRelease(beentry->my_pstmt_htbl_lock); + } + } + + pfree_ext(userName); + + beentry++; + } + } + PG_CATCH(); + { + (void)syscalllockRelease(beentry->my_pstmt_htbl_lock); + pfree_ext(userName); + PG_RE_THROW(); + } + PG_END_TRY(); +} + +/* + * This utility function takes a C array of Oids, and returns a Datum + * pointing to a one-dimensional Postgres array of regtypes. An empty + * array is returned as a zero-element array, not NULL. 
+ */ +static Datum build_regtype_array(const Oid* param_types, int num_params) +{ + Datum* tmp_ary = NULL; + ArrayType* result = NULL; + int i; + + tmp_ary = (Datum*)palloc(num_params * sizeof(Datum)); + + for (i = 0; i < num_params; i++) + tmp_ary[i] = ObjectIdGetDatum(param_types[i]); + + /* XXX: this hardcodes assumptions about the regtype type */ + result = construct_array(tmp_ary, num_params, REGTYPEOID, 4, true, 'i'); + return PointerGetDatum(result); +} + +#ifdef PGXC +DatanodeStatement* FetchDatanodeStatement(const char* stmt_name, bool throwError) +{ + DatanodeStatement* entry = NULL; + + /* + * If the hash table hasn't been initialized, it can't be storing + * anything, therefore it couldn't possibly store our plan. + */ + if (u_sess->pcache_cxt.datanode_queries) + entry = (DatanodeStatement*)hash_search(u_sess->pcache_cxt.datanode_queries, stmt_name, HASH_FIND, NULL); + else + entry = NULL; + + /* Report error if entry is not found */ + if (entry == NULL && throwError) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_PSTATEMENT), errmsg("datanode statement \"%s\" does not exist", stmt_name))); + + return entry; +} + +/* + * Drop Datanode statement and close it on nodes if active + */ +void DropDatanodeStatement(const char* stmt_name) +{ + DatanodeStatement* entry = NULL; + + entry = FetchDatanodeStatement(stmt_name, false); + if (entry != NULL) { + int i; + List* nodelist = NIL; + + /* make a List of integers from node numbers */ + for (i = 0; i < entry->current_nodes_number; i++) { + nodelist = lappend_int(nodelist, entry->dns_node_indices[i]); + } + + CN_GPC_LOG("drop datanode statment", NULL, entry->stmt_name); + + entry->current_nodes_number = 0; + entry->max_nodes_number = 0; + pfree_ext(entry->dns_node_indices); + + /* Okay to remove it */ + (void*)hash_search(u_sess->pcache_cxt.datanode_queries, entry->stmt_name, HASH_REMOVE, NULL); + if (!ENABLE_CN_GPC) + ExecCloseRemoteStatement(stmt_name, nodelist); + list_free_ext(nodelist); + } +} + +/* + * Mark 
all datanode statements as deactive. + */ +void DeActiveAllDataNodeStatements(void) +{ + int tmp_num = 0; + errno_t errorno = EOK; + + /* nothing cached */ + if (!u_sess->pcache_cxt.datanode_queries) + return; + + HASH_SEQ_STATUS seq; + DatanodeStatement* entry = NULL; + + /* walk over cache */ + hash_seq_init(&seq, u_sess->pcache_cxt.datanode_queries); + while ((entry = (DatanodeStatement*)hash_seq_search(&seq)) != NULL) { + tmp_num = entry->current_nodes_number; + entry->current_nodes_number = 0; + if (tmp_num > 0) { + Assert(tmp_num <= Max(u_sess->pgxc_cxt.NumTotalDataNodes, u_sess->pgxc_cxt.NumDataNodes)); + errorno = memset_s(entry->dns_node_indices, tmp_num * sizeof(int), 0, tmp_num * sizeof(int)); + securec_check_c(errorno, "\0", "\0"); + } + } +} + +/* + * Return true if there is at least one active Datanode statement, so acquired + * Datanode connections should not be released + */ +bool HaveActiveDatanodeStatements(void) +{ + HASH_SEQ_STATUS seq; + DatanodeStatement* entry = NULL; + + /* nothing cached */ + if (!u_sess->pcache_cxt.datanode_queries) + return false; + + /* walk over cache */ + hash_seq_init(&seq, u_sess->pcache_cxt.datanode_queries); + while ((entry = (DatanodeStatement*)hash_seq_search(&seq)) != NULL) { + /* Stop walking and return true */ + if (entry->current_nodes_number > 0) { + hash_seq_term(&seq); + return true; + } + } + /* nothing found */ + return false; +} + +/* + * Mark Datanode statement as active on specified node + * Return true if statement has already been active on the node and can be used + * Returns false if statement has not been active on the node and should be + * prepared on the node + */ +bool ActivateDatanodeStatementOnNode(const char* stmt_name, int nodeIdx) +{ + DatanodeStatement* entry = NULL; + int i; + + /* find the statement in cache */ + entry = FetchDatanodeStatement(stmt_name, true); + + /* see if statement already active on the node */ + for (i = 0; i < entry->current_nodes_number; i++) { + if 
(entry->dns_node_indices[i] == nodeIdx) { + return true; + } + } + + /* After cluster expansion, must expand entry->dns_node_indices array too */ + if (entry->current_nodes_number == entry->max_nodes_number) { + int* new_dns_node_indices = (int*)MemoryContextAllocZero( + u_sess->pcache_cxt.datanode_queries->hcxt, entry->max_nodes_number * CLUSTER_EXPANSION_BASE * sizeof(int)); + errno_t errorno = EOK; + errorno = memcpy_s(new_dns_node_indices, + entry->max_nodes_number * CLUSTER_EXPANSION_BASE * sizeof(int), + entry->dns_node_indices, + entry->max_nodes_number * sizeof(int)); + securec_check(errorno, "\0", "\0"); + pfree_ext(entry->dns_node_indices); + entry->dns_node_indices = new_dns_node_indices; + entry->max_nodes_number = entry->max_nodes_number * CLUSTER_EXPANSION_BASE; + elog(LOG, + "expand node ids array for active datanode statements " + "after cluster expansion, now array size is %d", + entry->max_nodes_number); + } + + /* statement is not active on the specified node append item to the list */ + entry->dns_node_indices[entry->current_nodes_number++] = nodeIdx; + return false; +} + +char* get_datanode_statement_name(const char* stmt_name, int n) +{ + char name[NAMEDATALEN]; + errno_t rc = strncpy_s(name, NAMEDATALEN, stmt_name, NAMEDATALEN - 1); + securec_check(rc, "\0", "\0"); + if (n) { + name[NAMEDATALEN - 1] = '\0'; + char modifier[NAMEDATALEN]; + int ss_rc = -1; + ss_rc = sprintf_s(modifier, NAMEDATALEN, "__%d", n); + securec_check_ss(ss_rc, "\0", "\0"); + name[NAMEDATALEN - strlen(modifier) - 1] = '\0'; + ss_rc = -1; + ss_rc = strcat_s(name, NAMEDATALEN, modifier); + securec_check(ss_rc, "\0", "\0"); + } + return pstrdup(name); +} + +#endif + +/* + * Function name: needRecompileQuery + * Check if perpared query need to be reprepared. + * input Parameter: + * stmt: the stmt need to be checked if it need to be reprepared. + * output result: + * True : need to do rePrepare proc before executing execute stmt. + * False: could execute stmt directly. 
+ */ +bool needRecompileQuery(ExecuteStmt* stmt) +{ + bool ret_val = false; + PreparedStatement *entry = NULL; + CachedPlanSource* plansource = NULL; + + /* Look it up in the hash table */ + entry = FetchPreparedStatement(stmt->name, true, false); + + /* Find if there is query that has been enabled auto truncation.*/ + plansource = entry->plansource; + + ret_val = checkRecompileCondition(plansource); + + return ret_val; +} + +/* + * Function name: RePrepareQuery + * do re-PrepareQuery for stmt Prepare. + * input Parameter: + * stmt: the stmt need to be re-prepared. + * output result: + * void + */ +void RePrepareQuery(ExecuteStmt* stmt) +{ + PreparedStatement *entry = NULL; + char* query_string = NULL; + uint32 query_length; + errno_t err; + List* parseTree_list = NIL; + List* queryTree_list = NIL; + ListCell* parsetree_item = NULL; + ListCell* stmtlist_item = NULL; + + /* Look it up in the hash table */ + entry = FetchPreparedStatement(stmt->name, true, false); + + /* copy the original query text.*/ + query_length = strlen(entry->plansource->query_string); + query_string = (char*)palloc(query_length + 1); + err = strcpy_s(query_string, query_length + 1, entry->plansource->query_string); + + securec_check(err, "\0", "\0"); + /* Need drop old prepared statement and then generated new one with same name. */ + DropPreparedStatement(stmt->name, true); + + /* + * Do re prepare task. here we will do a simplified flow to get prepared + * stmt from query_string. since we are in execute stmt's context, we do + * not need do all the parts of exec_simple_query. + */ + parseTree_list = pg_parse_query(query_string); + + Assert(parseTree_list != NULL && parseTree_list->length > 0); + + /* + * Run through the raw parsetree(s) and process each one. 
+ */ + foreach (parsetree_item, parseTree_list) { + Node* parsetree = (Node*)lfirst(parsetree_item); + t_thrd.postgres_cxt.cur_command_tag = transform_node_tag(parsetree); + List* planTree_list = NIL; + + queryTree_list = pg_analyze_and_rewrite(parsetree, query_string, NULL, 0); + + Assert(queryTree_list != NULL && queryTree_list->length > 0); + + planTree_list = pg_plan_queries(queryTree_list, 0, NULL); + + Assert(planTree_list != NULL && planTree_list->length > 0); + + foreach (stmtlist_item, planTree_list) { + Node* stmt_node = (Node*)lfirst(stmtlist_item); + PrepareQuery((PrepareStmt*)stmt_node, query_string); + } + } +} + +/* + * Function name: checkRecompileCondition + * determin if the stmt need to be recompiled. + * input Parameter: + * plansource: the stmt need to be checked if it need to be reprepared. + * output result: + * There are four scenario: + * td_compatible_truncation | Query->tdTruncCastStatus | return + * True TRUNC_CAST_QUERY False, means the insert stmt has set auto truncation + * according, here do not need recompile. + * True NOT_CAST_BECAUSEOF_GUC True, we should recompile to make sure the char and + * varchar truncation enabled. + * False TRUNC_CAST_QUERY True, we should recompile to make sure turn off auto + * truncation function for char and varchar type data. + * False NOT_CAST_BECAUSEOF_GUC False, means we did not use auto truncation function + * before, no need to re-compile. + * True/False UNINVOLVED_QUERY False, uninvolved query always false. + * Don't need re-generate plan. + */ +bool checkRecompileCondition(CachedPlanSource* plansource) +{ + ListCell* l = NULL; + foreach (l, plansource->query_list) { + Query* q = (Query*)lfirst(l); + Assert(IsA(q, Query)); + /* If some rte is referenced by synonym object, must recompile. 
*/ + if (q->hasSynonyms) { + return true; + } + + if (q->tdTruncCastStatus == UNINVOLVED_QUERY) { + return false; + } + + if (u_sess->attr.attr_sql.td_compatible_truncation) { + if (q->tdTruncCastStatus == NOT_CAST_BECAUSEOF_GUC) { + return true; + } + } else { + if (q->tdTruncCastStatus == TRUNC_CAST_QUERY) { + return true; + } + } + } + return false; +} + +typedef struct { + int* nargs; + Oid** args; + List** constargs; + bool* ret; +} substitute_const_with_parameters_context; + +static Node* substitute_const_with_parameters_mutator(Node* node, substitute_const_with_parameters_context* context) +{ + if (node == NULL) + return NULL; + if (*context->ret) { + return NULL; + } + if (IsA(node, OpExpr) && list_length(((OpExpr*)node)->args) == 2) { + OpExpr* op_expr = (OpExpr*)node; + Node* arg1 = (Node*)linitial(op_expr->args); + Node* arg2 = (Node*)lsecond(op_expr->args); + + /* We only support parameter is const and operator is less than or less equal. */ + if (IsA(arg1, Const) && IsA(arg2, Const)) { + *context->ret = true; + return node; + } + } + if (IsA(node, FuncExpr)) { + FuncExpr* func_expr = (FuncExpr*)node; + if (func_expr->funcid >= DB4AI_PREDICT_BY_BOOL_OID && func_expr->funcid <= DB4AI_EXPLAIN_MODEL_OID) { + *context->ret = true; + return NULL; + } + } + if (IsA(node, UserVar)) { + *context->ret = true; + return NULL; + } + if (IsA(node, Const)) { + Const* con = (Const*)node; + Param* param = makeNode(Param); + param->paramkind = PARAM_EXTERN; + param->paramid = *context->nargs + 1; + param->paramtype = con->consttype; + param->paramtypmod = con->consttypmod; + param->paramcollid = con->constcollid; + param->location = con->location; + param->is_bind_param = true; + if (*context->args) { + *context->args = (Oid*)repalloc(*context->args, param->paramid * sizeof(Oid)); + } else { + *context->args = (Oid*)palloc(param->paramid * sizeof(Oid)); + } + errno_t rc = memset_s(*context->args + *context->nargs, sizeof(Oid), 0, sizeof(Oid)); + securec_check(rc, "\0", 
"\0"); + (*context->args)[param->paramid - 1] = param->paramtype; + *context->constargs = lappend(*context->constargs, con); + (*context->nargs)++; + return (Node*)param; + } + return expression_tree_mutator( + node, (Node* (*)(Node*, void*)) substitute_const_with_parameters_mutator, (void*)context); +} + +static Query* substitute_const_with_parameters(Query* expr, int* nargs, Oid** param_types, List** paramListInfo, bool* ret) +{ + substitute_const_with_parameters_context context; + + context.nargs = nargs; + *context.nargs = 0; + context.args = param_types; + context.constargs = paramListInfo; + context.ret = ret; + return query_tree_mutator(expr, (Node* (*)(Node*, void*)) substitute_const_with_parameters_mutator, &context, 0); +} + +static ParamListInfo PrepareParamsFromConsts(CachedPlanSource* psrc, List* params, const char* queryString) +{ + Oid* param_types = psrc->param_types; + int num_params = psrc->num_params; + int nparams = list_length(params); + ParamListInfo paramLI; + ListCell* l = NULL; + int i = 0; + + if (nparams != num_params) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("wrong number of parameters for prepared statement \"%s\"", psrc->stmt_name), + errdetail("Expected %d parameters but got %d.", num_params, nparams))); + + /* Quick exit if no parameters */ + if (num_params == 0) + return NULL; + + /* + * We have to run parse analysis for the expressions. Since the parser is + * not cool about scribbling on its input, copy first. 
+ */ + params = (List*)copyObject(params); + + /* Prepare the expressions for execution */ + + paramLI = (ParamListInfo)palloc(offsetof(ParamListInfoData, params) + num_params * sizeof(ParamExternData)); + /* we have static list of params, so no hooks needed */ + paramLI->paramFetch = NULL; + paramLI->paramFetchArg = NULL; + paramLI->parserSetup = NULL; + paramLI->parserSetupArg = NULL; + paramLI->params_need_process = false; + paramLI->numParams = num_params; + paramLI->uParamInfo = DEFUALT_INFO; + paramLI->params_lazy_bind = false; + + foreach (l, params) { + Const* e = (Const*)lfirst(l); + ParamExternData* prm = ¶mLI->params[i]; + + prm->ptype = param_types[i]; + prm->pflags = PARAM_FLAG_CONST; + prm->value = e->constvalue; + prm->isnull = e->constisnull; + prm->tabInfo = NULL; + i++; + } + return paramLI; +} + +bool quickPlanner(List* querytree_list, Node* parsetree, const char*queryString, CommandDest dest, char* completionTag) +{ + if (!u_sess->attr.attr_common.enable_iud_fusion) { + return false; + } + if (querytree_list == NULL || querytree_list->length != 1) { + return false; + } + Query* query = (Query*)linitial(querytree_list); + if (query->hasSubLinks || (query->rtable == NULL || query->rtable->length != 1) || query->groupClause != NULL) { + return false; + } + if (query->commandType != CMD_UPDATE && query->commandType != CMD_DELETE) { + return false; + } + RangeTblEntry* rte = (RangeTblEntry*)linitial(query->rtable); + if (rte == NULL || rte->ispartrel) { + return false; + } + constexpr uint32 plancache_namesize = 64; + if (strlen(queryString) >= plancache_namesize) { + return false; + } + int nargs; + Oid* param_types = NULL; + List* paramListInfo = NULL; + CachedPlan* cplan = NULL; + List* plan_list = NIL; + ParamListInfo paramLI; + EState* estate = NULL; + Portal portal; + int eflags; + long count; + bool ret = false; + query = substitute_const_with_parameters(query, &nargs, ¶m_types, ¶mListInfo, &ret); + if (ret) { + return false; + } + if 
(paramListInfo == NULL || paramListInfo->length == 0) { + return false; + } + StringInfo select_sql = makeStringInfo(); + deparse_query((Query*)query, select_sql, NIL, false, false); + if (select_sql->len >= (int)plancache_namesize) { + return false; + } + PreparedStatement *entry = NULL; + entry = FetchPreparedStatement(select_sql->data, false, false); + CachedPlanSource* psrc = NULL; + DestReceiver* receiver = CreateDestReceiver(dest); + /* Create a new portal to run the query in */ + portal = CreateNewPortal(); + /* Don't display the portal in pg_cursors, it is for internal use only */ + portal->visible = false; + if (dest == DestRemote) { + SetRemoteDestReceiverParams(receiver, portal); + } + MemoryContext oldcxt = MemoryContextSwitchTo(PortalGetHeapMemory(portal)); + if (entry == NULL) { + // MemoryContext oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + psrc = CreateCachedPlan((Node*)parsetree, + select_sql->data, +#ifdef PGXC + select_sql->data, +#endif + CreateCommandTag((Node*)parsetree)); + MemoryContextSwitchTo(oldcxt); + List* new_querytree_list = NULL; + new_querytree_list = list_make1(query); + CompleteCachedPlan(psrc, new_querytree_list, NULL, param_types, NULL, nargs, NULL, NULL, 0, true, select_sql->data); + StorePreparedStatement(select_sql->data, psrc, true); + entry = FetchPreparedStatement(select_sql->data, false, false); + if (entry == NULL) { + MemoryContextSwitchTo(oldcxt); + return false; + } + } + psrc = entry->plansource; + if (!psrc->is_valid) { + DropPreparedStatement(entry->stmt_name, true); + return false; + } + if (nargs != entry->plansource->num_params) { + DropPreparedStatement(entry->stmt_name, true); + return false; + } + for (int i = 0; i < nargs; i++) { + if (entry->plansource->param_types[i] != param_types[i]) { + DropPreparedStatement(entry->stmt_name, true); + return false; + } + } + if (entry->plansource->num_params > 0) { + paramLI = PrepareParamsFromConsts(psrc, paramListInfo, queryString); + } + + 
OpFusion::clearForCplan((OpFusion*)psrc->opFusionObj, psrc); + + PG_TRY(); + { + if (psrc->opFusionObj != NULL) { + Assert(psrc->cplan == NULL); + (void)RevalidateCachedQuery(psrc); + } + } + PG_CATCH(); + { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Invalid Param in QuickPlanner"))); + DropPreparedStatement(entry->stmt_name, true); + return false; + } + PG_END_TRY(); + if (psrc->opFusionObj != NULL) { + OpFusion *opFusionObj = (OpFusion *)(psrc->opFusionObj); + if (opFusionObj->IsGlobal()) { + opFusionObj = (OpFusion *)OpFusion::FusionFactory(opFusionObj->m_global->m_type, + u_sess->cache_mem_cxt, psrc, NULL, paramLI); + Assert(opFusionObj != NULL); + } + opFusionObj->setPreparedDestReceiver(receiver); + opFusionObj->useOuterParameter(paramLI); + opFusionObj->setCurrentOpFusionObj(opFusionObj); + + CachedPlanSource* cps = opFusionObj->m_global->m_psrc; + bool needBucketId = cps != NULL && cps->gplan; + if (needBucketId) { + setCachedPlanBucketId(cps->gplan, paramLI); + } + + if (OpFusion::process(FUSION_EXECUTE, NULL, completionTag, false, NULL)) { + MemoryContextSwitchTo(oldcxt); + return true; + } + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Bypass process Failed"))); + } + PG_TRY(); + { + /* Copy the plan's saved query string into the portal's memory */ + char* query_string = MemoryContextStrdup(PortalGetHeapMemory(portal), entry->plansource->query_string); + + /* Replan if needed, and increment plan refcount for portal */ + if (ENABLE_CACHEDPLAN_MGR) { + cplan = GetWiseCachedPlan(psrc, paramLI, false); + } else { + cplan = GetCachedPlan(psrc, paramLI, false); + } + + plan_list = cplan->stmt_list; + + /* + * Now we can define the portal. + * + * DO NOT put any code that could possibly throw an error between the + * above GetCachedPlan call and here. 
+ */ + PortalDefineQuery(portal, NULL, query_string, entry->plansource->commandTag, plan_list, cplan); + portal->nextval_default_expr_type = psrc->nextval_default_expr_type; + + /* incase change shared plan in execute stage */ + CopyPlanForGPCIfNecessary(entry->plansource, portal); + } + PG_CATCH(); + { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Invalid Param in QuickPlanner2"))); + DropPreparedStatement(entry->stmt_name, true); + return false; + } + PG_END_TRY(); + /* Plain old EXECUTE */ + eflags = 0; + count = FETCH_ALL; + if (OpFusion::IsSqlBypass(psrc, plan_list)) { + psrc->opFusionObj = + OpFusion::FusionFactory(OpFusion::getFusionType(cplan, paramLI, NULL), + u_sess->cache_mem_cxt, psrc, NULL, paramLI); + psrc->is_checked_opfusion = true; + if (psrc->opFusionObj != NULL) { + ((OpFusion*)psrc->opFusionObj)->setPreparedDestReceiver(receiver); + ((OpFusion*)psrc->opFusionObj)->useOuterParameter(paramLI); + ((OpFusion*)psrc->opFusionObj)->setCurrentOpFusionObj((OpFusion*)psrc->opFusionObj); + + if (OpFusion::process(FUSION_EXECUTE, NULL, completionTag, false, NULL)) { + MemoryContextSwitchTo(oldcxt); + return true; + } + Assert(0); + } + } + MemoryContextSwitchTo(oldcxt); + /* + * Run the portal as appropriate. + */ + PortalStart(portal, paramLI, eflags, GetActiveSnapshot()); + + (void)PortalRun(portal, count, false, receiver, receiver, completionTag); + + PortalDrop(portal, false); + + if (estate != NULL) + FreeExecutorState(estate); + return true; +} diff --git a/contrib/dolphin/sql/test_mysql_prepare.sql b/contrib/dolphin/sql/test_mysql_prepare.sql index 4535cf56f..0a753af9b 100644 --- a/contrib/dolphin/sql/test_mysql_prepare.sql +++ b/contrib/dolphin/sql/test_mysql_prepare.sql @@ -236,6 +236,15 @@ deallocate s0; deallocate s1; deallocate s2; +--prepare replace +prepare s0 as select * from t1_xc_fqs t1 left join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? 
order by t1.id1; +execute s0 using 1; +prepare s0 as 'select * from t1_xc_fqs t1 right join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? order by t2.id1'; +execute s0 using @a; +prepare s0 as select * from t1_xc_fqs t1 full join t2_xc_fqs t2 on t1.id1=t2.id1 and t2.id1=? order by t1.id1, t2.id1; +execute s0 using 1; +deallocate s0; + reset dolphin.b_compatibility_mode; reset enable_set_variable_b_format; drop schema test_mysql_prepare cascade; -- Gitee From 37ca73c6d1064121cc77501955d75316d8225b9b Mon Sep 17 00:00:00 2001 From: huangjiajun <1148612505@qq.com> Date: Fri, 19 Jan 2024 19:30:59 +0800 Subject: [PATCH 209/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=BD=BF=E7=94=A8or?= =?UTF-8?q?=E8=BF=90=E7=AE=97=E7=AC=A6=E6=97=B6=E5=8F=91=E7=94=9F=E6=8A=A5?= =?UTF-8?q?=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/convert.out | 1 + .../json_operator_test.out | 516 +- .../multi_type_or_test.out | 4985 +++++++++++++++++ contrib/dolphin/parallel_schedule_dolphin | 2 +- .../dolphin/plugin_parser/parse_coerce.cpp | 20 +- .../rollback_script/dolphin--3.0--2.0.sql | 13 +- contrib/dolphin/sql/builtin_funcs/convert.sql | 1 + .../json_operator_test.sql | 128 +- .../multi_type_or_test.sql | 2379 ++++++++ .../upgrade_script/dolphin--2.0--3.0.sql | 20 + 10 files changed, 7669 insertions(+), 396 deletions(-) create mode 100644 contrib/dolphin/expected/operator_compatibility_test/multi_type_or_test.out create mode 100644 contrib/dolphin/sql/operator_compatibility_test/multi_type_or_test.sql diff --git a/contrib/dolphin/expected/builtin_funcs/convert.out b/contrib/dolphin/expected/builtin_funcs/convert.out index 356b837f4..3d2e06f35 100644 --- a/contrib/dolphin/expected/builtin_funcs/convert.out +++ b/contrib/dolphin/expected/builtin_funcs/convert.out @@ -157,6 +157,7 @@ select convert(1 using decimal(10,3)); 1.000 (1 row) +set dolphin.b_compatibility_mode = off; select pg_typeof(convert('1', char)); pg_typeof 
----------- diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 3c8c26855..2fafef725 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -478,13 +478,13 @@ CREATE TABLE test_json_type AS SELECT -- `binary` << `json` AS `binary<>json | 1 enum_t<>json | 5 set_t<>enum_t | 0 json<>set_t | 0 json< Date: Thu, 18 Jan 2024 18:46:03 +0800 Subject: [PATCH 210/434] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dcreate/alter=20table?= =?UTF-8?q?=E9=83=A8=E5=88=86=E9=80=89=E9=A1=B9=E4=B8=8D=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E7=A9=BA=E6=A0=BC=E5=88=86=E5=89=B2=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/table_option_separator.out | 19 + contrib/dolphin/parallel_schedule_dolphin | 2 +- contrib/dolphin/plugin_parser/gram.y | 596 +++++++++++++----- contrib/dolphin/plugin_parser/parser.cpp | 22 + .../dolphin/sql/table_option_separator.sql | 11 + 5 files changed, 503 insertions(+), 147 deletions(-) create mode 100644 contrib/dolphin/expected/table_option_separator.out create mode 100644 contrib/dolphin/sql/table_option_separator.sql diff --git a/contrib/dolphin/expected/table_option_separator.out b/contrib/dolphin/expected/table_option_separator.out new file mode 100644 index 000000000..7aed0bb5e --- /dev/null +++ b/contrib/dolphin/expected/table_option_separator.out @@ -0,0 +1,19 @@ +create schema table_option_separator; +set current_schema = table_option_separator; +create table t1(a int) engine=innodb encryption='n'; +WARNING: ENCRYPTION for TABLE is not supported for current version. skipped +alter table t1 min_rows=1 max_rows=2; +WARNING: MIN_ROWS for TABLE is not supported for current version. skipped +WARNING: MAX_ROWS for TABLE is not supported for current version. 
skipped +create table t2(a int) engine=innodb,encryption='n'; +WARNING: ENCRYPTION for TABLE is not supported for current version. skipped +alter table t2 min_rows=1,max_rows=2; +WARNING: MIN_ROWS for TABLE is not supported for current version. skipped +WARNING: MAX_ROWS for TABLE is not supported for current version. skipped +alter table t1 min_rows=1,max_rows=2; +WARNING: MIN_ROWS for TABLE is not supported for current version. skipped +WARNING: MAX_ROWS for TABLE is not supported for current version. skipped +drop table t1; +drop table t2; +reset current_schema; +drop schema table_option_separator; diff --git a/contrib/dolphin/parallel_schedule_dolphin b/contrib/dolphin/parallel_schedule_dolphin index 0ed92900e..534c6449f 100644 --- a/contrib/dolphin/parallel_schedule_dolphin +++ b/contrib/dolphin/parallel_schedule_dolphin @@ -30,7 +30,7 @@ test: test_shows_4 test_shows_5 test: nvarchar regexp upsert zerofill test_set_charset test_optimize test_charset_collate charset_utf8mb4_b_db charset_gbk_b_db -test: test_binary test_blob test_datatype test_fixed test_mysql_enum bxconst_test +test: test_binary test_blob test_datatype test_fixed test_mysql_enum bxconst_test table_option_separator test: test_enum_collation diff --git a/contrib/dolphin/plugin_parser/gram.y b/contrib/dolphin/plugin_parser/gram.y index e5485985c..a6eea6770 100644 --- a/contrib/dolphin/plugin_parser/gram.y +++ b/contrib/dolphin/plugin_parser/gram.y @@ -83,7 +83,7 @@ #include "plugin_parser/parse_utilcmd.h" #include "pgxc/pgxc.h" #include "nodes/nodes.h" -#include "pgxc/poolmgr.h" +#include "pgxc/poolmgr.h"/ #include "plugin_parser/parser.h" #include "storage/lmgr.h" #include "storage/tcap.h" @@ -432,6 +432,7 @@ static CharsetCollateOptions* MakeCharsetCollateOptions(CharsetCollateOptions *o static Node *checkNullNode(Node *n); static CreateTableOptions* MakeCreateTableOptions(CreateTableOptions *tableOptions, SingleTableOption *tableOption); +static CreateTableOptions* 
MergeCreateTableOptions(CreateTableOptions *frontTableOptions, CreateTableOptions *rearTableOptions); static CreateIndexOptions* MakeCreateIndexOptions(CreateIndexOptions *indexOptions, SingleIndexOption *indexOption); static SingleTableOption* CreateSingleTableOption(TableOptionType tableOptionType); #define TYPE_LEN 4 /* strlen("TYPE") */ @@ -605,8 +606,11 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul struct CondInfo* condinfo; struct TypeAttr* typeattr; } -%type CreateOption CreateIfNotExistsOption CreateAsOption +%type CreateOption CreateIfNotExistsOption CreateAsOption CreateTableOption %type CreateOptionList CreateIfNotExistsOptionList CreateAsOptionList + CreateTableOptionList CreateOptionEtcList CreateOptionBeforeList CreateOptionBefore CreateOptionAfterList CreateOptionAfter + CreateIfNotExistsOptionEtcList CreateIfNotExistsOptionBeforeList CreateIfNotExistsOptionBefore CreateIfNotExistsOptionAfterList CreateIfNotExistsOptionAfter + CreateAsOptionEtcList CreateAsOptionBeforeList CreateAsOptionBefore CreateAsOptionAfterList CreateAsOptionAfter %type TableIndexOption PartitionTableIndexOption %type TableIndexOptionList PartitionTableIndexOptionList %type index_method_relation_clause fulltext_index_method_relation_clause @@ -685,10 +689,10 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul %type OptNoLog %type unique_name -%type alter_table_cmd alter_partition_cmd alter_type_cmd opt_collate_clause exchange_partition_cmd move_partition_cmd +%type alter_table_cmd alter_table_option alter_partition_cmd alter_type_cmd opt_collate_clause exchange_partition_cmd move_partition_cmd modify_column_cmd reset_partition_cmd replica_identity add_column_first_after event_from_clause -%type alter_table_cmds alter_partition_cmds alter_table_or_partition alter_index_or_partition alter_type_cmds add_column_cmds modify_column_cmds alter_index_rebuild_partition +%type alter_table_cmds alter_table_option_list 
alter_partition_cmds alter_table_or_partition alter_index_or_partition alter_type_cmds add_column_cmds modify_column_cmds alter_index_rebuild_partition %type AlterPartitionRebuildStmt AlterPartitionRemoveStmt AlterPartitionCheckStmt AlterPartitionRepairStmt AlterPartitionOptimizeStmt @@ -1306,6 +1310,7 @@ static inline SortByNulls GetNullOrderRule(SortByDir sortBy, SortByNulls nullRul FORCE_INDEX USE_INDEX IGNORE_INDEX LOCK_TABLES LABEL_LOOP LABEL_REPEAT LABEL_WHILE WITH_PARSER + STORAGE_DISK STORAGE_MEMORY /* Precedence: lowest to highest */ %nonassoc AUTHID /* AUTHID has lower priority than the BODY_P */ @@ -4939,6 +4944,14 @@ alter_table_cmds: $$ = list_concat($1, (List*)$3); } } + | alter_table_option_list + { + $$ = $1; + } + | alter_table_cmds ',' alter_table_option_list + { + $$ = list_concat($1, $3); + } ; /* ALTER TABLE PARTITION sql clauses */ @@ -5995,12 +6008,121 @@ alter_table_cmd: n->def = (Node *)$1; $$ = (Node *) n; } - | row_format_option - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_SetTableRowFormat; - $$ = (Node *) n; - } +/* PGXC_BEGIN */ + /* ALTER TABLE DISTRIBUTE BY ... 
*/ + | OptDistributeByInternal + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_DistributeBy; + n->def = (Node *)$1; + $$ = (Node *)n; + } + /* ALTER TABLE TO [ NODE (nodelist) | GROUP groupname ] */ + | OptSubClusterInternal + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_SubCluster; + n->def = (Node *)$1; + $$ = (Node *)n; + } + /* ALTER TABLE ADD NODE (nodelist) */ + | ADD_P NODE pgxcnodes + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_AddNodeList; + n->def = (Node *)$3; + $$ = (Node *)n; + } + /* ALTER TABLE DELETE NODE (nodelist) */ + | DELETE_P NODE pgxcnodes + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_DeleteNodeList; + n->def = (Node *)$3; + $$ = (Node *)n; + } + /* ALTER TABLE UPDATE SLICE LIKE (reftalbename), only used for redis range/list distribution table */ + | UPDATE SLICE LIKE dolphin_qualified_name + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_UpdateSliceLike; + n->exchange_with_rel = $4; + $$ = (Node *)n; + } + /* ALTER TABLE ENABLE ROW LEVEL SECURITY */ + | ENABLE_P ROW LEVEL SECURITY + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_EnableRls; + $$ = (Node *)n; + } + /* ALTER TABLE DISABLE ROW LEVEL SECURITY */ + | DISABLE_P ROW LEVEL SECURITY + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_DisableRls; + $$ = (Node *)n; + } + /* ALTER TABLE FORCE ROW LEVEL SECURITY */ + | FORCE ROW LEVEL SECURITY + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_ForceRls; + $$ = (Node *)n; + } + /* ALTER TABLE NO FORCE ROW LEVEL SECURITY */ + | NO FORCE ROW LEVEL SECURITY + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_NoForceRls; + $$ = (Node *)n; + } + /* ALTER TABLE ENCRYPTION KEY ROTATION */ + | ENCRYPTION KEY ROTATION + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_EncryptionKeyRotation; + $$ = (Node *)n; + } + | CharsetCollate + { + AlterTableCmd *n = 
makeNode(AlterTableCmd); + n->subtype = AT_SetCharsetCollate; + n->def = (Node *)$1; + $$ = (Node*)n; + } + | CONVERT TO convert_charset opt_collate + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_ConvertCharset; + CharsetCollateOptions *cc = makeNode(CharsetCollateOptions); + cc->cctype = OPT_CHARSETCOLLATE; + cc->charset = $3; + cc->collate = $4; + n->def = (Node *)cc; + $$ = (Node*)n; + } +/* PGXC_END */ + ; + +alter_table_option_list: + alter_table_option + { + $$ = list_make1($1); + } + | alter_table_option_list alter_table_option + { + $$ = lappend($1, $2); + } + ; + +alter_table_option: + row_format_option + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_SetTableRowFormat; + $$ = (Node *) n; + } | autoextend_size_option { AlterTableCmd *n = makeNode(AlterTableCmd); @@ -6121,7 +6243,7 @@ alter_table_cmd: n->subtype = AT_UNION; $$ = (Node *) n; } - | OptTableSpace_without_empty tablespace_storage_option + | OptTableSpace_without_empty { AlterTableCmd *n = makeNode(AlterTableCmd); n->subtype = AT_TABLESPACE; @@ -6134,82 +6256,6 @@ alter_table_cmd: n->subtype = AT_TABLESPACE_STORAGE; $$ = (Node *) n; } -/* PGXC_BEGIN */ - /* ALTER TABLE DISTRIBUTE BY ... 
*/ - | OptDistributeByInternal - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_DistributeBy; - n->def = (Node *)$1; - $$ = (Node *)n; - } - /* ALTER TABLE TO [ NODE (nodelist) | GROUP groupname ] */ - | OptSubClusterInternal - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_SubCluster; - n->def = (Node *)$1; - $$ = (Node *)n; - } - /* ALTER TABLE ADD NODE (nodelist) */ - | ADD_P NODE pgxcnodes - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_AddNodeList; - n->def = (Node *)$3; - $$ = (Node *)n; - } - /* ALTER TABLE DELETE NODE (nodelist) */ - | DELETE_P NODE pgxcnodes - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_DeleteNodeList; - n->def = (Node *)$3; - $$ = (Node *)n; - } - /* ALTER TABLE UPDATE SLICE LIKE (reftalbename), only used for redis range/list distribution table */ - | UPDATE SLICE LIKE dolphin_qualified_name - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_UpdateSliceLike; - n->exchange_with_rel = $4; - $$ = (Node *)n; - } - /* ALTER TABLE ENABLE ROW LEVEL SECURITY */ - | ENABLE_P ROW LEVEL SECURITY - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_EnableRls; - $$ = (Node *)n; - } - /* ALTER TABLE DISABLE ROW LEVEL SECURITY */ - | DISABLE_P ROW LEVEL SECURITY - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_DisableRls; - $$ = (Node *)n; - } - /* ALTER TABLE FORCE ROW LEVEL SECURITY */ - | FORCE ROW LEVEL SECURITY - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_ForceRls; - $$ = (Node *)n; - } - /* ALTER TABLE NO FORCE ROW LEVEL SECURITY */ - | NO FORCE ROW LEVEL SECURITY - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_NoForceRls; - $$ = (Node *)n; - } - /* ALTER TABLE ENCRYPTION KEY ROTATION */ - | ENCRYPTION KEY ROTATION - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_EncryptionKeyRotation; - $$ = (Node *)n; - } | AutoIncrementValue { #ifdef ENABLE_MULTIPLE_NODES 
@@ -6228,34 +6274,15 @@ alter_table_cmd: n->def = $1; $$ = (Node *)n; } - | CharsetCollate - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_SetCharsetCollate; - n->def = (Node *)$1; - $$ = (Node*)n; - } - | CONVERT TO convert_charset opt_collate - { - AlterTableCmd *n = makeNode(AlterTableCmd); - n->subtype = AT_ConvertCharset; - CharsetCollateOptions *cc = makeNode(CharsetCollateOptions); - cc->cctype = OPT_CHARSETCOLLATE; - cc->charset = $3; - cc->collate = $4; - n->def = (Node *)cc; - $$ = (Node*)n; - } -/* PGXC_END */ /* table comments start */ | COMMENT opt_equal SCONST - { + { BCompatibilityOptionSupportCheck($1); AlterTableCmd *n = makeNode(AlterTableCmd); n->subtype = AT_COMMENTS; n->name = $3; $$ = (Node *)n; - } + } /* table comments end */ ; @@ -8050,7 +8077,74 @@ opt_rename: * INTERNAL DATA xxxxxxxx * *****************************************************************************/ - + +CreateOptionEtcList: + CreateOptionList + { + $$ = $1; + } + | CreateTableOptionList + { + $$ = $1; + } + | CreateOptionBeforeList + { + $$ = $1; + } + | CreateOptionAfterList + { + $$ = $1; + } + | CreateOptionBeforeList CreateOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + | CreateOptionAfterList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateOptionBeforeList: + CreateOptionBefore + { + $$ = $1; + } + | CreateOptionBeforeList CreateOptionBefore + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +/* + * Before indicates that CreatOptionList comes first, and after indicates that it comes later. + * The same goes for the following. 
+ */ +CreateOptionBefore: + CreateOptionList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateOptionAfterList: + CreateOptionAfter + { + $$ = $1; + } + | CreateOptionAfterList CreateOptionAfter + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateOptionAfter: + CreateTableOptionList CreateOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + CreateOptionList: CreateOption { $$ = MakeCreateTableOptions(NULL, $1); @@ -8061,6 +8155,11 @@ CreateOptionList: CreateOption } ; +/* + * CreateAsOption: createAsStmt option + * CreateTableOption: equivalent to table_option of MySQL + * CreateOption: other create option + */ CreateOption: CreateIfNotExistsOption { @@ -8075,6 +8174,69 @@ CreateOption: } ; +CreateIfNotExistsOptionEtcList: + CreateIfNotExistsOptionList + { + $$ = $1; + } + | CreateTableOptionList + { + $$ = $1; + } + | CreateIfNotExistsOptionBeforeList + { + $$ = $1; + } + | CreateIfNotExistsOptionAfterList + { + $$ = $1; + } + | CreateIfNotExistsOptionBeforeList CreateIfNotExistsOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + | CreateIfNotExistsOptionAfterList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateIfNotExistsOptionBeforeList: + CreateIfNotExistsOptionBefore + { + $$ = $1; + } + | CreateIfNotExistsOptionBeforeList CreateIfNotExistsOptionBefore + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateIfNotExistsOptionBefore: + CreateIfNotExistsOptionList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateIfNotExistsOptionAfterList: + CreateIfNotExistsOptionAfter + { + $$ = $1; + } + | CreateIfNotExistsOptionAfterList CreateIfNotExistsOptionAfter + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateIfNotExistsOptionAfter: + CreateTableOptionList CreateIfNotExistsOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + CreateIfNotExistsOptionList: CreateIfNotExistsOption { $$ = MakeCreateTableOptions(NULL, 
$1); @@ -8136,27 +8298,71 @@ CreateIfNotExistsOption: { $$ = $1; } - | OptAutoIncrement_without_empty - { - SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); - n->option_type = OPT_AUTO_INC; - n->option.autoIncStart = $1; - $$ = n; - } - | COMMENT opt_equal SCONST - { - SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); - n->option_type = OPT_COMMENT_TAB; - CommentStmt *node = makeNode(CommentStmt); - node->objtype = OBJECT_TABLE; - node->objname = NIL; - node->objargs = NIL; - node->comment = $3; - n->option.comment = node; - $$ = n; - } ; +CreateAsOptionEtcList: + CreateAsOptionList + { + $$ = $1; + } + | CreateTableOptionList + { + $$ = $1; + } + | CreateAsOptionBeforeList + { + $$ = $1; + } + | CreateAsOptionAfterList + { + $$ = $1; + } + | CreateAsOptionBeforeList CreateAsOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + | CreateAsOptionAfterList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateAsOptionBeforeList: + CreateAsOptionBefore + { + $$ = $1; + } + | CreateAsOptionBeforeList CreateAsOptionBefore + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateAsOptionBefore: + CreateAsOptionList CreateTableOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateAsOptionAfterList: + CreateAsOptionAfter + { + $$ = $1; + } + | CreateAsOptionAfterList CreateAsOptionAfter + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + +CreateAsOptionAfter: + CreateTableOptionList CreateAsOptionList + { + $$ = MergeCreateTableOptions($1, $2); + } + ; + CreateAsOptionList: CreateAsOption { $$ = MakeCreateTableOptions(NULL, $1); @@ -8168,14 +8374,7 @@ CreateAsOptionList: CreateAsOption ; CreateAsOption: - OptTableSpace_without_empty - { - SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); - n->option_type = OPT_TABLESPACE; - n->option.char_content = $1; - $$ = n; - } - | OptWith_without_empty + OptWith_without_empty { 
SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); n->option_type = OPT_WITH; @@ -8212,6 +8411,27 @@ CreateAsOption: $$ = n; } /* PGXC_END */ + ; + +CreateTableOptionList: + CreateTableOption + { + $$ = MakeCreateTableOptions(NULL, $1); + } + | CreateTableOptionList opt_comma CreateTableOption + { + $$ = MakeCreateTableOptions($1, $3); + } + ; + +CreateTableOption: + OptTableSpace_without_empty + { + SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); + n->option_type = OPT_TABLESPACE; + n->option.char_content = $1; + $$ = n; + } | opt_compression_without_empty { SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); @@ -8330,7 +8550,26 @@ CreateAsOption: { $$ = CreateSingleTableOption(OPT_TABLESPACE_STORAGE); } - ; + | OptAutoIncrement_without_empty + { + SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); + n->option_type = OPT_AUTO_INC; + n->option.autoIncStart = $1; + $$ = n; + } + | COMMENT opt_equal SCONST + { + SingleTableOption *n = (SingleTableOption*)palloc0(sizeof(SingleTableOption)); + n->option_type = OPT_COMMENT_TAB; + CommentStmt *node = makeNode(CommentStmt); + node->objtype = OBJECT_TABLE; + node->objname = NIL; + node->objargs = NIL; + node->comment = $3; + n->option.comment = node; + $$ = n; + } + ; autoextend_size_option: AUTOEXTEND_SIZE opt_equal Iconst {} @@ -8411,13 +8650,13 @@ stats_sample_pages_option: ; tablespace_storage_option_without_empty: - STORAGE DISK {} - | STORAGE MEMORY {} + STORAGE_DISK {} + | STORAGE_MEMORY {} ; tablespace_storage_option: - STORAGE DISK {} - | STORAGE MEMORY {} + STORAGE_DISK {} + | STORAGE_MEMORY {} | /*EMPTY*/ {} ; @@ -8427,7 +8666,7 @@ charset_with_opt_equal: ; CreateStmt: CREATE OptTemp TABLE dolphin_qualified_name '(' OptTableElementList ')' - CreateOptionList + CreateOptionEtcList { CreateStmt *n = makeNode(CreateStmt); $4->relpersistence = $2; @@ -8489,7 +8728,7 @@ CreateStmt: CREATE OptTemp TABLE 
dolphin_qualified_name '(' OptTableElementList $$ = (Node *)n; } | CREATE OptTemp TABLE IF_P NOT EXISTS dolphin_qualified_name '(' - OptTableElementList ')' CreateIfNotExistsOptionList + OptTableElementList ')' CreateIfNotExistsOptionEtcList { CreateStmt *n = makeNode(CreateStmt); $7->relpersistence = $2; @@ -11945,7 +12184,7 @@ create_as_target: ; create_as_target_dolphin: - dolphin_qualified_name opt_column_list CreateAsOptionList + dolphin_qualified_name opt_column_list CreateAsOptionEtcList { $$ = makeNode(IntoClause); $$->rel = $1; @@ -11989,7 +12228,7 @@ create_as_target_dolphin: /* PGXC_END */ } - | dolphin_qualified_name '(' OptTableElementList ')' OptDuplicate CreateAsOptionList + | dolphin_qualified_name '(' OptTableElementList ')' OptDuplicate CreateAsOptionEtcList { if (u_sess->attr.attr_sql.sql_compatibility != B_FORMAT) { ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -41118,7 +41357,72 @@ static CreateTableOptions* MakeCreateTableOptions(CreateTableOptions *tableOptio break; } return tableOptions; -} +} + +static CreateTableOptions* MergeCreateTableOptions(CreateTableOptions *frontTableOptions, CreateTableOptions *rearTableOptions) { + if (rearTableOptions->inhRelations == NULL) { + rearTableOptions->inhRelations = frontTableOptions->inhRelations; + } + + if (rearTableOptions->options == NULL) { + rearTableOptions->options = frontTableOptions->options; + } + + if (rearTableOptions->oncommit == ONCOMMIT_NOOP) { + rearTableOptions->oncommit = frontTableOptions->oncommit; + } + + if (rearTableOptions->tablespacename == NULL) { + rearTableOptions->tablespacename = frontTableOptions->tablespacename; + } + + if (rearTableOptions->row_compress == REL_CMPRS_PAGE_PLAIN) { + rearTableOptions->row_compress = frontTableOptions->row_compress; + } + + if (rearTableOptions->partTableState == NULL) { + rearTableOptions->partTableState = frontTableOptions->partTableState; + } + + if (rearTableOptions->compress_type == NULL) { + 
rearTableOptions->compress_type = frontTableOptions->compress_type; + } + + /* PGXC_BEGIN */ + if (rearTableOptions->distributeby == NULL) { + rearTableOptions->distributeby = frontTableOptions->distributeby; + } + + if (rearTableOptions->subcluster == NULL) { + rearTableOptions->subcluster = frontTableOptions->subcluster; + } + /* PGXC_END */ + if (rearTableOptions->internalData == NULL) { + rearTableOptions->internalData = frontTableOptions->internalData; + } + + if (rearTableOptions->relkind == OBJECT_TABLE) { + rearTableOptions->relkind = frontTableOptions->relkind; + } + + if (rearTableOptions->autoIncStart == NULL) { + rearTableOptions->autoIncStart = frontTableOptions->autoIncStart; + } + + if (rearTableOptions->comment == NULL) { + rearTableOptions->comment = frontTableOptions->comment; + } + + if (rearTableOptions->charset == 0) { + rearTableOptions->charset = frontTableOptions->charset; + } + + if (rearTableOptions->collate == NULL) { + rearTableOptions->collate = frontTableOptions->collate; + } + + return rearTableOptions; +} static CreateIndexOptions *MakeCreateIndexOptions(CreateIndexOptions *indexOptions, SingleIndexOption *indexOption) { diff --git a/contrib/dolphin/plugin_parser/parser.cpp b/contrib/dolphin/plugin_parser/parser.cpp index b5d2c2424..24fbb7fde 100644 --- a/contrib/dolphin/plugin_parser/parser.cpp +++ b/contrib/dolphin/plugin_parser/parser.cpp @@ -893,6 +893,28 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) case SELECT: GetSessionContext()->is_create_alter_stmt = false; break; + case STORAGE: + /* + * STORAGE DISK/MEMORY must be reduced to one token, to allow STORAGE(...). 
+ */ + GET_NEXT_TOKEN(); + + switch (next_token) { + case DISK: + cur_token = STORAGE_DISK; + break; + case MEMORY: + cur_token = STORAGE_MEMORY; + break; + default: + /* save the lookahead token for next time */ + SET_LOOKAHEAD_TOKEN(); + /* and back up the output info to cur_token */ + lvalp->core_yystype = cur_yylval; + *llocp = cur_yylloc; + break; + } + break; #endif default: break; diff --git a/contrib/dolphin/sql/table_option_separator.sql b/contrib/dolphin/sql/table_option_separator.sql new file mode 100644 index 000000000..839ee47fd --- /dev/null +++ b/contrib/dolphin/sql/table_option_separator.sql @@ -0,0 +1,11 @@ +create schema table_option_separator; +set current_schema = table_option_separator; +create table t1(a int) engine=innodb encryption='n'; +alter table t1 min_rows=1 max_rows=2; +create table t2(a int) engine=innodb,encryption='n'; +alter table t2 min_rows=1,max_rows=2; +alter table t1 min_rows=1,max_rows=2; +drop table t1; +drop table t2; +reset current_schema; +drop schema table_option_separator; -- Gitee From 4cfe49eec371b995097841df109de9bb4a3cfa5f Mon Sep 17 00:00:00 2001 From: lukeman Date: Thu, 18 Jan 2024 19:24:11 +0800 Subject: [PATCH 211/434] =?UTF-8?q?=E5=A4=84=E7=90=86issue=EF=BC=9A?= =?UTF-8?q?=E5=85=BC=E5=AE=B9B=E5=BA=93=E4=BD=BF=E7=94=A8cast/convert?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E5=AF=B9binary=E5=88=97=E8=BD=ACsigned?= =?UTF-8?q?=E6=97=B6=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- contrib/dolphin/checklist/checklist.md | 1 + contrib/dolphin/expected/test_binary.out | 4 + contrib/dolphin/expected/uint_cast3.out | 444 ++++++------------ contrib/dolphin/include/plugin_utils/int16.h | 74 +++ .../dolphin/include/plugin_utils/varlena.h | 1 + .../dolphin/plugin_parser/parse_coerce.cpp | 9 +- contrib/dolphin/plugin_utils/adt/int16.cpp | 15 +- contrib/dolphin/plugin_utils/adt/int8.cpp | 31 +- .../dolphin/plugin_utils/adt/unsigned_int.cpp | 27 ++ 
contrib/dolphin/plugin_utils/adt/varlena.cpp | 2 +- .../rollback_script/dolphin--3.0--2.0.sql | 4 + contrib/dolphin/sql/uint_cast3.sql | 33 +- .../upgrade_script/dolphin--2.0--3.0.sql | 15 + 13 files changed, 314 insertions(+), 346 deletions(-) create mode 100755 contrib/dolphin/include/plugin_utils/int16.h diff --git a/contrib/dolphin/checklist/checklist.md b/contrib/dolphin/checklist/checklist.md index b75588dc3..8d71dad18 100644 --- a/contrib/dolphin/checklist/checklist.md +++ b/contrib/dolphin/checklist/checklist.md @@ -183,6 +183,7 @@ |plugin_utils|datetime.h |src\include\utils\datetime.h | |plugin_utils|fmgr.h |src\include\fmgr.h | |plugin_utils|int8.h |src\include\utils\int8.h | +|plugin_utils|int16.h |src\include\utils\int16.h | |plugin_utils|timestamp.h |src\include\utils\timestamp.h | |plugin_utils|plpgsql.h |src\include\utils\plpgsql.h | |plugin_utils|varbit.h |src\include\utils\varbit.h | diff --git a/contrib/dolphin/expected/test_binary.out b/contrib/dolphin/expected/test_binary.out index 43fa5b094..dc4a2aa81 100644 --- a/contrib/dolphin/expected/test_binary.out +++ b/contrib/dolphin/expected/test_binary.out @@ -939,12 +939,16 @@ WARNING: Data truncated for input data: "1234" -- binary to bigint select (20220101)::binary(30)::bigint; +WARNING: Data truncated for input data: "20220101" +CONTEXT: referenced column: int8 int8 ---------- 20220101 (1 row) select (-2075)::binary(30)::bigint; +WARNING: Data truncated for input data: "-2075" +CONTEXT: referenced column: int8 int8 ------- -2075 diff --git a/contrib/dolphin/expected/uint_cast3.out b/contrib/dolphin/expected/uint_cast3.out index e5e94627d..8cf5a59f3 100644 --- a/contrib/dolphin/expected/uint_cast3.out +++ b/contrib/dolphin/expected/uint_cast3.out @@ -652,336 +652,168 @@ CONTEXT: referenced column: c7 2 | 6 | 61 | 61 | 6 | 61 | 61 (3 rows) ---- 测试uint1/uint2/uint4/uint8,预期表现和unsigned一致 -select c1, cast(c2 as uint1), cast(c3 as uint1), cast(c4 as uint1), -cast(c5 as uint1) from t_binary0001 order 
by 1,2,3,4,5; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 - c1 | c2 | c3 | c4 | c5 -----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 -(3 rows) - -select c1, convert(c2, uint1), convert(c3, uint1), convert(c4, uint1), -convert(c5, uint1), convert(c6, uint1), convert(c7, uint1) from -t_binary0001 order by 1,2,3,4,5,6,7; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax 
for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: 
"61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c7 - c1 | c2 | c3 | c4 | c5 | c6 | c7 -----+----+----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 | 61 | 61 -(3 rows) - -select c1, cast(c2 as uint2), cast(c3 as uint2), cast(c4 as uint2), -cast(c5 as uint2) from t_binary0001 order by 1,2,3,4,5; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: 
"a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 - c1 | c2 | c3 | c4 | c5 -----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 -(3 rows) - -select c1, convert(c2, uint2), convert(c3, uint2), convert(c4, uint2), -convert(c5, uint2), convert(c6, uint2), convert(c7, uint2) from -t_binary0001 order by 1,2,3,4,5,6,7; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: 
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c7 - c1 | c2 | c3 | c4 | c5 | c6 | c7 
-----+----+----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 | 61 | 61 -(3 rows) - -select c1, cast(c2 as uint4), cast(c3 as uint4), cast(c4 as uint4), -cast(c5 as uint4) from t_binary0001 order by 1,2,3,4,5; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 - c1 | c2 | c3 | c4 | c5 -----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 -(3 rows) - -select c1, convert(c2, uint4), convert(c3, uint4), 
convert(c4, uint4), -convert(c5, uint4), convert(c6, uint4), convert(c7, uint4) from -t_binary0001 order by 1,2,3,4,5,6,7; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c7 -WARNING: invalid input 
syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c7 - c1 | c2 | c3 | c4 | c5 | c6 | c7 -----+----+----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 | 61 | 61 -(3 rows) - -select c1, cast(c2 as uint8), cast(c3 as uint8), cast(c4 as uint8), -cast(c5 as uint8) from t_binary0001 order by 1,2,3,4,5; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input 
syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 - c1 | c2 | c3 | c4 | c5 -----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 -(3 rows) - -select c1, convert(c2, uint8), convert(c3, uint8), convert(c4, uint8), -convert(c5, uint8), convert(c6, uint8), convert(c7, uint8) from -t_binary0001 order by 1,2,3,4,5,6,7; -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "abcdefghij" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: 
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c2 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "a" -CONTEXT: referenced column: c5 -WARNING: invalid input syntax for type double precision: "a熊猫竹" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­a熊猫竹竹爱åƒç«¹å­" -CONTEXT: referenced column: c7 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c3 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c4 -WARNING: invalid input syntax for type double precision: "61e7" -CONTEXT: referenced column: c6 -WARNING: invalid input syntax for type double precision: "61e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e7868ae78cabe7abb9e7abb9e788b1e59083e7abb9e5ad9061e78" -CONTEXT: referenced column: c7 - c1 | c2 | c3 | c4 | c5 | c6 | c7 
-----+----+----+----+----+----+---- - 1 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 0 | 0 | 0 | 0 | 0 | 0 - 2 | 6 | 61 | 61 | 6 | 61 | 61 -(3 rows) - --- 建表并æ’å…¥æ•°æ® create table t_binary(a binary(255)); -create table t_unsigned(a int8 unsigned); +create table t_integer(a1 int8 unsigned, a2 int4 unsigned, a3 int2 unsigned, a4 int1 unsigned, + a5 int8, a6 int4, a7 int2, a8 int1); insert into t_binary values('2e1'); insert into t_binary values('1ab'); insert into t_binary values('0x1ab'); insert into t_binary values('123'); --- 测试转æ¢ç»“æžœ -select cast(a as unsigned) from t_binary; +select cast(a as int8 unsigned), cast(a as int4 unsigned), cast(a as int2 unsigned), cast(a as int1 unsigned), +cast(a as int8), cast(a as int4), cast(a as int2), cast(a as int1) from t_binary; +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type int16: "2e1" +DETAIL: text contain invalid character +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "2e1" +CONTEXT: referenced column: a WARNING: invalid input syntax for type double precision: "2e1" CONTEXT: referenced column: a WARNING: invalid input syntax for type double precision: "1ab" CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type int16: "1ab" +DETAIL: 
text contain invalid character +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type int16: "0x1ab" +DETAIL: text contain invalid character +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a WARNING: invalid input syntax for type double precision: "0x1ab" CONTEXT: referenced column: a WARNING: Data truncated for input data: "123" CONTEXT: referenced column: a - a ------ - 2 - 1 - 0 - 123 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a + a | a | a | a | a | a | a | a +-----+-----+-----+-----+-----+-----+-----+----- + 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 123 | 123 | 123 | 
123 | 123 | 123 | 123 | 123 (4 rows) -insert ignore into t_unsigned select a from t_binary; +insert ignore into t_integer select a, a, a, a, a, a, a, a from t_binary; WARNING: Data truncated for input data: "2e1" -CONTEXT: referenced column: a +CONTEXT: referenced column: a1 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a2 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a3 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a4 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a5 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a6 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a7 +WARNING: Data truncated for input data: "2e1" +CONTEXT: referenced column: a8 WARNING: invalid input syntax for type double precision: "1ab" -CONTEXT: referenced column: a +CONTEXT: referenced column: a1 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a2 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a3 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a4 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a5 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a6 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a7 +WARNING: invalid input syntax for type double precision: "1ab" +CONTEXT: referenced column: a8 WARNING: invalid input syntax for type double precision: "0x1ab" -CONTEXT: referenced column: a +CONTEXT: referenced column: a1 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a2 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a3 +WARNING: invalid input syntax for type double 
precision: "0x1ab" +CONTEXT: referenced column: a4 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a5 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a6 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a7 +WARNING: invalid input syntax for type double precision: "0x1ab" +CONTEXT: referenced column: a8 WARNING: Data truncated for input data: "123" -CONTEXT: referenced column: a -select * from t_unsigned; - a ------ - 20 - 1 - 0 - 123 +CONTEXT: referenced column: a1 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a2 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a3 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a4 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a5 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a6 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a7 +WARNING: Data truncated for input data: "123" +CONTEXT: referenced column: a8 +select * from t_integer; + a1 | a2 | a3 | a4 | a5 | a6 | a7 | a8 +-----+-----+-----+-----+-----+-----+-----+----- + 20 | 20 | 20 | 20 | 20 | 20 | 20 | 20 + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 123 | 123 | 123 | 123 | 123 | 123 | 123 | 123 (4 rows) --- æ¸…ç† -drop table t_unsigned cascade; +drop table t_integer cascade; drop table t_binary cascade; drop table t_binary0001 cascade; SET dolphin.sql_mode = ''; diff --git a/contrib/dolphin/include/plugin_utils/int16.h b/contrib/dolphin/include/plugin_utils/int16.h new file mode 100755 index 000000000..308ad1d8d --- /dev/null +++ b/contrib/dolphin/include/plugin_utils/int16.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 2021, openGauss Contributors + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * int16.h + * Declarations for operations on 1216-bit integers. + * + * IDENTIFICATION + * dolphin/include/plugin_utils/int16.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef INT16_H +#define INT16_H + +#include "plugin_utils/fmgr.h" + +#ifdef DOLPHIN +extern bool scanint16(const char* str, bool errorOK, int128* result, bool* hasError = nullptr); +#else +extern bool scanint16(const char* str, bool errorOK, int128* result); +#endif + +extern Datum int16in(PG_FUNCTION_ARGS); +extern Datum int16out(PG_FUNCTION_ARGS); +extern Datum int16recv(PG_FUNCTION_ARGS); +extern Datum int16send(PG_FUNCTION_ARGS); + +extern Datum int16eq(PG_FUNCTION_ARGS); +extern Datum int16ne(PG_FUNCTION_ARGS); +extern Datum int16lt(PG_FUNCTION_ARGS); +extern Datum int16gt(PG_FUNCTION_ARGS); +extern Datum int16le(PG_FUNCTION_ARGS); +extern Datum int16ge(PG_FUNCTION_ARGS); + +extern Datum int16pl(PG_FUNCTION_ARGS); +extern Datum int16mi(PG_FUNCTION_ARGS); +extern Datum int16mul(PG_FUNCTION_ARGS); +extern Datum int16div(PG_FUNCTION_ARGS); + +extern Datum int1_16(PG_FUNCTION_ARGS); +extern Datum int16_1(PG_FUNCTION_ARGS); +extern Datum int2_16(PG_FUNCTION_ARGS); +extern Datum int16_2(PG_FUNCTION_ARGS); +extern Datum int4_16(PG_FUNCTION_ARGS); +extern Datum int16_4(PG_FUNCTION_ARGS); +extern Datum int8_16(PG_FUNCTION_ARGS); +extern Datum int16_8(PG_FUNCTION_ARGS); +extern Datum dtoi16(PG_FUNCTION_ARGS); +extern Datum 
i16tod(PG_FUNCTION_ARGS); +extern Datum ftoi16(PG_FUNCTION_ARGS); +extern Datum i16tof(PG_FUNCTION_ARGS); +extern Datum oidtoi16(PG_FUNCTION_ARGS); +extern Datum i16tooid(PG_FUNCTION_ARGS); +extern Datum int16_bool(PG_FUNCTION_ARGS); +extern Datum bool_int16(PG_FUNCTION_ARGS); +extern Datum int16_numeric(PG_FUNCTION_ARGS); +extern Datum numeric_int16(PG_FUNCTION_ARGS); + +#endif /* INT16_H */ + diff --git a/contrib/dolphin/include/plugin_utils/varlena.h b/contrib/dolphin/include/plugin_utils/varlena.h index e3c93ce13..d7a47d2bc 100644 --- a/contrib/dolphin/include/plugin_utils/varlena.h +++ b/contrib/dolphin/include/plugin_utils/varlena.h @@ -7,6 +7,7 @@ #ifdef DOLPHIN extern Datum bit_blob(VarBit* input); double float8in_internal(char* str, char** endptr_p, bool* hasError, CoercionContext ccontext); +char* AnyElementGetCString(Oid anyOid, Datum anyDatum, bool* hasError = nullptr); #endif #endif \ No newline at end of file diff --git a/contrib/dolphin/plugin_parser/parse_coerce.cpp b/contrib/dolphin/plugin_parser/parse_coerce.cpp index 2559e7412..151968446 100644 --- a/contrib/dolphin/plugin_parser/parse_coerce.cpp +++ b/contrib/dolphin/plugin_parser/parse_coerce.cpp @@ -3581,7 +3581,14 @@ bool IsEquivalentEnums(Oid enumOid1, Oid enumOid2) void TryFindSpecifiedCastFunction(const Oid sourceTypeId, const Oid targetTypeId, Oid defaultFuncId, Oid* funcId) { - if (sourceTypeId == TEXTOID && targetTypeId == TIMEOID) { + bool sourceIsVarlena = ENABLE_B_CMPT_MODE && findSignedFunctionIdx(sourceTypeId) == S_VARLENA; + if (sourceIsVarlena && targetTypeId == INT1OID) { + *funcId = get_func_oid("varlena_cast_int1", PG_CATALOG_NAMESPACE, NULL); + } else if (sourceIsVarlena && targetTypeId == INT2OID) { + *funcId = get_func_oid("varlena_cast_int2", PG_CATALOG_NAMESPACE, NULL); + } else if (sourceIsVarlena && targetTypeId == INT4OID) { + *funcId = get_func_oid("varlena_cast_int4", PG_CATALOG_NAMESPACE, NULL); + } else if (sourceTypeId == TEXTOID && targetTypeId == TIMEOID) { 
*funcId = get_func_oid("text_time_explicit", PG_CATALOG_NAMESPACE, NULL); } else if (ENABLE_B_CMPT_MODE && targetTypeId == INT8OID) { *funcId = findSignedExplicitCastFunction(sourceTypeId, defaultFuncId); diff --git a/contrib/dolphin/plugin_utils/adt/int16.cpp b/contrib/dolphin/plugin_utils/adt/int16.cpp index 3f672f265..c1d0889b1 100644 --- a/contrib/dolphin/plugin_utils/adt/int16.cpp +++ b/contrib/dolphin/plugin_utils/adt/int16.cpp @@ -37,9 +37,9 @@ #include "funcapi.h" #include "libpq/pqformat.h" #include "utils/builtins.h" -#include "utils/int16.h" #include "utils/int8.h" #include "utils/numeric.h" +#include "plugin_utils/int16.h" #ifdef DOLPHIN #include "plugin_commands/mysqlmode.h" #endif @@ -89,7 +89,11 @@ static inline bool check_trailing_symbol(unsigned char ptr) return ptr != '\0' && isspace(ptr); } +#ifdef DOLPHIN +bool scanint16(const char* str, bool errorOK, int128* result, bool* hasError) +#else bool scanint16(const char* str, bool errorOK, int128* result) +#endif { const char* ptr = str; int128 tmp = 0; @@ -136,6 +140,9 @@ bool scanint16(const char* str, bool errorOK, int128* result) errcause("invalid input."), erraction("use numeric for large integer value."))); *result = neg ? 
PG_INT128_MIN : PG_INT128_MAX; + if (hasError) { + *hasError = true; + } return true; #else if (errorOK) { @@ -165,6 +172,9 @@ bool scanint16(const char* str, bool errorOK, int128* result) errdetail("text contain invalid character"), errcause("invalid input."), erraction("check the validity of input."))); + if (hasError) { + *hasError = true; + } #else if (errorOK) { @@ -193,6 +203,9 @@ bool scanint16(const char* str, bool errorOK, int128* result) errcause("invalid input."), erraction("use numeric for large integer value."))); tmp = -(tmp + 1); + if (hasError) { + *hasError = true; + } } else { tmp = -tmp; } diff --git a/contrib/dolphin/plugin_utils/adt/int8.cpp b/contrib/dolphin/plugin_utils/adt/int8.cpp index d379d2c9f..87a740650 100644 --- a/contrib/dolphin/plugin_utils/adt/int8.cpp +++ b/contrib/dolphin/plugin_utils/adt/int8.cpp @@ -27,7 +27,8 @@ #include "plugin_commands/mysqlmode.h" #ifdef DOLPHIN #include "plugin_utils/varbit.h" -#include "utils/int16.h" +#include "plugin_utils/varlena.h" +#include "plugin_utils/int16.h" #include "plugin_utils/date.h" #include "plugin_utils/datetime.h" #include "plugin_utils/timestamp.h" @@ -1803,18 +1804,26 @@ Datum text_cast_int8(PG_FUNCTION_ARGS) Datum varlena_cast_int8(PG_FUNCTION_ARGS) { - Datum txt = PG_GETARG_DATUM(0); char* tmp = NULL; int128 result; - Oid typeOutput = InvalidOid; - bool typIsVarlena = false; - getTypeOutputInfo(fcinfo->argTypes[0], &typeOutput, &typIsVarlena); - if (typIsVarlena) { - tmp = DatumGetCString(DirectFunctionCall1(textout, txt)); - } else { - tmp = DatumGetCString(OidOutputFunctionCall(typeOutput, txt)); + bool hasLenError = false; + tmp = AnyElementGetCString(fcinfo->argTypes[0], PG_GETARG_DATUM(0), &hasLenError); + bool hasError = false; + (void)scanint16(tmp, fcinfo->can_ignore || !SQL_MODE_STRICT(), &result, &hasError); + if (result > (int128)INT64_MAX || result < (int128)INT64_MIN) { + if (result > (int128)UINT64_MAX) { + result = -1; + } else { + result = result > 0 ? 
(int64)result : INT64_MIN; + } + ereport((fcinfo->can_ignore || !SQL_MODE_STRICT()) ? WARNING : ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("bigint out of range"))); + } else if (!hasError && hasLenError) { + ereport((!fcinfo->can_ignore && SQL_MODE_STRICT()) ? ERROR : WARNING, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("Data truncated for input data: \"%s\"", tmp))); } - result = DatumGetInt128(DirectFunctionCall1(int16in, CStringGetDatum(tmp))); - PG_RETURN_INT64(checkSignedRange(result, fcinfo)); + PG_RETURN_INT64((int64)result); } #endif diff --git a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp index d81e9983c..3f15c43ba 100644 --- a/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp +++ b/contrib/dolphin/plugin_utils/adt/unsigned_int.cpp @@ -5201,6 +5201,33 @@ Datum varlena_cast_ui8(PG_FUNCTION_ARGS) return DirectFunctionCall1Coll(f8_cast_ui8, InvalidOid, val, fcinfo->can_ignore); } +PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_int1); +extern "C" DLL_PUBLIC Datum varlena_cast_int1(PG_FUNCTION_ARGS); +Datum varlena_cast_int1(PG_FUNCTION_ARGS) +{ + fcinfo->ccontext = COERCION_EXPLICIT; + Datum val = Varlena2Float8(fcinfo); + PG_RETURN_INT8(DirectFunctionCall1Coll(f8toi1, InvalidOid, val, fcinfo->can_ignore)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_int2); +extern "C" DLL_PUBLIC Datum varlena_cast_int2(PG_FUNCTION_ARGS); +Datum varlena_cast_int2(PG_FUNCTION_ARGS) +{ + fcinfo->ccontext = COERCION_EXPLICIT; + Datum val = Varlena2Float8(fcinfo); + PG_RETURN_INT16(DirectFunctionCall1Coll(dtoi2, InvalidOid, val, fcinfo->can_ignore)); +} + +PG_FUNCTION_INFO_V1_PUBLIC(varlena_cast_int4); +extern "C" DLL_PUBLIC Datum varlena_cast_int4(PG_FUNCTION_ARGS); +Datum varlena_cast_int4(PG_FUNCTION_ARGS) +{ + fcinfo->ccontext = COERCION_EXPLICIT; + Datum val = Varlena2Float8(fcinfo); + PG_RETURN_INT32(DirectFunctionCall1Coll(dtoi4, InvalidOid, val, fcinfo->can_ignore)); +} + 
PG_FUNCTION_INFO_V1_PUBLIC(dolphin_float4not); extern "C" DLL_PUBLIC Datum dolphin_float4not(PG_FUNCTION_ARGS); Datum dolphin_float4not(PG_FUNCTION_ARGS) diff --git a/contrib/dolphin/plugin_utils/adt/varlena.cpp b/contrib/dolphin/plugin_utils/adt/varlena.cpp index 5a1e2cb7c..bfe5e6f93 100644 --- a/contrib/dolphin/plugin_utils/adt/varlena.cpp +++ b/contrib/dolphin/plugin_utils/adt/varlena.cpp @@ -10742,7 +10742,7 @@ Datum blob_any_value(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(vlena); } -static char* AnyElementGetCString(Oid anyOid, Datum anyDatum, bool* hasError = nullptr) +char* AnyElementGetCString(Oid anyOid, Datum anyDatum, bool* hasError) { if (!OidIsValid(anyOid)) { return DatumGetCString(DirectFunctionCall1(textout, anyDatum)); diff --git a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql index dc33c2d85..aff62dff4 100644 --- a/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql +++ b/contrib/dolphin/rollback_script/dolphin--3.0--2.0.sql @@ -416,6 +416,10 @@ DROP FUNCTION IF EXISTS pg_catalog.timestamp_explicit(TEXT); DROP FUNCTION IF EXISTS pg_catalog.ln(year); DROP FUNCTION IF EXISTS pg_catalog.ln(json); +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int1(anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int2(anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int4(anyelement) CASCADE; + DROP FUNCTION IF EXISTS pg_catalog.str_to_date(boolean, TEXT); DROP FUNCTION IF EXISTS pg_catalog.str_to_date(longblob, TEXT); DROP FUNCTION IF EXISTS pg_catalog.str_to_date(anyenum, TEXT); diff --git a/contrib/dolphin/sql/uint_cast3.sql b/contrib/dolphin/sql/uint_cast3.sql index 829011356..8d6fe6aa5 100644 --- a/contrib/dolphin/sql/uint_cast3.sql +++ b/contrib/dolphin/sql/uint_cast3.sql @@ -137,42 +137,23 @@ cast(c5 as unsigned) from t_binary0001 order by 1,2,3,4,5; select c1, convert(c2, unsigned), convert(c3, unsigned), convert(c4, unsigned), convert(c5, unsigned), 
convert(c6, unsigned), convert(c7, unsigned) from t_binary0001 order by 1,2,3,4,5,6,7; ---- 测试uint1/uint2/uint4/uint8,预期表现和unsigned一致 -select c1, cast(c2 as uint1), cast(c3 as uint1), cast(c4 as uint1), -cast(c5 as uint1) from t_binary0001 order by 1,2,3,4,5; -select c1, convert(c2, uint1), convert(c3, uint1), convert(c4, uint1), -convert(c5, uint1), convert(c6, uint1), convert(c7, uint1) from -t_binary0001 order by 1,2,3,4,5,6,7; -select c1, cast(c2 as uint2), cast(c3 as uint2), cast(c4 as uint2), -cast(c5 as uint2) from t_binary0001 order by 1,2,3,4,5; -select c1, convert(c2, uint2), convert(c3, uint2), convert(c4, uint2), -convert(c5, uint2), convert(c6, uint2), convert(c7, uint2) from -t_binary0001 order by 1,2,3,4,5,6,7; -select c1, cast(c2 as uint4), cast(c3 as uint4), cast(c4 as uint4), -cast(c5 as uint4) from t_binary0001 order by 1,2,3,4,5; -select c1, convert(c2, uint4), convert(c3, uint4), convert(c4, uint4), -convert(c5, uint4), convert(c6, uint4), convert(c7, uint4) from -t_binary0001 order by 1,2,3,4,5,6,7; -select c1, cast(c2 as uint8), cast(c3 as uint8), cast(c4 as uint8), -cast(c5 as uint8) from t_binary0001 order by 1,2,3,4,5; -select c1, convert(c2, uint8), convert(c3, uint8), convert(c4, uint8), -convert(c5, uint8), convert(c6, uint8), convert(c7, uint8) from -t_binary0001 order by 1,2,3,4,5,6,7; --- 建表并æ’å…¥æ•°æ® create table t_binary(a binary(255)); -create table t_unsigned(a int8 unsigned); +create table t_integer(a1 int8 unsigned, a2 int4 unsigned, a3 int2 unsigned, a4 int1 unsigned, + a5 int8, a6 int4, a7 int2, a8 int1); insert into t_binary values('2e1'); insert into t_binary values('1ab'); insert into t_binary values('0x1ab'); insert into t_binary values('123'); --- 测试转æ¢ç»“æžœ -select cast(a as unsigned) from t_binary; -insert ignore into t_unsigned select a from t_binary; -select * from t_unsigned; +select cast(a as int8 unsigned), cast(a as int4 unsigned), cast(a as int2 unsigned), cast(a as int1 unsigned), +cast(a as int8), cast(a as 
int4), cast(a as int2), cast(a as int1) from t_binary; +insert ignore into t_integer select a, a, a, a, a, a, a, a from t_binary; +select * from t_integer; --- æ¸…ç† -drop table t_unsigned cascade; +drop table t_integer cascade; drop table t_binary cascade; drop table t_binary0001 cascade; diff --git a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql index b1dbcbc8c..a3e75a9ba 100644 --- a/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql +++ b/contrib/dolphin/upgrade_script/dolphin--2.0--3.0.sql @@ -1104,6 +1104,21 @@ CREATE OR REPLACE FUNCTION pg_catalog.ln(json) RETURNS double precision LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.ln(cast($1 as double precision))'; +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int1(anyelement) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.varlena_cast_int1 ( +anyelement +) RETURNS int1 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'varlena_cast_int1'; + +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int2(anyelement) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.varlena_cast_int2 ( +anyelement +) RETURNS int2 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'varlena_cast_int2'; + +DROP FUNCTION IF EXISTS pg_catalog.varlena_cast_int4(anyelement) CASCADE; +CREATE OR REPLACE FUNCTION pg_catalog.varlena_cast_int4 ( +anyelement +) RETURNS int4 LANGUAGE C IMMUTABLE STRICT as '$libdir/dolphin', 'varlena_cast_int4'; + CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(boolean, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(longblob, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; CREATE OR REPLACE FUNCTION pg_catalog.str_to_date(anyenum, TEXT) RETURNS TEXT LANGUAGE SQL IMMUTABLE STRICT as 'select pg_catalog.str_to_date(cast($1 as TEXT), $2)'; -- Gitee From 
4f03c184311a8b987783bc0fc2a295ae37015cfa Mon Sep 17 00:00:00 2001 From: wangfeihuo Date: Wed, 24 Jan 2024 17:48:23 +0800 Subject: [PATCH 212/434] =?UTF-8?q?=E3=80=90=E6=A0=87=E9=A2=98=E3=80=91?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8QS9F=E6=89=80=E7=A4=BA=E7=9A=84cts=E5=9C=BA?= =?UTF-8?q?=E6=99=AF=E4=B8=8Bfloor=E5=87=BD=E6=95=B0datetime=E7=AD=89?= =?UTF-8?q?=E9=83=A8=E5=88=86=E7=B1=BB=E5=9E=8B=E5=AD=97=E6=AE=B5=E5=92=8C?= =?UTF-8?q?mysql=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98=20?= =?UTF-8?q?=E3=80=90=E5=AE=9E=E7=8E=B0=E5=86=85=E5=AE=B9=E3=80=91:=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DI8QS9F=E6=89=80=E7=A4=BA=E7=9A=84cts=E5=9C=BA?= =?UTF-8?q?=E6=99=AF=E4=B8=8Bfloor=E5=87=BD=E6=95=B0datetime=E7=AD=89?= =?UTF-8?q?=E9=83=A8=E5=88=86=E7=B1=BB=E5=9E=8B=E5=AD=97=E6=AE=B5=E5=92=8C?= =?UTF-8?q?mysql=E4=B8=8D=E4=B8=80=E8=87=B4=E7=9A=84=E9=97=AE=E9=A2=98?= =?UTF-8?q?=E3=80=82=20=E3=80=90=E6=A0=B9=E5=9B=A0=E5=88=86=E6=9E=90?= =?UTF-8?q?=E3=80=91:=20=E5=9B=A0=E4=B8=BActs=E5=9C=BA=E6=99=AF=E4=BF=AE?= =?UTF-8?q?=EF=BC=8Cinsert=E8=AF=AD=E5=8F=A5=E4=BC=9A=E8=A2=AB=E6=9B=BF?= =?UTF-8?q?=E6=8D=A2=E6=88=90INSERT=20INTO=20xxx=20SELECT=20floor("datetim?= =?UTF-8?q?e"::numeric=20xxx,=E6=AD=A4=E5=9C=BA=E6=99=AF=E4=B8=8B=EF=BC=8C?= =?UTF-8?q?sql=E8=A7=A3=E9=87=8A=E7=9A=84=E6=97=B6=E5=80=99numeric?= =?UTF-8?q?=E4=BC=9A=E8=A2=AB=E6=9B=BF=E6=8D=A2=E6=88=90"datetime"::numeri?= =?UTF-8?q?c(10,0)=20=E3=80=90=E5=AE=9E=E7=8E=B0=E6=96=B9=E6=A1=88?= =?UTF-8?q?=E3=80=91:=20=E7=94=9F=E6=88=90Insert=E7=9A=84=E6=97=B6?= =?UTF-8?q?=E5=80=99=EF=BC=8C=E5=A6=82=E6=9E=9C=E6=B2=A1=E6=8C=87=E5=AE=9A?= =?UTF-8?q?numeric=E7=9A=84=E7=B2=BE=E5=BA=A6=EF=BC=8C=E5=88=99=E8=BF=94?= =?UTF-8?q?=E5=9B=9Enumber=20=E3=80=90=E5=85=B3=E8=81=94=E9=9C=80=E6=B1=82?= =?UTF-8?q?=E6=88=96issue=E3=80=91:=20https://e.gitee.com/opengaussorg/das?= =?UTF-8?q?hboard=3Fissue=3DI8QS9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../expected/builtin_funcs/db_b_if.out | 8 +- 
.../expected/builtin_funcs/math_func.out | 69 ++++ .../expected/builtin_funcs/soundex.out | 2 +- .../case_sensitive_test/create_view5.out | 2 +- contrib/dolphin/expected/db_b_parser3.out | 2 +- contrib/dolphin/expected/db_b_parser4.out | 2 +- .../json_operator_test.out | 370 ++++++++--------- .../multi_type_operator_test.out | 56 +-- .../numeric_operator_test_max.out | 368 ++++++++--------- .../numeric_operator_test_normal.out | 374 +++++++++--------- .../time_operator_test.out | 362 ++++++++--------- contrib/dolphin/expected/prefixkey_index.out | 2 +- contrib/dolphin/expected/test_condition.out | 62 +-- .../dolphin/output/view_definer_test.source | 6 +- .../dolphin/plugin_utils/adt/format_type.cpp | 4 + .../dolphin/sql/builtin_funcs/math_func.sql | 27 ++ 16 files changed, 908 insertions(+), 808 deletions(-) diff --git a/contrib/dolphin/expected/builtin_funcs/db_b_if.out b/contrib/dolphin/expected/builtin_funcs/db_b_if.out index 1200d2d5b..7a432e71d 100644 --- a/contrib/dolphin/expected/builtin_funcs/db_b_if.out +++ b/contrib/dolphin/expected/builtin_funcs/db_b_if.out @@ -109,7 +109,7 @@ LINE 1: select if (true, 'aaa'::binary(5), date '2022-01-30') as a, ... CONTEXT: referenced column: a -- binary to numeric select if (true, 'aaa'::binary(5), 1.1::numeric(10, 2)) as a, if (false, 'aaa'::binary(5), 1.1::numeric(10, 2)) as b; -ERROR: CASE/ELSE could not convert type numeric to "binary" +ERROR: CASE/ELSE could not convert type number to "binary" LINE 1: select if (true, 'aaa'::binary(5), 1.1::numeric(10, 2)) as a... ^ CONTEXT: referenced column: a @@ -133,7 +133,7 @@ LINE 1: select if (true, 'aaa'::varbinary(5), date '2022-01-30') as ... 
CONTEXT: referenced column: a -- varbinary to numeric select if (true, 'aaa'::varbinary(5), 1.1::numeric(10, 2)) as a, if (false, 'aaa'::varbinary(5), 1.1::numeric(10, 2)) as b; -ERROR: CASE/ELSE could not convert type numeric to "varbinary" +ERROR: CASE/ELSE could not convert type number to "varbinary" LINE 1: select if (true, 'aaa'::varbinary(5), 1.1::numeric(10, 2)) a... ^ CONTEXT: referenced column: a @@ -157,7 +157,7 @@ LINE 1: select if (true, 'aaa'::blob, date '2022-01-30') as a, if (f... CONTEXT: referenced column: a -- blob to numeric select if (true, 'aaa'::blob, 1.1::numeric(10, 2)) as a, if (false, 'aaa'::blob, 1.1::numeric(10, 2)) as b; -ERROR: CASE types numeric and blob cannot be matched +ERROR: CASE types number and blob cannot be matched LINE 1: select if (true, 'aaa'::blob, 1.1::numeric(10, 2)) as a, if ... ^ CONTEXT: referenced column: a @@ -188,7 +188,7 @@ LINE 1: select if (true, 1.1::float8, true) as a, if (false, 1.1::fl... CONTEXT: referenced column: a -- numeric to boolean select if (true, 2.2::numeric(10, 2), true) as a, if (false, 2.2::numeric(10, 2), true) as b; -ERROR: CASE types boolean and numeric cannot be matched +ERROR: CASE types boolean and number cannot be matched LINE 1: select if (true, 2.2::numeric(10, 2), true) as a, if (false,... 
^ CONTEXT: referenced column: a diff --git a/contrib/dolphin/expected/builtin_funcs/math_func.out b/contrib/dolphin/expected/builtin_funcs/math_func.out index 5ea937cbc..fad5a5ed3 100644 --- a/contrib/dolphin/expected/builtin_funcs/math_func.out +++ b/contrib/dolphin/expected/builtin_funcs/math_func.out @@ -404,6 +404,73 @@ CONTEXT: referenced column: floor 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 7 | 1 | 20230205 | 191050 | 191050 | 20230205191050 | 20230205191050 | 20230205191050 | 20230205191050 | 2023 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5 | 0 (1 row) +set dolphin.sql_mode = 'sql_mode_full_group,pipes_as_concat,ansi_quotes'; +create table test_floor_cts select floor(`int1`) as floor_1, floor(`uint1`) as floor_2, + floor(`int2`) as floor_3, floor(`uint2`) as floor_4, floor(`int4`) as floor_5, floor(`uint4`) as floor_6, + floor(`int8`) as floor_7, floor(`uint8`) as floor_8, floor(`float4`) as floor_9, floor(`float8`) as floor_10, + floor(`numeric`) as floor_11, floor(`bit1`) as floor_12, floor(`bit64`) as floor_13, floor(`boolean`) as floor_14, + floor(`date`) as floor_15, floor(`time`) as floor_16, floor(`time(4)`) as floor_17, floor(`datetime`) as floor_18, + floor(`datetime(4)`) as floor_19, floor(`timestamp`) as floor_20, floor(`timestamp(4)`) as floor_21, + floor(`char`) as floor_22, floor(`varchar`) as floor_23, floor(`binary`) as floor_24, + floor(`varbinary`) as floor_25, floor(`tinyblob`) as floor_26, floor(`blob`) as floor_27, floor(`mediumblob`) as floor_28, floor(`longblob`) as floor_29, + floor(`text`) as floor_30, floor(`enum_t`) as floor_31, floor(`set_t`) as floor_32 from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor_22 +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor_23 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor_24 +WARNING: invalid input syntax for type double precision: 
"1.23a" +CONTEXT: referenced column: floor_25 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor_26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor_27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor_28 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: floor_29 +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: floor_30 +create table test_ln_cts as select + ln(`int1`) as ln_1, ln(`uint1`) as ln_2, ln(`int2`) as ln_3, ln(`uint2`) as ln_4, + ln(`int4`) as ln_5, ln(`uint4`) as ln_6, ln(`int8`) as ln_7, ln(`uint8`) as ln_8, + ln(`float4`) as ln_9, ln(`float8`) as ln_10, ln(`numeric`) as ln_11, ln(`bit1`) as ln_12, + ln(`bit64`) as ln_13, ln(`boolean`) as ln_14, ln(`date`) as ln_15, ln(`time`) as ln_16, + ln(`time(4)`) as ln_17, ln(`datetime`) as ln_18, ln(`datetime(4)`) as ln_19, ln(`timestamp`) as ln_20, + ln(`timestamp(4)`) as ln_21, ln(`char`) as ln_23, ln(`varchar`) as ln_24, ln(`binary`) as ln_25, + ln(`varbinary`) as ln_26, ln(`tinyblob`) as ln_27, ln(`blob`) as ln_28, ln(`mediumblob`) as ln_29, ln(`longblob`) as ln_30, + ln(`text`) as ln_31, ln(`enum_t`) as ln_32, ln(`set_t`) as ln_33 from test_type_table; +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln_23 +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln_24 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln_25 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln_26 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln_27 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln_28 +WARNING: invalid input syntax for type double 
precision: "1.23a" +CONTEXT: referenced column: ln_29 +WARNING: invalid input syntax for type double precision: "1.23a" +CONTEXT: referenced column: ln_30 +WARNING: invalid input syntax for type numeric: "1.23a" +CONTEXT: referenced column: ln_31 +select * from test_floor_cts; + floor_1 | floor_2 | floor_3 | floor_4 | floor_5 | floor_6 | floor_7 | floor_8 | floor_9 | floor_10 | floor_11 | floor_12 | floor_13 | floor_14 | floor_15 | floor_16 | floor_17 | floor_18 | floor_19 | floor_20 | floor_21 | floor_22 | floor_23 | floor_24 | floor_25 | floor_26 | floor_27 | floor_28 | floor_29 | floor_30 | floor_31 | floor_32 +---------+---------+---------+---------+---------+---------+---------+---------+---------+----------+----------+----------+----------+----------+----------+----------+----------+----------------+----------------+----------------+----------------+----------+----------+----------+----------+----------+----------+----------+----------+----------+----------+---------- + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 7 | 1 | 20230205 | 191050 | 191050 | 20230205191050 | 20230205191050 | 20230205191050 | 20230205191050 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5 +(1 row) + +select * from test_ln_cts; + ln_1 | ln_2 | ln_3 | ln_4 | ln_5 | ln_6 | ln_7 | ln_8 | ln_9 | ln_10 | ln_11 | ln_12 | ln_13 | ln_14 | ln_15 | ln_16 | ln_17 | ln_18 | ln_19 | ln_20 | ln_21 | ln_23 | ln_24 | ln_25 | ln_26 | ln_27 | ln_28 | ln_29 | ln_30 | ln_31 | ln_32 | ln_33 
+------+--------------------+------+--------------------+------+--------------------+------+--------------------+------+-------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+--------------------+--------------------+-------------------- + 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0.0000000000000000 | 0 | 0 | 0.0000000000000000 | 0.0000000000000000 | 1.9459101490553133 | 0.0000000000000000 | 16.822687342557736 | 12.160290452875046 | 12.160292261823946 | 30.638197909965809 | 30.638197909965832 | 30.638197909965809 | 30.638197909965832 | 0.2070141693843261 | 0.2070141693843261 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.207014169384326 | 0.2070141693843261 | 0.0000000000000000 | 1.6094379124341004 +(1 row) + select exp(709); exp ----------------------- @@ -423,5 +490,7 @@ drop table if exists test_double_acos; drop table if exists test_double_exp; drop table if exists test_double_degrees; drop table if exists test_type_table; +drop table if exists test_floor_cts; +drop table if exists test_ln_cts; drop schema test_math_func cascade; reset current_schema; diff --git a/contrib/dolphin/expected/builtin_funcs/soundex.out b/contrib/dolphin/expected/builtin_funcs/soundex.out index 2fba64913..dc6c6bc24 100644 --- a/contrib/dolphin/expected/builtin_funcs/soundex.out +++ b/contrib/dolphin/expected/builtin_funcs/soundex.out @@ -199,7 +199,7 @@ LINE 1: select true sounds like 3; HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
CONTEXT: referenced column: soundex_difference select 2.2 sounds like false; -ERROR: function pg_catalog.soundex_difference(numeric, boolean) does not exist +ERROR: function pg_catalog.soundex_difference(number, boolean) does not exist LINE 1: select 2.2 sounds like false; ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. diff --git a/contrib/dolphin/expected/case_sensitive_test/create_view5.out b/contrib/dolphin/expected/case_sensitive_test/create_view5.out index 8d1d2e34f..7f6a9ea12 100644 --- a/contrib/dolphin/expected/case_sensitive_test/create_view5.out +++ b/contrib/dolphin/expected/case_sensitive_test/create_view5.out @@ -137,7 +137,7 @@ CREATE VIEW tt1 AS --------+----------------------+-----------+----------+------------- a | character varying | | extended | b | character varying | | extended | - c | numeric | | main | + c | number | | main | d | character varying(4) | | extended | View definition: SELECT * diff --git a/contrib/dolphin/expected/db_b_parser3.out b/contrib/dolphin/expected/db_b_parser3.out index 2ddae8819..8fecca77c 100644 --- a/contrib/dolphin/expected/db_b_parser3.out +++ b/contrib/dolphin/expected/db_b_parser3.out @@ -336,7 +336,7 @@ select 0x; --equal to select 0 x; --测试点五:验è¯truncate函数 select truncate(111.28);--返回111 -ERROR: function truncate(numeric) does not exist +ERROR: function truncate(number) does not exist LINE 1: select truncate(111.28); ^ HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
diff --git a/contrib/dolphin/expected/db_b_parser4.out b/contrib/dolphin/expected/db_b_parser4.out index 43ee7e01e..c79bf6b39 100644 --- a/contrib/dolphin/expected/db_b_parser4.out +++ b/contrib/dolphin/expected/db_b_parser4.out @@ -13,7 +13,7 @@ Table "db_b_parser4.tb_db_b_parser_0002" Column | Type | Modifiers --------+---------------+----------- a | numeric(10,0) | - b | numeric | + b | number | c | numeric(10,0) | d | numeric(10,0) | e | numeric(10,0) | diff --git a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out index 2fafef725..04d08e244 100644 --- a/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out +++ b/contrib/dolphin/expected/operator_compatibility_test/json_operator_test.out @@ -1221,8 +1221,8 @@ SHOW COLUMNS FROM test_json_type; int1&json | tinyint | YES | | NULL | int1||json | boolean | YES | | NULL | int1&&json | boolean | YES | | NULL | - int1_div_json | numeric | YES | | NULL | - int1_mod_json | numeric | YES | | NULL | + int1_div_json | number | YES | | NULL | + int1_mod_json | number | YES | | NULL | int1_xor_json | integer | YES | | NULL | int1_and_json | boolean | YES | | NULL | int1_or_json | boolean | YES | | NULL | @@ -1238,8 +1238,8 @@ SHOW COLUMNS FROM test_json_type; uint1&json | uint1 | YES | | NULL | uint1||json | boolean | YES | | NULL | uint1&&json | boolean | YES | | NULL | - uint1_div_json | numeric | YES | | NULL | - uint1_mod_json | numeric | YES | | NULL | + uint1_div_json | number | YES | | NULL | + uint1_mod_json | number | YES | | NULL | uint1_xor_json | integer | YES | | NULL | uint1_and_json | boolean | YES | | NULL | uint1_or_json | boolean | YES | | NULL | @@ -1255,8 +1255,8 @@ SHOW COLUMNS FROM test_json_type; int2&json | smallint | YES | | NULL | int2||json | boolean | YES | | NULL | int2&&json | boolean | YES | | NULL | - int2_div_json | numeric | YES | | NULL | - int2_mod_json | numeric | 
YES | | NULL | + int2_div_json | number | YES | | NULL | + int2_mod_json | number | YES | | NULL | int2_xor_json | integer | YES | | NULL | int2_and_json | boolean | YES | | NULL | int2_or_json | boolean | YES | | NULL | @@ -1272,8 +1272,8 @@ SHOW COLUMNS FROM test_json_type; uint2&json | uint2 | YES | | NULL | uint2||json | boolean | YES | | NULL | uint2&&json | boolean | YES | | NULL | - uint2_div_json | numeric | YES | | NULL | - uint2_mod_json | numeric | YES | | NULL | + uint2_div_json | number | YES | | NULL | + uint2_mod_json | number | YES | | NULL | uint2_xor_json | integer | YES | | NULL | uint2_and_json | boolean | YES | | NULL | uint2_or_json | boolean | YES | | NULL | @@ -1289,8 +1289,8 @@ SHOW COLUMNS FROM test_json_type; int4&json | integer | YES | | NULL | int4||json | boolean | YES | | NULL | int4&&json | boolean | YES | | NULL | - int4_div_json | numeric | YES | | NULL | - int4_mod_json | numeric | YES | | NULL | + int4_div_json | number | YES | | NULL | + int4_mod_json | number | YES | | NULL | int4_xor_json | integer | YES | | NULL | int4_and_json | boolean | YES | | NULL | int4_or_json | boolean | YES | | NULL | @@ -1306,8 +1306,8 @@ SHOW COLUMNS FROM test_json_type; uint4&json | uint4 | YES | | NULL | uint4||json | boolean | YES | | NULL | uint4&&json | boolean | YES | | NULL | - uint4_div_json | numeric | YES | | NULL | - uint4_mod_json | numeric | YES | | NULL | + uint4_div_json | number | YES | | NULL | + uint4_mod_json | number | YES | | NULL | uint4_xor_json | integer | YES | | NULL | uint4_and_json | boolean | YES | | NULL | uint4_or_json | boolean | YES | | NULL | @@ -1323,8 +1323,8 @@ SHOW COLUMNS FROM test_json_type; int8&json | bigint | YES | | NULL | int8||json | boolean | YES | | NULL | int8&&json | boolean | YES | | NULL | - int8_div_json | numeric | YES | | NULL | - int8_mod_json | numeric | YES | | NULL | + int8_div_json | number | YES | | NULL | + int8_mod_json | number | YES | | NULL | int8_xor_json | integer | YES | | NULL | 
int8_and_json | boolean | YES | | NULL | int8_or_json | boolean | YES | | NULL | @@ -1340,8 +1340,8 @@ SHOW COLUMNS FROM test_json_type; uint8&json | uint8 | YES | | NULL | uint8||json | boolean | YES | | NULL | uint8&&json | boolean | YES | | NULL | - uint8_div_json | numeric | YES | | NULL | - uint8_mod_json | numeric | YES | | NULL | + uint8_div_json | number | YES | | NULL | + uint8_mod_json | number | YES | | NULL | uint8_xor_json | integer | YES | | NULL | uint8_and_json | boolean | YES | | NULL | uint8_or_json | boolean | YES | | NULL | @@ -1349,7 +1349,7 @@ SHOW COLUMNS FROM test_json_type; float4-json | double precision | YES | | NULL | float4*json | double precision | YES | | NULL | float4/json | double precision | YES | | NULL | - float4%json | numeric | YES | | NULL | + float4%json | number | YES | | NULL | float4^json | double precision | YES | | NULL | float4>>json | bigint | YES | | NULL | float4<>json | bigint | YES | | NULL | float8<>json | bigint | YES | | NULL | numeric<>json | "bit" | YES | | NULL | bit1<>json | "bit" | YES | | NULL | bit64<>json | bigint | YES | | NULL | boolean<>json | bigint | YES | | NULL | date<>json | bigint | YES | | NULL | time<>json | bigint | YES | | NULL | time(4)<>json | bigint | YES | | NULL | datetime<>json | bigint | YES | | NULL | datetime(4)<>json | bigint | YES | | NULL | timestamp<>json | bigint | YES | | NULL | timestamp(4)<>json | bigint | YES | | NULL | year<>json | bigint | YES | | NULL | char<>json | bigint | YES | | NULL | varchar<>json | bigint | YES | | NULL | text<>json | bigint | YES | | NULL | enum_t<>json | bigint | YES | | NULL | set_t<>float4 | bigint | YES | | NULL | json<>float8 | bigint | YES | | NULL | json<>numeric | bigint | YES | | NULL | json<>boolean | bigint | YES | | NULL | json<>date | bigint | YES | | NULL | json<>time | bigint | YES | | NULL | json<