diff --git a/.github/vale-styles/Yugabyte/spelling-exceptions.txt b/.github/vale-styles/Yugabyte/spelling-exceptions.txt index 0cb8a89d3fca..2d1d30f6c486 100644 --- a/.github/vale-styles/Yugabyte/spelling-exceptions.txt +++ b/.github/vale-styles/Yugabyte/spelling-exceptions.txt @@ -300,6 +300,8 @@ hotfix hotfixed hotfixes hotfixing +hotspot +hotspots http https Hudi @@ -352,6 +354,7 @@ Klar Knative Kramdown Kroki +Kube Kubecost kubectl Kubernetes diff --git a/LICENSE.md b/LICENSE.md index 8558c904f172..9fafa3520aaa 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,5 +1,8 @@ ## YugabyteDB Licensing +Source code in this repository is variously licensed under the [Apache License 2.0](licenses/APACHE-LICENSE-2.0.txt) and the [Polyform Free Trial License 1.0.0](licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt). A copy of each license can be found in the [licenses](licenses) directory. -Source code in this repository is variously licensed under the Apache License 2.0 and the Polyform Free Trial License 1.0.0. A copy of each license can be found in the [licenses](licenses) directory. +The build produces two sets of binaries: -The build produces two sets of binaries - one set that falls under the Apache License 2.0 and another set that falls under the Polyform Free Trial License 1.0.0. The binaries that contain `-managed` in the artifact name are licensed under the Polyform Free Trial License 1.0.0. By default, only the Apache License 2.0 binaries are generated. +(1) YugabyteDB, available [here](https://download.yugabyte.com/local#linux) and licensed under Apache License 2.0 + +(2) YugabyteDB Aeon (Self-Managed), available [here](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/installer/) and licensed under Polyform Free Trial License 1.0.0. 
diff --git a/docs/.eslintrc b/docs/.eslintrc deleted file mode 100644 index 65d33041f737..000000000000 --- a/docs/.eslintrc +++ /dev/null @@ -1,91 +0,0 @@ -{ - "root": true, - "parser": "@babel/eslint-parser", - "env": { - "browser": true, - "node": true - }, - "plugins": [ - "jsx-a11y", - "import" - ], - "extends": [ - "airbnb", - "eslint:recommended", - "xo-space" - ], - "rules": { - "arrow-parens": 0, - "camelcase": 0, - "comma-dangle": [ - 1, - "always-multiline" - ], - "complexity": [ - "error", - { - "max": 25 - } - ], - "import/no-extraneous-dependencies": 0, - "import/prefer-default-export": 0, - "key-spacing": [ - "error", - { - "multiLine": { - "mode": "minimum" - } - } - ], - "new-cap": [ - "error", - { - "capIsNewExceptions": [ - "DrawerNavigator", - "StackNavigator", - "TabNavigator" - ] - } - ], - "no-cond-assign": [ - 2, - "except-parens" - ], - "no-console": 1, - "no-debugger": 1, - "no-multi-assign": 0, - "no-return-assign": [ - 2, - "except-parens" - ], - "no-unused-vars": 1, - "no-use-before-define": 0, - "no-warning-comments": 0, - "object-curly-spacing": 0, - "one-var": 0, - "one-var-declaration-per-line": 0, - "react/forbid-prop-types": 0, - "react/jsx-closing-bracket-location": 0, - "react/jsx-first-prop-new-line": 0, - "react/prefer-stateless-function": 0, - "react/require-default-props": 0, - "react/jsx-filename-extension": [ - 1, - { - "extensions": [ - ".js", - ".jsx" - ] - } - ] - }, - "settings": { - "import/resolver": { - "node": { - "paths": [ - "./src" - ] - } - } - } -} diff --git a/docs/assets/scss/_sidebar-toc.scss b/docs/assets/scss/_sidebar-toc.scss index 2e192b2337fa..922fdc80e32a 100644 --- a/docs/assets/scss/_sidebar-toc.scss +++ b/docs/assets/scss/_sidebar-toc.scss @@ -15,6 +15,7 @@ height: auto; scrollbar-width: thin; overflow-y: auto; + scroll-behavior: smooth; &::-webkit-scrollbar { height: 5px; diff --git a/docs/assets/scss/_styles_project.scss b/docs/assets/scss/_styles_project.scss index 4ab64ea4d640..aae48e96e73c 
100644 --- a/docs/assets/scss/_styles_project.scss +++ b/docs/assets/scss/_styles_project.scss @@ -31,6 +31,13 @@ @import "./_yb_tags.scss"; @import "./_yb_container.scss"; +@font-face { + font-family: 'SFMonoSemibold'; + src: url('/fonts/sf-mono/SFMonoSemibold.woff2') format('woff2'); + font-weight: normal; + font-style: normal; +} + html { padding: 0 !important; } diff --git a/docs/assets/scss/_yb_container.scss b/docs/assets/scss/_yb_container.scss index bdf994562dc5..e16ed6e90ca8 100644 --- a/docs/assets/scss/_yb_container.scss +++ b/docs/assets/scss/_yb_container.scss @@ -183,11 +183,11 @@ html { @media (max-width: 1199px) { .td-main { .content-parent { - // max-width: calc(100% - 250px);max-width justify-content: center; padding-left: 20px; padding-right: 20px; margin-left: 0; + overflow: hidden; .content-child { max-width: 100%; @@ -196,7 +196,6 @@ html { main { width: 100%; - // max-width: 828px;max-width } } } @@ -259,12 +258,6 @@ html { } } -@media (max-width: 1300px) and(min-width: 1200px) { - .td-main .content-parent { - // padding: 0 85px;padding - } -} - .dragging { .td-main .content-parent .content-child, .td-main aside.td-sidebar .left-sidebar-wrap, diff --git a/docs/assets/scss/_yb_headings.scss b/docs/assets/scss/_yb_headings.scss index 84daf2013d6e..8ebed43f627f 100644 --- a/docs/assets/scss/_yb_headings.scss +++ b/docs/assets/scss/_yb_headings.scss @@ -232,6 +232,53 @@ } } +body.configuration { + .td-content { + h2, + .h2, + h3, + .h3 { + margin-top: 96px; + } + + h5:not(:first-child) { + font-family: 'SFMonoSemibold'; + font-size: 16px; + font-weight: normal; + line-height: 24px; + position: relative; + margin-top: 64px; + + &::after { + position: absolute; + top: calc(7.5rem + 20px - 32px); + content: ""; + display: block; + width: 100%; + height: 1px; + background: #D7DEE4; + } + } + + h5.first-h5, + header + h5:not(:first-child), + .main-heading-with-version + h5:not(:first-child), + h2 + h5:not(:first-child), + h3 + h5:not(:first-child), + 
h4 + h5:not(:first-child) { + margin-top: 32px; + + &::after { + display: none; + } + } + + h5.first-h5 { + margin-top: 96px; + } + } +} + @media (hover: none) and (pointer: coarse), (max-width: 576px) { .td-heading-self-link { visibility: hidden; @@ -241,6 +288,3 @@ .td-searchpage .td-content h1 { margin-bottom: 16px; } -.content-parent { - overflow: hidden; -} diff --git a/docs/assets/scss/_yb_tags.scss b/docs/assets/scss/_yb_tags.scss index 2d45287736fe..9507788c9c86 100644 --- a/docs/assets/scss/_yb_tags.scss +++ b/docs/assets/scss/_yb_tags.scss @@ -36,6 +36,28 @@ color: #097345; } + &.restart-needed { + background: #E8E9FE; + color: #4F4FA4; + } + + &.t-server { + background: #E5EDFF; + color: #2B59C3; + + &::before { + content: ""; + background: url(/icons/t-server.svg) center no-repeat; + width: 18px; + height: 18px; + margin-right: 4px; + } + } + + &.deprecated { + background: #FEEDED; + color: #DA1515; + } &.ysql { background: #CBCCFB; @@ -51,6 +73,13 @@ text-decoration: none !important; } } + .tags-row{ + margin-bottom: 16px; + display: flex; + flex-flow: wrap; + gap: 6px; + font-size: 0; + } } .tag.release { @@ -112,7 +141,7 @@ white-space: normal; text-transform: none; - &:before { + &::before { position: absolute; left: -13px; content: ""; diff --git a/docs/config/_default/hugo.toml b/docs/config/_default/hugo.toml index e79b2ebf4e07..65230491826c 100644 --- a/docs/config/_default/hugo.toml +++ b/docs/config/_default/hugo.toml @@ -18,7 +18,7 @@ enableGitInfo = true [module] [module.hugoVersion] extended = true - min = "0.104.3" + min = "0.145.0" [[module.imports]] path = "github.com/google/docsy" disable = false diff --git a/docs/config/_default/menus.toml b/docs/config/_default/menus.toml index b5061baa4fe6..f52f856cdf13 100644 --- a/docs/config/_default/menus.toml +++ b/docs/config/_default/menus.toml @@ -713,17 +713,41 @@ showSection = true [[preview_tutorials]] - name = "Cloud" + name = "AI" + weight = 20 + identifier = "tutorials-ai" + url = 
"/preview/tutorials/ai/" + [preview_tutorials.params] + showSection = true + +[[preview_tutorials]] + name = "RAG" + weight = 10 + identifier = "tutorials-ai-rag" + parent = "tutorials-ai" + [preview_tutorials.params] + showSection = true + +[[preview_tutorials]] + name = "Vector basics" + weight = 20 + identifier = "tutorials-ai-vector" + parent = "tutorials-ai" + [preview_tutorials.params] + showSection = true + +[[preview_tutorials]] + name = "Agentic" weight = 30 - identifier = "tutorials-cloud" + identifier = "tutorials-ai-agentic" + parent = "tutorials-ai" [preview_tutorials.params] showSection = true [[preview_tutorials]] - name = "AI" - weight = 35 - identifier = "tutorials-ai" - url = "/preview/tutorials/ai/" + name = "Cloud" + weight = 30 + identifier = "tutorials-cloud" [preview_tutorials.params] showSection = true diff --git a/docs/content/_index.md b/docs/content/_index.md index a1ed100a7294..2424c58e35ce 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -7,7 +7,6 @@ type: indexpage layout: list breadcrumbDisable: true weight: 1 -showRightNav: true unversioned: true --- diff --git a/docs/content/preview/api/ysql/the-sql-language/statements/ddl_alter_table.md b/docs/content/preview/api/ysql/the-sql-language/statements/ddl_alter_table.md index e3fe49cc1d25..599ddc16ad90 100644 --- a/docs/content/preview/api/ysql/the-sql-language/statements/ddl_alter_table.md +++ b/docs/content/preview/api/ysql/the-sql-language/statements/ddl_alter_table.md @@ -31,7 +31,7 @@ Use the `ALTER TABLE` statement to change the definition of a table.

{{< note title="Table inheritance is not yet supported" >}} -YSQL in the present "latest" YugabyteDB does not yet support the "table inheritance" feature that is described in the [PostgreSQL documentation](https://www.postgresql.org/docs/15/ddl-inherit.html). The attempt to create a table that inherits another table causes the _0A000 (feature_not_supported)_ error with the message _"INHERITS not supported yet"_. This means that the syntax that the `table_expr` rule allows doesn't not yet bring any useful meaning. +YSQL in the present "latest" YugabyteDB does not yet support the "table inheritance" feature that is described in the [PostgreSQL documentation](https://www.postgresql.org/docs/15/ddl-inherit.html). The attempt to create a table that inherits another table causes the _0A000 (feature_not_supported)_ error with the message _"INHERITS not supported yet"_. This means that the syntax that the `table_expr` rule allows doesn't yet bring any useful meaning. It says that you can write, for example, this: @@ -54,9 +54,23 @@ These variants are useful only when at least one other table inherits `t`. But a Specify one of the following actions. -#### ADD [ COLUMN ] [ IF NOT EXISTS ] *column_name* *data_type* [*constraint*](#constraints) +#### ADD [ COLUMN ] [ IF NOT EXISTS ] *column_name* *data_type* *constraint* -Add the specified column with the specified data type and constraint. +Add the specified column with the specified data type and [constraint](#constraints). + +##### Table rewrites + +ADD COLUMN … DEFAULT statements require a [table rewrite](#alter-table-operations-that-involve-a-table-rewrite) when the default value is a _volatile_ expression. [Volatile expressions](https://www.postgresql.org/docs/current/xfunc-volatility.html#XFUNC-VOLATILITY) can return different results for different rows, so a table rewrite is required to fill in values for existing rows. For non-volatile expressions, no table rewrite is required. 
+ +Examples of volatile expressions: + +- ALTER TABLE … ADD COLUMN v1 INT DEFAULT random() +- ALTER TABLE … ADD COLUMN v2 UUID DEFAULT gen_random_uuid() + +Examples of non-volatile expressions (no table rewrite): + +- ALTER TABLE … ADD COLUMN nv1 INT DEFAULT 5 +- ALTER TABLE … ADD COLUMN nv2 timestamp DEFAULT now() -- uses the same timestamp now() for all existing rows #### RENAME TO *table_name* @@ -71,7 +85,9 @@ Renaming a table is a non blocking metadata change operation. #### SET TABLESPACE *tablespace_name* Asynchronously change the tablespace of an existing table. + The tablespace change will immediately reflect in the config of the table, however the tablet move by the load balancer happens in the background. + While the load balancer is performing the move it is perfectly safe from a correctness perspective to do reads and writes, however some query optimization that happens based on the data location may be off while data is being moved. ##### Example @@ -221,24 +237,20 @@ alter table parents drop column b cascade; It quietly succeeds. Now `\d children` shows that the foreign key constraint `children_fk` has been transitively dropped. -#### ADD [*alter_table_constraint*](#constraints) +#### ADD *alter_table_constraint* -Add the specified constraint to the table. +Add the specified [constraint](#constraints) to the table. +##### Table rewrites -{{< warning >}} -Adding a `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table. -This happens because of the clustered storage by primary key that YugabyteDB uses to store rows and indexes. -Tables without a `PRIMARY KEY` have a hidden one underneath and rows are stored clustered on it. The secondary indexes of the table -link to this hidden `PRIMARY KEY`. -While the tables and indexes are being rewritten, you may lose any modifications made to the table. 
-For reference, the same semantics as [Alter type with table rewrite](#alter-type-with-table-rewrite) apply. -{{< /warning >}} +Adding a `PRIMARY KEY` constraint results in a full table rewrite of the main table and all associated indexes, which can be a potentially expensive operation. For more details about table rewrites, see [Alter table operations that involve a table rewrite](#alter-table-operations-that-involve-a-table-rewrite). + +The table rewrite is needed because of how YugabyteDB stores rows and indexes. In YugabyteDB, data is distributed based on the primary key; when a table does not have an explicit primary key assigned, YugabyteDB automatically creates an internal row ID to use as the table's primary key. As a result, these rows need to be rewritten to use the newly added primary key column. For more information, refer to [Primary keys](../../../../../develop/data-modeling/primary-keys-ysql). #### ALTER [ COLUMN ] *column_name* [ SET DATA ] TYPE *data_type* [ COLLATE *collation* ] [ USING *expression* ] Change the type of an existing column. The following semantics apply: -- If data on disk is required to change, a full table rewrite is needed. + - If the optional `COLLATE` clause is not specified, the default collation for the new column type will be used. - If the optional `USING` clause is not provided, the default conversion for the new column value will be the same as an assignment cast from the old type to the new type. - A `USING` clause must be included when there is no implicit assignment cast available from the old type to the new type. @@ -246,48 +258,51 @@ Change the type of an existing column. The following semantics apply: - Alter type is not supported for tables with rules (limitation inherited from PostgreSQL). - Alter type is not supported for tables with CDC streams, or xCluster replication when it requires data on disk to change. See [#16625](https://github.com/yugabyte/yugabyte-db/issues/16625). 
-##### Alter type without table-rewrite +##### Table rewrites -If the change doesn't require data on disk to change, concurrent DMLs to the table can be safely performed as shown in the following example: +Altering a column's type requires a [full table rewrite](#alter-table-operations-that-involve-a-table-rewrite) of the table, and of any indexes that contain this column, when the underlying storage format changes or if the data changes. +The following type changes commonly require a table rewrite: -```sql -CREATE TABLE test (id BIGSERIAL PRIMARY KEY, a VARCHAR(50)); -ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(51); -``` +| From | To | Reason for table rewrite | +| ------------ | -------------- | --------------------------------------------------------------------- | +| INTEGER | TEXT | Different storage formats. | +| TEXT | INTEGER | Needs parsing and validation. | +| JSON | JSONB | Different internal representation. | +| UUID | TEXT | Different binary format. | +| BYTEA | TEXT | Different encoding. | +| TIMESTAMP | DATE | Loses time info; storage changes. | +| BOOLEAN | INTEGER | Different sizes and encoding. | +| REAL | NUMERIC | Different precision and format. | +| NUMERIC(p,s) | NUMERIC(p2,s2) | Requires data changes if scale is changed or if precision is smaller. | -##### Alter type with table rewrite +The following type changes do not require a rewrite when there is no associated index table on the column. When there is an associated index table on the column, a rewrite is performed on the index table alone but not on the main table. -If the change requires data on disk to change, a full table rewrite will be done and the following semantics apply: -- The action creates an entirely new table under the hood, and concurrent DMLs may not be reflected in the new table which can lead to correctness issues. -- The operation preserves split properties for hash-partitioned tables and hash-partitioned secondary indexes. 
For range-partitioned tables (and secondary indexes), split properties are only preserved if the altered column is not part of the table's (or secondary index's) range key. +| From | To | Notes | +| ------------ | ------------------ | ------------------------------------------------------ | +| VARCHAR(n) | VARCHAR(m) (m > n) | Length increase is compatible. | +| VARCHAR(n) | TEXT | Always compatible. | +| SERIAL | INTEGER | Underlying type is INTEGER; usually OK. | +| NUMERIC(p,s) | NUMERIC(p2,s2) | If new precision is larger and scale remains the same. | +| CHAR(n) | CHAR(m) (m > n) | PG stores it as padded TEXT, so often fine. | +| Domain types | Their base type | Compatible, unless additional constraints exist. | -Following is an example of alter type with table rewrite: +Altering a column with a (non-trivial) USING clause always requires a rewrite. -```sql -CREATE TABLE test (id BIGSERIAL PRIMARY KEY, a VARCHAR(50)); -INSERT INTO test(a) VALUES ('1234555'); -ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(40); --- try to change type to BIGINT -ALTER TABLE test ALTER COLUMN a TYPE BIGINT; -ERROR: column "a" cannot be cast automatically to type bigint -HINT: You might need to specify "USING a::bigint". --- use USING clause to cast the values -ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING a::BIGINT; -``` +The table rewrite operation preserves split properties for hash-partitioned tables and hash-partitioned secondary indexes. For range-partitioned tables (and secondary indexes), split properties are only preserved if the altered column is not part of the table's (or secondary index's) range key. 
-Another option is to use a custom function as follows: +For example, the following ALTER TYPE statements would cause a table rewrite: -```sql -CREATE OR REPLACE FUNCTION myfunc(text) RETURNS BIGINT - AS 'select $1::BIGINT;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; +- ALTER TABLE foo + ALTER COLUMN foo_timestamp TYPE timestamp with time zone + USING + timestamp with time zone 'epoch' + foo_timestamp * interval '1 second'; +- ALTER TABLE t ALTER COLUMN t_num1 TYPE NUMERIC(9,5); -- from NUMERIC(6,1) +- ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING a::BIGINT; -- from INT -ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING myfunc(a); -``` +The following ALTER TYPE statement does not cause a table rewrite: +- ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(51); -- from VARCHAR(50) #### DROP CONSTRAINT *constraint_name* [ RESTRICT | CASCADE ] @@ -296,13 +311,9 @@ Drop the named constraint from the table. - `RESTRICT` — Remove only the specified constraint. - `CASCADE` — Remove the specified constraint and any dependent objects. -{{< warning >}} -Dropping the `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table. -This happens because of the clustered storage by primary key that YugabyteDB uses to store rows and indexes. -While the tables and indexes are being rewritten, you may lose any modifications made to the table. -For reference, the same semantics as [Alter type with table rewrite](#alter-type-with-table-rewrite) apply. -{{< /warning >}} +##### Table rewrites +Dropping the `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table, which is a potentially expensive operation. For more details and common limitations of table rewrites, refer to [Alter table operations that involve a table rewrite](#alter-table-operations-that-involve-a-table-rewrite). 
#### RENAME [ COLUMN ] *column_name* TO *column_name* @@ -325,15 +336,21 @@ ALTER TABLE test RENAME CONSTRAINT vague_name TO unique_a_constraint; #### ENABLE / DISABLE ROW LEVEL SECURITY This enables or disables row level security for the table. + If enabled and no policies exist for the table, then a default-deny policy is applied. + If disabled, then existing policies for the table will not be applied and will be ignored. + See [CREATE POLICY](../dcl_create_policy) for details on how to create row level security policies. #### FORCE / NO FORCE ROW LEVEL SECURITY This controls the application of row security policies for the table when the user is the table owner. + If enabled, row level security policies will be applied when the user is the table owner. + If disabled (the default) then row level security will not be applied when the user is the table owner. + See [CREATE POLICY](../dcl_create_policy) for details on how to create row level security policies. ### Constraints @@ -371,6 +388,24 @@ Constraints marked as `INITIALLY IMMEDIATE` will be checked after every row with Constraints marked as `INITIALLY DEFERRED` will be checked at the end of the transaction. +## Alter table operations that involve a table rewrite + +Most ALTER TABLE statements only involve a schema modification and complete quickly. However, certain specific ALTER TABLE statements require a new copy of the underlying table (and associated index tables, in some cases) to be made and can potentially take a long time, depending on the sizes of the tables and indexes involved. This is typically referred to as a "table rewrite". This behavior is [similar to PostgreSQL](https://www.crunchydata.com/blog/when-does-alter-table-require-a-rewrite), though the exact scenarios when a rewrite is triggered may differ between PostgreSQL and YugabyteDB. 
+ +It is not safe to execute concurrent DML on the table during a table rewrite because the results of any concurrent DML are not guaranteed to be reflected in the copy of the table being made. This restriction is similar to PostgreSQL, which explicitly prevents concurrent DML during a table rewrite by acquiring an ACCESS EXCLUSIVE table lock. + +If you need to perform one of these expensive rewrites, it is recommended to combine them into a single ALTER TABLE statement to avoid multiple expensive rewrites. For example: + +```sql +ALTER TABLE t ADD COLUMN c6 UUID DEFAULT gen_random_uuid(), ALTER COLUMN c8 TYPE TEXT +``` + +The following ALTER TABLE operations involve making a full copy of the underlying table (and possibly associated index tables): + +1. [Adding](#add-alter) or [dropping](#drop-constraint-constraint-restrict-cascade) the primary key of a table. +1. [Adding a column with a (volatile) default value](#add-column-if-not-exists-column-data-constraint). +1. [Changing the type of a column](#alter-column-column-set-data-type-data-collate-collation-using-expression). + ## See also -- [`CREATE TABLE`](../ddl_create_table) +- [CREATE TABLE](../ddl_create_table) diff --git a/docs/content/preview/architecture/design-goals.md b/docs/content/preview/architecture/design-goals.md index 644d9e07c06e..f0bc51ec2d9b 100644 --- a/docs/content/preview/architecture/design-goals.md +++ b/docs/content/preview/architecture/design-goals.md @@ -56,7 +56,7 @@ YugabyteDB supports single-row linearizable writes. Linearizability is one of th YugabyteDB supports multi-row transactions with three isolation levels: Serializable, Snapshot (also known as repeatable read), and Read Committed isolation. -- The [YSQL API](../../api/ysql/) supports Serializable, Snapshot (default), and Read Committed isolation {{}} using the PostgreSQL isolation level syntax of `SERIALIZABLE`, `REPEATABLE READ`, and `READ COMMITTED` respectively. 
For more details, see [Isolation levels](#transaction-isolation-levels). +- The [YSQL API](../../api/ysql/) supports Serializable, Snapshot (default), and Read Committed isolation using the PostgreSQL isolation level syntax of `SERIALIZABLE`, `REPEATABLE READ`, and `READ COMMITTED` respectively. For more details, see [Isolation levels](#transaction-isolation-levels). - The [YCQL API](../../api/ycql/) supports only Snapshot isolation (default) using the [BEGIN TRANSACTION](../../api/ycql/dml_transaction/) syntax. ## Partition Tolerance - CAP @@ -97,7 +97,7 @@ Depending on the use case, the database may need to support diverse workloads, s Transaction isolation is foundational to handling concurrent transactions in databases. YugabyteDB supports three strict transaction isolation levels in [YSQL](../../api/ysql/). -- [Read Committed](../transactions/read-committed/) {{}}, which maps to the SQL isolation level of the same name +- [Read Committed](../transactions/read-committed/), which maps to the SQL isolation level of the same name - [Serializable](../../explore/transactions/isolation-levels/#serializable-isolation), which maps to the SQL isolation level of the same name - [Snapshot](../../explore/transactions/isolation-levels/#snapshot-isolation), which maps to the SQL Repeatable Read isolation level diff --git a/docs/content/preview/architecture/docdb-replication/async-replication.md b/docs/content/preview/architecture/docdb-replication/async-replication.md index 7bd60b1dccf3..d09aa88c7408 100644 --- a/docs/content/preview/architecture/docdb-replication/async-replication.md +++ b/docs/content/preview/architecture/docdb-replication/async-replication.md @@ -38,7 +38,7 @@ However, asynchronous replication can be beneficial in certain scenarios: - **Low write latency**: With synchronous replication, each write must reach a consensus across a majority of data centers. 
This can add tens or even hundreds of milliseconds of extra latency for writes in a multi-region deployment. xCluster reduces this latency by eliminating the need for immediate consensus across regions. - **Only two data centers needed**: With synchronous replication, to tolerate the failure of `f` fault domains, you need at least `2f + 1` fault domains. Therefore, to survive the loss of one data center, a minimum of three data centers is required, which can increase operational costs. For more details, see [fault tolerance](../replication/#fault-tolerance). With xCluster, you can achieve multi-region deployments with only two data centers. -- **Disaster recovery**: xCluster utilizes independent YugabyteDB universes in each region that can function independently of each other. This setup allows for quick failover with minimal data loss in the event of a regional outage caused by hardware or software issues. +- **Disaster recovery**: xCluster uses independent YugabyteDB universes in each region that can function independently of each other. This setup allows for quick failover with minimal data loss in the event of a regional outage caused by hardware or software issues. Asynchronous xCluster replication has the following drawbacks: @@ -198,11 +198,11 @@ xCluster currently supports active-active single-master and active-active multi- ### Active-active single-master -In this setup, replication is unidirectional from a source universe to a target universe, typically located in different data centers or regions. The source universe can handle both reads and writes, while the target universe is read-only. Since only the source universe can accept writes, this mode is referred to as single-master. Note that within the source universe, all nodes can serve writes. +In this setup, replication is unidirectional from a source universe to a target universe, typically located in different data centers or regions. 
The source universe can handle both reads and writes, while the target universe is read-only. Because only the source universe can accept writes, this mode is referred to as single-master. Note that in the source universe, all nodes can serve writes. These deployments are typically used for serving low-latency reads from the target universes and for disaster recovery purposes. When the primary purpose is disaster recovery, these deployments are referred to as active-standby, as the target universe is on standby to take over if the source universe fails. -Transactional mode is generally preferred here because it ensures consistency even if the source universe is lost. However, non-transactional mode can also be used depending on the specific requirements and trade-offs. +Transactional mode is generally preferred for this deployment because it ensures consistency even if the source universe is lost. However, non-transactional mode can also be used depending on the specific requirements and trade-offs. {{}} To learn more, watch [Disaster Recovery in YugabyteDB](https://youtu.be/6rmrcVQqb0o?si=4CuiByQGLaNzhdn_) @@ -216,7 +216,7 @@ The following diagram shows an example of this deployment: In a multi-master deployment, data replication is bidirectional between two universes, allowing both universes to perform reads and writes. Writes to any universe are asynchronously replicated to the other universe with a timestamp for the update. This mode implements last-writer-wins, where if the same key is updated in both universes around the same time, the write with the larger timestamp overrides the other one. This deployment mode is called multi-master because both universes serve writes. -The multi-master deployment utilizes bidirectional replication, which involves two unidirectional replication streams operating in non-transactional mode. 
Special measures are taken to assign timestamps that ensure last-writer-wins semantics, and data received from the replication stream is not re-replicated. +The multi-master deployment uses bidirectional replication, which involves two unidirectional replication streams operating in non-transactional mode. Special measures are taken to assign timestamps that ensure last-writer-wins semantics, and data received from the replication stream is not re-replicated. The following diagram illustrates this deployment: @@ -234,7 +234,6 @@ The following deployment scenarios are not yet supported: - _Star_: This involves connecting all universes to each other, for example: `A <-> B <-> C <-> A` - ## Limitations The following limitations apply to all xCluster modes and deployment scenarios: @@ -285,7 +284,20 @@ Limitations specific to each scenario and mode are listed below: ### Transactional -- No writes are allowed in the target universe. +- By default, no writes are allowed in the target universe. + + {{}}You can allow writes to the target on an exception basis, overriding the default read-only behavior by setting the following YSQL configuration parameter before executing a DML operation: + + ```sql + SET yb_non_ddl_txn_for_sys_tables_allowed = true + ``` + + This is intended strictly for specialized use cases, such as enabling tools like Flyway to update maintenance tables (for example, schema version trackers) on the replica. + + {{< warning title="Important" >}} +Improper use can compromise replication consistency and lead to data divergence. Use this setting only when absolutely necessary and with a clear understanding of its implications. + {{< /warning >}} + - YCQL is not yet supported. - In Semi-automatic and Manual modes, schema changes are not automatically replicated. They must be manually applied to both source and target universes. 
Refer to [DDLs in semi-automatic mode](../../../deploy/multi-dc/async-replication/async-transactional-setup-semi-automatic/#making-ddl-changes) and [DDLs in manual mode](../../../deploy/multi-dc/async-replication/async-transactional-tables) for more information. diff --git a/docs/content/preview/architecture/docdb-replication/cdc-logical-replication.md b/docs/content/preview/architecture/docdb-replication/cdc-logical-replication.md index 5ec8b24bf08e..a38c00f53102 100644 --- a/docs/content/preview/architecture/docdb-replication/cdc-logical-replication.md +++ b/docs/content/preview/architecture/docdb-replication/cdc-logical-replication.md @@ -70,7 +70,7 @@ Each tablet sends changes in transaction commit time order. Further, in a transa ![VWAL-walsender](/images/architecture/vwal_walsender_interaction.png) -VWAL collects changes across multiple tablets, assembles the transactions, assigns LSN to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. +VWAL collects changes across multiple tablets, assembles the transactions, assigns a Log Sequence Number ([LSN](../../../develop/change-data-capture/using-logical-replication/key-concepts/#lsn-type)) to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. **Step 3 - walsender to client** diff --git a/docs/content/preview/architecture/transactions/isolation-levels.md b/docs/content/preview/architecture/transactions/isolation-levels.md index 057977327d99..c5b25a084490 100644 --- a/docs/content/preview/architecture/transactions/isolation-levels.md +++ b/docs/content/preview/architecture/transactions/isolation-levels.md @@ -15,13 +15,13 @@ Transaction isolation is foundational to handling concurrent transactions in dat YugabyteDB supports the following three strictest transaction isolation levels: -1. 
Read Committed {{}}, which maps to the SQL isolation level of the same name. This isolation level guarantees that each statement sees all data that has been committed before it is issued (this implicitly also means that the statement sees a consistent snapshot). In addition, this isolation level internally handles read restart and conflict errors. In other words, the client does not see read restart and conflict errors (barring an exception). +1. Read Committed, which maps to the SQL isolation level of the same name. This isolation level guarantees that each statement sees all data that has been committed before it is issued (this implicitly also means that the statement sees a consistent snapshot). In addition, this isolation level internally handles read restart and conflict errors. In other words, the client does not see read restart and conflict errors (barring an exception). 2. Serializable, which maps to the SQL isolation level of the same name. This isolation level guarantees that transactions run in a way equivalent to a serial (sequential) schedule. 3. Snapshot, which maps to the SQL Repeatable Read isolation level. This isolation level guarantees that all reads made in a transaction see a consistent snapshot of the database, and the transaction itself can successfully commit only if no updates it has made conflict with any concurrent updates made by transactions that committed after that snapshot. Transaction isolation level support differs between the YSQL and YCQL APIs: -- [YSQL](../../../api/ysql/) supports Serializable, Snapshot, and Read Committed {{}} isolation levels. +- [YSQL](../../../api/ysql/) supports Serializable, Snapshot, and Read Committed isolation levels. - [YCQL](../../../api/ycql/dml_transaction/) supports only Snapshot isolation using the `BEGIN TRANSACTION` syntax. Similarly to PostgreSQL, you can specify Read Uncommitted for YSQL, but it behaves the same as Read Committed. 
diff --git a/docs/content/preview/architecture/transactions/read-committed.md b/docs/content/preview/architecture/transactions/read-committed.md index f6eea11c8b1f..6709ad122066 100644 --- a/docs/content/preview/architecture/transactions/read-committed.md +++ b/docs/content/preview/architecture/transactions/read-committed.md @@ -3,8 +3,6 @@ title: Read Committed isolation level headerTitle: Read Committed isolation level linkTitle: Read Committed description: Details about the Read Committed isolation level -tags: - feature: early-access menu: preview: identifier: architecture-read-committed diff --git a/docs/content/preview/architecture/transactions/read-restart-error.md b/docs/content/preview/architecture/transactions/read-restart-error.md index 2bf9c52d0450..aaad015c7d97 100644 --- a/docs/content/preview/architecture/transactions/read-restart-error.md +++ b/docs/content/preview/architecture/transactions/read-restart-error.md @@ -16,6 +16,7 @@ rightNav: The distributed nature of YugabyteDB means that clock skew can be present between different physical nodes in the database cluster. Given that YugabyteDB is a multi-version concurrency control (MVCC) database, this clock skew can sometimes result in an unresolvable ambiguity of whether a version of data should, or not be part of a read in snapshot-based transaction isolations (that is, repeatable read and read committed). There are multiple solutions for this problem, [each with their own challenges](https://www.yugabyte.com/blog/evolving-clock-sync-for-distributed-databases/). PostgreSQL doesn't require defining semantics around read restart errors because it is a single-node database without clock skew. Read restart errors are raised to maintain the _read-after-commit-visibility_ guarantee: any read query should see all data that was committed before the read query was issued (even in the presence of clock skew between nodes). In other words, read restart errors prevent the following stale read anomaly: + 1. 
First, user X commits some data, for which the database picks a commit timestamp, say commit_time. 2. Next, user X informs user Y about the commit via a channel outside the database, say a phone call. 3. Then, user Y issues a read that picks a read time, which is less than the prior commit_time due to clock skew. @@ -32,11 +33,13 @@ The following scenario describes how clock skew can result in the above mentione * Tokens 17, 29 are inserted into an empty tokens table. Then, all the tokens from the table are retrieved. The SQL commands for the scenario are as follows: + ```sql INSERT INTO tokens VALUES (17); INSERT INTO tokens VALUES (29); SELECT * FROM tokens; ``` + * The SELECT must return both 17 and 29. * However, due to clock skew, the INSERT operation picks a commit time higher than the reference time, while the SELECT picks a lower read time and thus omits the prior INSERT from the result set. @@ -85,17 +88,20 @@ You can handle and mitigate read restart errors using the following techniques: Examples: Set transaction properties at the session level. + ```sql SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE; SELECT * FROM large_table; ``` Enclose the offending query within a transaction block. + ```sql BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE; SELECT * FROM large_table; COMMIT; ``` + - Using read only, deferrable transactions is not always feasible, either because the query is not read only, or the query is part of a read-write transaction, or because an additional 500ms of latency is not acceptable. In these cases, try increasing the value of `ysql_output_buffer_size`. This will enable YugabyteDB to retry the query internally on behalf of the user. 
As long as the output of a statement hasn't crossed ysql_output_buffer_size to result in flushing partial data to the external client, the YSQL query layer retries read restart errors for all statements in a Read Committed transaction block, for the first statement in a Repeatable Read transaction block, and for any standalone statement outside a transaction block. As a tradeoff, increasing the buffer size also increases the memory consumed by the YSQL backend processes, resulting in a higher risk of out-of-memory errors. diff --git a/docs/content/preview/contribute/docs/macos.md b/docs/content/preview/contribute/docs/macos.md index f97885cc1df2..f5322c712404 100644 --- a/docs/content/preview/contribute/docs/macos.md +++ b/docs/content/preview/contribute/docs/macos.md @@ -18,11 +18,11 @@ private=true Recent versions of macOS have only a `python3` executable, as does the Homebrew install. You can use [pyenv](https://github.com/pyenv/pyenv) to manage multiple versions of python on your system. Make sure to point to Python v3.10 or earlier. -* **Hugo**: Install Hugo v0.143.1 Follow these steps: +* **Hugo**: Install Hugo v0.145.0. Follow these steps: * Unpin Hugo to stop its formula from being updated - `brew unpin hugo` * Uninstall any older version if installed - `brew uninstall hugo` - * Download the v0.143.1 formula file from [Homebrew's repository](https://github.com/Homebrew/homebrew-core/blob/8dda2dcd7a7e2cec735942ef69879cfba621f7b8/Formula/h/hugo.rb). + * Download the v0.145.0 formula file from [Homebrew's repository](https://github.com/Homebrew/homebrew-core/blob/f55947be0ab55cfa5274d7232608d87b0e2ebf94/Formula/h/hugo.rb).
* Install the downloaded formula - `brew install hugo.rb` * Lastly, prevent automatic updates of Hugo version - `brew pin hugo` diff --git a/docs/content/preview/contribute/docs/ubuntu.md b/docs/content/preview/contribute/docs/ubuntu.md index 75aff168bed7..2312f88863ec 100644 --- a/docs/content/preview/contribute/docs/ubuntu.md +++ b/docs/content/preview/contribute/docs/ubuntu.md @@ -8,7 +8,7 @@ private=true Recent versions of Ubuntu default to `python` and point to Python 3. If not, you can install a new version of Python, and use [pyenv](https://github.com/pyenv/pyenv) to manage multiple Python versions. Ensure you're using Python 3.10 or earlier. -* **Hugo**: Install the `hugo_extended_0.143.1` version from the [official Hugo releases](https://github.com/gohugoio/hugo/releases) for your Linux machine architecture. Make sure the `hugo` binary is available in the shell path. +* **Hugo**: Install the `hugo_extended_0.145.0` version from the [official Hugo releases](https://github.com/gohugoio/hugo/releases) for your Linux machine architecture. Make sure the `hugo` binary is available in the shell path. * **Go**: Install latest Go from the [official Go website](https://golang.org/dl/). 
diff --git a/docs/content/preview/deploy/kubernetes/single-zone/oss/helm-chart.md b/docs/content/preview/deploy/kubernetes/single-zone/oss/helm-chart.md index 8265f8a1a0f8..1a1006b9db3a 100644 --- a/docs/content/preview/deploy/kubernetes/single-zone/oss/helm-chart.md +++ b/docs/content/preview/deploy/kubernetes/single-zone/oss/helm-chart.md @@ -428,6 +428,12 @@ helm repo update helm upgrade yb-demo yugabytedb/yugabyte --version {{}} --wait -n yb-demo ``` +Then finalize the upgrade as follows: + +```sh +kubectl exec -it yb-master-0 -- /home/yugabyte/bin/yb-admin --master_addresses yb-master-0.yb-masters.default.svc.cluster.local:7100 finalize_upgrade +``` + ## Update the configuration of YugabyteDB pods You can update most settings in the helm chart by running a `helm upgrade` with the new values. By default, this performs a [rolling update](https://github.com/yugabyte/charts/blob/853d7ac744cf6d637b5877f4681940825beda8f6/stable/yugabyte/values.yaml#L60) of the pods. diff --git a/docs/content/preview/develop/_index.md b/docs/content/preview/develop/_index.md index 5084599f7ed6..d9fa6c765f23 100644 --- a/docs/content/preview/develop/_index.md +++ b/docs/content/preview/develop/_index.md @@ -43,8 +43,8 @@ To learn how to build applications on top of YugabyteDB, see [Learn app developm Use these best practices to build distributed applications on top of YugabyteDB; this includes a list of techniques that you can adopt to make your application perform its best. -{{}} -For more details, see [Best practices](./best-practices-ysql). +{{}} +For more details, see [Best practices](./best-practices-develop). 
{{}} ## Drivers and ORMs diff --git a/docs/content/preview/develop/best-practices-develop/_index.md b/docs/content/preview/develop/best-practices-develop/_index.md new file mode 100644 index 000000000000..83a00e37b543 --- /dev/null +++ b/docs/content/preview/develop/best-practices-develop/_index.md @@ -0,0 +1,51 @@ +--- +title: Best practices for applications +headerTitle: Best practices +linkTitle: Best practices +description: Tips and tricks to build applications +headcontent: Tips and tricks to build applications for high performance and availability +aliases: + - /preview/develop/best-practices-ysql/ +menu: + preview: + identifier: best-practices-develop + parent: develop + weight: 570 +type: indexpage +--- + +## YSQL + +{{}} + + {{}} + + {{}} + + {{}} + +{{}} + +## YCQL + +{{}} + + {{}} + +{{}} diff --git a/docs/content/preview/develop/best-practices-develop/administration.md b/docs/content/preview/develop/best-practices-develop/administration.md new file mode 100644 index 000000000000..c3be52f1af49 --- /dev/null +++ b/docs/content/preview/develop/best-practices-develop/administration.md @@ -0,0 +1,57 @@ +--- +title: Best practices for YSQL database administrators +headerTitle: Best practices for YSQL database administrators +linkTitle: YSQL database administrators +description: Tips and tricks to build YSQL applications +headcontent: Tips and tricks for administering YSQL databases +menu: + preview: + identifier: best-practices-ysql-administration + parent: best-practices-develop + weight: 30 +type: docs +--- + +Database administrators can fine-tune YugabyteDB deployments for better reliability, performance, and operational efficiency by following targeted best practices. This guide outlines key recommendations for configuring single-AZ environments, optimizing memory use, accelerating CI/CD tests, and safely managing concurrent DML and DDL operations. 
These tips are designed to help DBAs maintain stable, scalable YSQL clusters in real-world and test scenarios alike. + +## Single availability zone (AZ) deployments + +In single AZ deployments, you need to set the [yb-tserver](../../../reference/configuration/yb-tserver) flag `--durable_wal_write=true` to not lose data if the whole data center goes down (for example, power failure). + +## Allow for tablet replica overheads + +Although you can manually provision the amount of memory each TServer uses using flags ([--memory_limit_hard_bytes](../../../reference/configuration/yb-tserver/#memory-limit-hard-bytes) or [--default_memory_limit_to_ram_ratio](../../../reference/configuration/yb-tserver/#default-memory-limit-to-ram-ratio)), this can be tricky as you need to take into account how much memory the kernel needs, along with the PostgreSQL processes and any Master process that is going to be colocated with the TServer. + +Accordingly, you should use the [--use_memory_defaults_optimized_for_ysql](../../../reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql) flag, which gives good memory division settings for using YSQL, optimized for your node's size. + +If this flag is true, then the [memory division flag defaults](../../../reference/configuration/yb-tserver/#memory-division-flags) change to provide much more memory for PostgreSQL; furthermore, they optimize for the node size. + +Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. + +## Settings for CI and CD integration tests + +You can set certain flags to increase performance using YugabyteDB in CI and CD automated test scenarios as follows: + +- Point the flags `--fs_data_dirs`, and `--fs_wal_dirs` to a RAMDisk directory to make DML, DDL, cluster creation, and cluster deletion faster, ensuring that data is not written to disk. 
+- Set the flag `--yb_num_shards_per_tserver=1`. Reducing the number of shards lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. +- Use colocated databases in YSQL. Colocation lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. +- Set the flag `--replication_factor=1` for test scenarios, as keeping the data three way replicated (default) is not necessary. Reducing that to 1 reduces space usage and increases performance. +- Use `TRUNCATE table1,table2,table3..tablen;` instead of CREATE TABLE, and DROP TABLE between test cases. + +## Concurrent DML during a DDL operation + +In YugabyteDB, DML is allowed to execute while a DDL statement modifies the schema that is accessed by the DML statement. For example, an `ALTER TABLE .. ADD COLUMN` DDL statement may add a new column while a `SELECT * FROM` executes concurrently on the same relation. In PostgreSQL, this is typically not allowed because such DDL statements take a table-level exclusive lock that prevents concurrent DML from executing. (Support for similar behavior in YugabyteDB is being tracked in issue {{}}.) + +In YugabyteDB, when a DDL modifies the schema of tables that are accessed by concurrent DML statements, the DML statement may do one of the following: + +- Operate with the old schema prior to the DDL. +- Operate with the new schema after the DDL completes. +- Encounter temporary errors such as `schema mismatch errors` or `catalog version mismatch`. It is recommended for the client to [retry such operations](https://www.yugabyte.com/blog/retry-mechanism-spring-boot-app/) whenever possible. + +Most DDL statements complete quickly, so this is typically not a significant issue in practice. However, [certain kinds of ALTER TABLE DDL statements](../../../api/ysql/the-sql-language/statements/ddl_alter_table/#alter-table-operations-that-involve-a-table-rewrite) involve making a full copy of the table(s) whose schema is being modified. For these operations, it is not recommended to run any concurrent DML statements on the table being modified by the `ALTER TABLE`, as the effect of such concurrent DML may not be reflected in the table copy. + +## Concurrent DDL during a DDL operation + +DDL statements that affect entities in different databases can be run concurrently. However, for DDL statements that impact the same database, it is recommended to execute them sequentially. + +DDL statements that relate to shared objects, such as roles or tablespaces, are considered as affecting all databases in the cluster, so they should also be run sequentially.
diff --git a/docs/content/preview/develop/best-practices-ycql.md b/docs/content/preview/develop/best-practices-develop/best-practices-ycql.md similarity index 54% rename from docs/content/preview/develop/best-practices-ycql.md rename to docs/content/preview/develop/best-practices-develop/best-practices-ycql.md index 1c8fa50559cd..00b032af74c3 100644 --- a/docs/content/preview/develop/best-practices-ycql.md +++ b/docs/content/preview/develop/best-practices-develop/best-practices-ycql.md @@ -1,34 +1,34 @@ --- title: Best practices for YCQL applications -headerTitle: Best practices -linkTitle: Best practices +headerTitle: Best practices for YCQL applications +linkTitle: YCQL applications description: Tips and tricks to build YCQL applications headcontent: Tips and tricks to build YCQL applications for high performance and availability +aliases: + - /preview/develop/best-practices-ycql/ menu: preview: identifier: best-practices-ycql - parent: develop - weight: 571 -aliases: - - /preview/develop/best-practices/ + parent: best-practices-develop + weight: 40 type: docs --- -{{}} +To build high-performance and scalable applications using YCQL, developers should follow key schema design and operational best practices tailored for YugabyteDB's distributed architecture. This guide covers strategies for using indexes efficiently, optimizing read/write paths with batching and prepared statements, managing JSON and collection data types, and ensuring memory settings align with your query layer. These practices help ensure reliable performance, especially under real-world workloads. ## Global secondary indexes -Indexes use multi-shard transactional capability of YugabyteDB and are global and strongly consistent (ACID). To add secondary indexes, you need to create tables with [transactions enabled](../../api/ycql/ddl_create_table/#table-properties-1). They can also be used as materialized views by using the [`INCLUDE` clause](../../api/ycql/ddl_create_index#included-columns). 
+Indexes use multi-shard transactional capability of YugabyteDB and are global and strongly consistent (ACID). To add secondary indexes, you need to create tables with [transactions enabled](../../../api/ycql/ddl_create_table/#table). They can also be used as materialized views by using the [INCLUDE clause](../../../api/ycql/ddl_create_index#included-columns). ## Unique indexes -YCQL supports [unique indexes](../../api/ycql/ddl_create_index#unique-index). A unique index disallows duplicate values from being inserted into the indexed columns. +YCQL supports [unique indexes](../../../api/ycql/ddl_create_index#unique-index). A unique index disallows duplicate values from being inserted into the indexed columns. ## Covering indexes When querying by a secondary index, the original table is consulted to get the columns that aren't specified in the index. This can result in multiple random reads across the main table. -Sometimes, a better way is to include the other columns that you're querying that are not part of the index using the [`INCLUDE` clause](../../api/ycql/ddl_create_index/#included-columns). When additional columns are included in the index, they can be used to respond to queries directly from the index without querying the table. +Sometimes, a better way is to include the other columns that you're querying that are not part of the index using the [INCLUDE clause](../../../api/ycql/ddl_create_index/#included-columns). When additional columns are included in the index, they can be used to respond to queries directly from the index without querying the table. This turns a (possible) random read from the main table to just a filter on the index. @@ -38,23 +38,23 @@ For operations like `UPDATE ... IF EXISTS` and `INSERT ... IF NOT EXISTS` that r ## JSONB -YugabyteDB supports the [`jsonb`](../../api/ycql/type_jsonb/) data type to model JSON data, which does not have a set schema and might change often. You can use JSONB to group less accessed columns of a table. 
YCQL also supports JSONB expression indexes that can be used to speed up data retrieval that would otherwise require scanning the JSON entries. +YugabyteDB supports the [JSONB](../../../api/ycql/type_jsonb/) data type to model JSON data, which does not have a set schema and might change often. You can use JSONB to group less accessed columns of a table. YCQL also supports JSONB expression indexes that can be used to speed up data retrieval that would otherwise require scanning the JSON entries. {{< note title="Use JSONB columns only when necessary" >}} -`jsonb` columns are slower to read and write compared to normal columns. They also take more space because they need to store keys in strings and make keeping data consistency more difficult. A good schema design is to keep most columns as regular columns or collections, and use `jsonb` only for truly dynamic values. Don't create a `data jsonb` column where you store everything; instead, use a `dynamic_data jsonb` column with the others being primitive columns. +JSONB columns are slower to read and write compared to normal columns. They also take more space because they need to store keys in strings and make keeping data consistency more difficult. A good schema design is to keep most columns as regular columns or collections, and use JSONB only for truly dynamic values. Don't create a `data jsonb` column where you store everything; instead, use a `dynamic_data jsonb` column with the others being primitive columns. {{< /note >}} ## Increment and decrement numeric types -In YugabyteDB, YCQL extends Apache Cassandra to add increment and decrement operators for integer data types. [Integers](../../api/ycql/type_int) can be set, inserted, incremented, and decremented while `COUNTER` can only be incremented or decremented. YugabyteDB implements CAS(compare-and-set) operations in one round trip, compared to four for Apache Cassandra. 
+In YugabyteDB, YCQL extends Apache Cassandra to add increment and decrement operators for integer data types. [Integers](../../../api/ycql/type_int) can be set, inserted, incremented, and decremented while `COUNTER` can only be incremented or decremented. YugabyteDB implements CAS (compare-and-set) operations in one round trip, compared to four for Apache Cassandra. ## Expire older records automatically with TTL -YCQL supports automatic expiration of data using the [TTL feature](../../api/ycql/ddl_create_table/#use-table-property-to-define-the-default-expiration-time-for-rows). You can set a retention policy for data at table/row/column level and the older data is automatically purged from the database. +YCQL supports automatic expiration of data using the [TTL feature](../../../api/ycql/ddl_create_table/#use-table-property-to-define-the-default-expiration-time-for-rows). You can set a retention policy for data at table/row/column level and the older data is automatically purged from the database. -If configuring TTL for a time series dataset or any dataset with a table-level TTL, it is recommended for CPU and space efficiency to expire older files directly by using TTL-specific configuration options. More details can be found in [Efficient data expiration for TTL](../learn/ttl-data-expiration-ycql/#efficient-data-expiration-for-ttl). +If configuring TTL for a time series dataset or any dataset with a table-level TTL, it is recommended for CPU and space efficiency to expire older files directly by using TTL-specific configuration options. More details can be found in [Efficient data expiration for TTL](../../learn/ttl-data-expiration-ycql/#efficient-data-expiration-for-ttl). {{}} TTL does not apply to transactional tables and so, its unsupported in that context.
@@ -62,7 +62,7 @@ TTL does not apply to transactional tables and so, its unsupported in that conte ## Use YugabyteDB drivers -Use YugabyteDB-specific [client drivers](../../drivers-orms/) because they are cluster- and partition-aware, and support `jsonb` columns. +Use YugabyteDB-specific [client drivers](../../../drivers-orms/) because they are cluster- and partition-aware, and support `jsonb` columns. ## Leverage connection pooling in the YCQL client @@ -90,22 +90,22 @@ Collections are designed for storing small sets of values that are not expected ## Collections with many elements -Each element inside a collection ends up as a [separate key value](../../architecture/docdb/data-model#examples) in DocDB adding per-element overhead. +Each element inside a collection ends up as a [separate key value](../../../architecture/docdb/data-model#examples) in DocDB adding per-element overhead. -If your collections are immutable, or you update the whole collection in full, consider using the `JSONB` data type. An alternative would also be to use ProtoBuf or FlatBuffers and store the serialized data in a `BLOB` column. +If your collections are immutable, or you update the whole collection in full, consider using the JSONB data type. An alternative would also be to use ProtoBuf or FlatBuffers and store the serialized data in a BLOB column. ## Use partition_hash for large table scans -`partition_hash` function can be used for querying a subset of the data to get approximate row counts or to break down full-table operations into smaller sub-tasks that can be run in parallel. See [example usage](../../api/ycql/expr_fcall#partition-hash-function) along with a working Python script. +`partition_hash` function can be used for querying a subset of the data to get approximate row counts or to break down full-table operations into smaller sub-tasks that can be run in parallel. See [example usage](../../../api/ycql/expr_fcall#partition-hash-function) along with a working Python script. 
## TRUNCATE tables instead of DELETE -[TRUNCATE](../../api/ycql/dml_truncate/) deletes the database files that store the table and is much faster than [DELETE](../../api/ycql/dml_delete/) which inserts a _delete marker_ for each row in transactions and they are removed from storage when a compaction runs. +[TRUNCATE](../../../api/ycql/dml_truncate/) deletes the database files that store the table and is much faster than [DELETE](../../../api/ycql/dml_delete/) which inserts a _delete marker_ for each row in transactions and they are removed from storage when a compaction runs. ## Memory and tablet limits -If you are not using YSQL, ensure the [use_memory_defaults_optimized_for_ysql](../../reference/configuration/yb-master/#use-memory-defaults-optimized-for-ysql) flag is set to false. This flag optimizes YugabyteDB's memory setup for YSQL, reserving a considerable amount of memory for PostgreSQL; if you are not using YSQL then that memory is wasted when it could be helping improve performance by allowing more data to be cached. +If you are not using YSQL, ensure the [use_memory_defaults_optimized_for_ysql](../../../reference/configuration/yb-master/#use-memory-defaults-optimized-for-ysql) flag is set to false. This flag optimizes YugabyteDB's memory setup for YSQL, reserving a considerable amount of memory for PostgreSQL; if you are not using YSQL then that memory is wasted when it could be helping improve performance by allowing more data to be cached. Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. -See [Memory division flags](../../reference/configuration/yb-tserver/#memory-division-flags) for more information. +See [Memory division flags](../../../reference/configuration/yb-tserver/#memory-division-flags) for more information. 
diff --git a/docs/content/preview/develop/best-practices-develop/clients.md b/docs/content/preview/develop/best-practices-develop/clients.md new file mode 100644 index 000000000000..050b44adb420 --- /dev/null +++ b/docs/content/preview/develop/best-practices-develop/clients.md @@ -0,0 +1,50 @@ +--- +title: Best practices for YSQL clients +headerTitle: Best practices for YSQL clients +linkTitle: YSQL clients +description: Tips and tricks for administering YSQL clients +headcontent: Tips and tricks for administering YSQL clients +menu: + preview: + identifier: best-practices-ysql-clients + parent: best-practices-develop + weight: 20 +type: docs +--- + +Client-side configuration plays a critical role in the performance, scalability, and resilience of YSQL applications. This guide highlights essential best practices for managing connections, balancing load across nodes, and handling failovers efficiently using YugabyteDB's smart drivers and connection pooling. Whether you're deploying in a single region or across multiple data centers, these tips will help ensure your applications make the most of YugabyteDB's distributed architecture. + +## Load balance and failover using smart drivers + +YugabyteDB [smart drivers](../../../drivers-orms/smart-drivers/) provide advanced cluster-aware load-balancing capabilities that enable your applications to send requests to multiple nodes in the cluster by connecting to one node. You can also set a fallback hierarchy by assigning priority to specific regions and ensuring that connections are made to the region with the highest priority, and then fall back to the region with the next priority in case the high-priority region fails. + +{{}} +For more information, see [Load balancing with smart drivers](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#load-balancing-with-smart-driver).
+{{}} + +## Make sure the application uses new nodes + +When a cluster is expanded, newly added nodes do not automatically start to receive client traffic. Regardless of the language of the driver or whether you are using a smart driver, the application must either explicitly request new connections or, if it is using a pooling solution, it can configure the pooler to recycle connections periodically (for example, by setting maxLifetime and/or idleTimeout). + +## Scale your application with connection pools + +Set up different pools with different load balancing policies as needed for your application to scale by using popular pooling solutions such as HikariCP and Tomcat along with YugabyteDB [smart drivers](../../../drivers-orms/smart-drivers/). + +{{}} +For more information, see [Connection pooling](../../../drivers-orms/smart-drivers/#connection-pooling). +{{}} + +### Database migrations and connection pools + +In some cases, connection pools may trigger unexpected errors while running a sequence of database migrations or other DDL operations. + +Because YugabyteDB is distributed, it can take a while for the result of a DDL to fully propagate to all caches on all nodes in a cluster. As a result, after a DDL statement completes, the next DDL statement that runs right afterwards on a different PostgreSQL connection may, in rare cases, see errors such as `duplicate key value violates unique constraint "pg_attribute_relid_attnum_index"` (see issue {{}}). It is recommended to use a single connection while running a sequence of DDL operations, as is common with application migration scripts with tools such as Flyway or Active Record. + +## Use YSQL Connection Manager + +YugabyteDB includes a built-in connection pooler, YSQL Connection Manager {{}}, which provides the same connection pooling advantages as other external pooling solutions, but without many of their limitations. 
As the manager is bundled with the product, it is convenient to manage, monitor, and configure the server connections. + +For more information, refer to the following: + +- [YSQL Connection Manager](../../../explore/going-beyond-sql/connection-mgr-ysql/) +- [Built-in Connection Manager Turns Key PostgreSQL Weakness into a Strength](https://www.yugabyte.com/blog/connection-pooling-management/) diff --git a/docs/content/preview/develop/best-practices-ysql.md b/docs/content/preview/develop/best-practices-develop/data-modeling-perf.md similarity index 51% rename from docs/content/preview/develop/best-practices-ysql.md rename to docs/content/preview/develop/best-practices-develop/data-modeling-perf.md index 2850606d4cb3..d308b4eda185 100644 --- a/docs/content/preview/develop/best-practices-ysql.md +++ b/docs/content/preview/develop/best-practices-develop/data-modeling-perf.md @@ -1,25 +1,25 @@ --- -title: Best practices for YSQL applications -headerTitle: Best practices -linkTitle: Best practices -description: Tips and tricks to build YSQL applications -headcontent: Tips and tricks to build YSQL applications for high performance and availability +title: Best practices for data modeling and performance of YSQL applications +headerTitle: Best practices for data modeling and performance of YSQL applications +linkTitle: YSQL data modeling +description: Tips and tricks for building YSQL applications +headcontent: Tips and tricks for building YSQL applications menu: preview: - identifier: best-practices-ysql - parent: develop - weight: 570 + identifier: data-modeling-perf + parent: best-practices-develop + weight: 10 type: docs --- -{{}} +Designing efficient, high-performance YSQL applications requires thoughtful data modeling and an understanding of how YugabyteDB handles distributed workloads.
This guide offers a collection of best practices, from leveraging colocation and indexing techniques to optimizing transactions and parallelizing queries, that can help you build scalable, globally distributed applications with low latency and high availability. Whether you're developing new applications or tuning existing ones, these tips will help you make the most of YSQL's capabilities. ## Use application patterns -Running applications in multiple data centers with data split across them is not a trivial task. When designing global applications, choose a suitable design pattern for your application from a suite of battle-tested design paradigms, including [Global database](../build-global-apps/global-database), [Multi-master](../build-global-apps/active-active-multi-master), [Standby cluster](../build-global-apps/active-active-single-master), [Duplicate indexes](../build-global-apps/duplicate-indexes), [Follower reads](../build-global-apps/follower-reads), and more. You can also combine these patterns as per your needs. +Running applications in multiple data centers with data split across them is not a trivial task. When designing global applications, choose a suitable design pattern for your application from a suite of battle-tested design paradigms, including [Global database](../../build-global-apps/global-database), [Multi-master](../../build-global-apps/active-active-multi-master), [Standby cluster](../../build-global-apps/active-active-single-master), [Duplicate indexes](../../build-global-apps/duplicate-indexes), [Follower reads](../../build-global-apps/follower-reads), and more. You can also combine these patterns as per your needs. {{}} -For more details, see [Build global applications](../build-global-apps). +For more details, see [Build global applications](../../build-global-apps). {{}} ## Colocation @@ -27,14 +27,14 @@ For more details, see [Build global applications](../build-global-apps).
Colocated tables optimize latency and performance for data access by reducing the need for additional trips across the network for small tables. Additionally, it reduces the overhead of creating a tablet for every relation (tables, indexes, and so on) and their storage per node. {{}} -For more details, see [Colocation](../../explore/colocation/). +For more details, see [Colocation](../../../explore/colocation/). {{}} ## Faster reads with covering indexes When a query uses an index to look up rows faster, the columns that are not present in the index are fetched from the original table. This results in additional round trips to the main table leading to increased latency. -Use [covering indexes](../../explore/ysql-language-features/indexes-constraints/covering-index-ysql/) to store all the required columns needed for your queries in the index. Indexing converts a standard Index-Scan to an [Index-Only-Scan](https://dev.to/yugabyte/boosts-secondary-index-queries-with-index-only-scan-5e7j). +Use [covering indexes](../../../explore/ysql-language-features/indexes-constraints/covering-index-ysql/) to store all the required columns needed for your queries in the index. Indexing converts a standard Index-Scan to an [Index-Only-Scan](https://dev.to/yugabyte/boosts-secondary-index-queries-with-index-only-scan-5e7j). {{}} For more details, see [Avoid trips to the table with covering indexes](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#avoid-trips-to-the-table-with-covering-indexes). @@ -45,28 +45,24 @@ For more details, see [Avoid trips to the table with covering indexes](https://w A partial index is an index that is built on a subset of a table and includes only rows that satisfy the condition specified in the WHERE clause. This speeds up any writes to the table and reduces the size of the index, thereby improving speed for read queries that use the index. 
{{}} -For more details, see [Partial indexes](../../explore/ysql-language-features/indexes-constraints/partial-index-ysql/). +For more details, see [Partial indexes](../../../explore/ysql-language-features/indexes-constraints/partial-index-ysql/). {{}} ## Distinct keys with unique indexes If you need values in some of the columns to be unique, you can specify your index as UNIQUE. -When a unique index is applied to two or more columns, the combined values in these columns can't be duplicated in multiple rows. - -{{}} -By default a NULL value is treated as a distinct value, allowing you to have multiple NULL values in a column with a unique index. This can be turned OFF by adding the [NULLS NOT DISTINCT](../../api/ysql/the-sql-language/statements/ddl_create_index#nulls-not-distinct) option when creating the unique index. -{{}} +When a unique index is applied to two or more columns, the combined values in these columns can't be duplicated in multiple rows. Note that because a NULL value is treated as a distinct value, you can have multiple NULL values in a column with a unique index. {{}} -For more details, see [Unique indexes](../../explore/ysql-language-features/indexes-constraints/unique-index-ysql/). +For more details, see [Unique indexes](../../../explore/ysql-language-features/indexes-constraints/unique-index-ysql/). {{}} ## Faster sequences with server-level caching Sequences in databases automatically generate incrementing numbers, perfect for generating unique values like order numbers, user IDs, check numbers, and so on. They prevent multiple application instances from concurrently generating duplicate values. However, generating sequences on a database that is spread across regions could have a latency impact on your applications. 
-Enable [server-level caching](../../api/ysql/exprs/sequence_functions/func_nextval/#caching-values-on-the-yb-tserver) to improve the speed of sequences, and also avoid discarding many sequence values when an application disconnects. +Enable [server-level caching](../../../api/ysql/exprs/func_nextval/#caching-values-on-the-yb-tserver) to improve the speed of sequences, and also avoid discarding many sequence values when an application disconnects. {{}} For a demo, see the YugabyteDB Friday Tech Talk on [Scaling sequences with server-level caching](https://www.youtube.com/watch?v=hs-CU3vjMQY&list=PL8Z3vt4qJTkLTIqB9eTLuqOdpzghX8H40&index=76). @@ -89,15 +85,15 @@ UPDATE txndemo SET v = v + 3 WHERE k=1 RETURNING v; ``` {{}} -For more details, see [Fast single-row transactions](../../develop/learn/transactions/transactions-performance-ysql/#fast-single-row-transactions). +For more details, see [Fast single-row transactions](../../../develop/learn/transactions/transactions-performance-ysql/#fast-single-row-transactions). {{}} ## Delete older data quickly with partitioning -Use [table partitioning](../../explore/ysql-language-features/advanced-features/partitions/) to split your data into multiple partitions according to date so that you can quickly delete older data by dropping the partition. +Use [table partitioning](../../../explore/ysql-language-features/advanced-features/partitions/) to split your data into multiple partitions according to date so that you can quickly delete older data by dropping the partition. {{}} -For more details, see [Partition data by time](../data-modeling/common-patterns/timeseries/partitioning-by-time/). +For more details, see [Partition data by time](../../data-modeling/common-patterns/timeseries/partitioning-by-time/). {{}} ## Use the right data types for partition keys @@ -167,51 +163,16 @@ SELECT * FROM products; ``` {{}} -For more information, see [Data manipulation](../../explore/ysql-language-features/data-manipulation). 
-{{}} - -## Load balance and failover using smart drivers - -YugabyteDB [smart drivers](../../drivers-orms/smart-drivers/) provide advanced cluster-aware load-balancing capabilities that enables your applications to send requests to multiple nodes in the cluster just by connecting to one node. You can also set a fallback hierarchy by assigning priority to specific regions and ensuring that connections are made to the region with the highest priority, and then fall back to the region with the next priority in case the high-priority region fails. - -{{}} -For more information, see [Load balancing with smart drivers](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#load-balancing-with-smart-driver). -{{}} - -## Make sure the application uses new nodes - -When a cluster is expanded, newly added nodes do not automatically start to receive client traffic. Regardless of the language of the driver or whether you are using a smart driver, the application must either explicitly request new connections or, if it is using a pooling solution, it can configure the pooler to recycle connections periodically (for example, by setting maxLifetime and/or idleTimeout). - -## Scale your application with connection pools - -Set up different pools with different load balancing policies as needed for your application to scale by using popular pooling solutions such as HikariCP and Tomcat along with YugabyteDB [smart drivers](../../drivers-orms/smart-drivers/). - -{{}} -For more information, see [Connection pooling](../../drivers-orms/smart-drivers/#connection-pooling). +For more information, see [Data manipulation](../../../explore/ysql-language-features/data-manipulation). {{}} -### Database migrations and connection pools - -In some cases, connection pools may trigger unexpected errors while running a sequence of database migrations or other DDL operations. 
- -Because YugabyteDB is distributed, it can take a while for the result of a DDL to fully propagate to all caches on all nodes in a cluster. As a result, after a DDL statement completes, the next DDL statement that runs right afterwards on a different PostgreSQL connection may, in rare cases, see errors such as `duplicate key value violates unique constraint "pg_attribute_relid_attnum_index"` (see issue {{}}). It is recommended to use a single connection while running a sequence of DDL operations, as is common with application migration scripts with tools such as Flyway or Active Record. - -## Use YSQL Connection Manager - -YugabyteDB includes a built-in connection pooler, YSQL Connection Manager {{}}, which provides the same connection pooling advantages as other external pooling solutions, but without many of their limitations. As the manager is bundled with the product, it is convenient to manage, monitor, and configure the server connections. - -For more information, refer to the following: - -- [YSQL Connection Manager](../../explore/going-beyond-sql/connection-mgr-ysql/) -- [Built-in Connection Manager Turns Key PostgreSQL Weakness into a Strength](https://www.yugabyte.com/blog/connection-pooling-management/) - ## Re-use query plans with prepared statements -Whenever possible, use [prepared statements](../../api/ysql/the-sql-language/statements/perf_prepare/) to ensure that YugabyteDB can re-use the same query plan and eliminate the need for a server to parse the query on each operation. +Whenever possible, use [prepared statements](../../../api/ysql/the-sql-language/statements/perf_prepare/) to ensure that YugabyteDB can re-use the same query plan and eliminate the need for a server to parse the query on each operation. {{}} -When using server-side pooling, avoid explicit PREPARE and EXECUTE calls and use protocol-level prepared statements instead. 
Explicit prepare/execute calls can make connections sticky, which prevents you from realizing the benefits of using YSQL Connection Manager{{}} and server-side pooling. +When using server-side pooling, avoid explicit PREPARE and EXECUTE calls and use protocol-level prepared statements instead. Explicit prepare/execute calls can make connections sticky, which prevents you from realizing the benefits of using YSQL Connection Manager{{}} and server-side pooling. Depending on your driver, you may have to set some parameters to leverage prepared statements. For example, Npgsql supports automatic preparation using the Max Auto Prepare and Auto Prepare Min Usages connection parameters, which you add to your connection string as follows: @@ -232,14 +193,14 @@ For more details, see [Prepared statements in PL/pgSQL](https://dev.to/aws-heroe Use BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE for batch or long-running jobs, which need a consistent snapshot of the database without interfering, or being interfered with by other transactions. {{}} -For more details, see [Large scans and batch jobs](../../develop/learn/transactions/transactions-performance-ysql/#large-scans-and-batch-jobs). +For more details, see [Large scans and batch jobs](../../../develop/learn/transactions/transactions-performance-ysql/#large-scans-and-batch-jobs). {{}} ## JSONB datatype -Use the [JSONB](../../api/ysql/datatypes/type_json) datatype to model JSON data; that is, data that doesn't have a set schema but has a truly dynamic schema. +Use the [JSONB](../../../api/ysql/datatypes/type_json) datatype to model JSON data; that is, data that doesn't have a set schema but has a truly dynamic schema. -JSONB in YSQL is the same as the [JSONB datatype in PostgreSQL](https://www.postgresql.org/docs/15/datatype-json.html). +JSONB in YSQL is the same as the [JSONB datatype in PostgreSQL](https://www.postgresql.org/docs/15/datatype-json.html).
You can use JSONB to group less interesting or less frequently accessed columns of a table. @@ -261,13 +222,9 @@ YSQL also supports JSONB expression indexes, which can be used to speed up data For large or batch SELECT or DELETE that have to scan all tablets, you can parallelize your operation by creating queries that affect only a specific part of the tablet using the `yb_hash_code` function. {{}} -For more details, see [Distributed parallel queries](../../api/ysql/exprs/func_yb_hash_code/#distributed-parallel-queries). +For more details, see [Distributed parallel queries](../../../api/ysql/exprs/func_yb_hash_code/#distributed-parallel-queries). {{}} -## Single availability zone (AZ) deployments - -In single AZ deployments, you need to set the [yb-tserver](../../reference/configuration/yb-tserver) flag `--durable_wal_write=true` to not lose data if the whole data center goes down (For example, power failure). - ## Row size limit Big columns add up when you select full or multiple rows. For consistent latency or performance, it is recommended keeping the size under 10MB or less, and a maximum of 32MB. @@ -278,43 +235,23 @@ For consistent latency or performance, it is recommended to size columns in the ## TRUNCATE tables instead of DELETE -[TRUNCATE](../../api/ysql/the-sql-language/statements/ddl_truncate/) deletes the database files that store the table data and is much faster than [DELETE](../../api/ysql/the-sql-language/statements/dml_delete/), which inserts a _delete marker_ for each row in transactions that are later removed from storage during compaction runs. +[TRUNCATE](../../../api/ysql/the-sql-language/statements/ddl_truncate/) deletes the database files that store the table data and is much faster than [DELETE](../../../api/ysql/the-sql-language/statements/dml_delete/), which inserts a _delete marker_ for each row in transactions that are later removed from storage during compaction runs. {{}} -Currently, TRUNCATE is not transactional. 
Also, similar to PostgreSQL, TRUNCATE is not MVCC-safe. For more details, see [TRUNCATE](../../api/ysql/the-sql-language/statements/ddl_truncate/). +Currently, TRUNCATE is not transactional. Also, similar to PostgreSQL, TRUNCATE is not MVCC-safe. For more details, see [TRUNCATE](../../../api/ysql/the-sql-language/statements/ddl_truncate/). {{}} ## Minimize the number of tablets you need Each table and index is split into tablets and each tablet has overhead. The more tablets you need, the bigger your universe will need to be. See [allowing for tablet replica overheads](#allowing-for-tablet-replica-overheads) for how the number of tablets affects how big your universe needs to be. -Each table and index consists of several tablets based on the [--ysql_num_shards_per_tserver](../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. +Each table and index consists of several tablets based on the [--ysql_num_shards_per_tserver](../../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. You can try one of the following methods to reduce the number of tablets: -- Use [colocation](../../explore/colocation/) to group small tables into 1 tablet. -- Reduce number of tablets-per-table using the [--ysql_num_shards_per_tserver](../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. -- Use the [SPLIT INTO](../../api/ysql/the-sql-language/statements/ddl_create_table/#split-into) clause when creating a table. -- Start with few tablets and use [automatic tablet splitting](../../architecture/docdb-sharding/tablet-splitting/). +- Use [colocation](../../../explore/colocation/) to group small tables into 1 tablet. +- Reduce number of tablets-per-table using the [--ysql_num_shards_per_tserver](../../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. +- Use the [SPLIT INTO](../../../api/ysql/the-sql-language/statements/ddl_create_table/#split-into) clause when creating a table. 
+- Start with few tablets and use [automatic tablet splitting](../../../architecture/docdb-sharding/tablet-splitting/). Note that multiple tablets can allow work to proceed in parallel so you may not want every table to have only one tablet. - -## Allow for tablet replica overheads - -Although you can manually provision the amount of memory each TServer uses using flags ([--memory_limit_hard_bytes](../../reference/configuration/yb-tserver/#memory-limit-hard-bytes) or [--default_memory_limit_to_ram_ratio](../../reference/configuration/yb-tserver/#default-memory-limit-to-ram-ratio)), this can be tricky as you need to take into account how much memory the kernel needs, along with the PostgreSQL processes and any Master process that is going to be colocated with the TServer. - -Accordingly, you should use the [--use_memory_defaults_optimized_for_ysql](../../reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql) flag, which gives good memory division settings for using YSQL, optimized for your node's size. - -If this flag is true, then the [memory division flag defaults](../../reference/configuration/yb-tserver/#memory-division-flags) change to provide much more memory for PostgreSQL; furthermore, they optimize for the node size. - -Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. - -## Settings for CI and CD integration tests - -You can set certain flags to increase performance using YugabyteDB in CI and CD automated test scenarios as follows: - -- Point the flags `--fs_data_dirs`, and `--fs_wal_dirs` to a RAMDisk directory to make DML, DDL, cluster creation, and cluster deletion faster, ensuring that data is not written to disk. -- Set the flag `--yb_num_shards_per_tserver=1`. Reducing the number of shards lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. 
-- Use colocated databases in YSQL. Colocation lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. -- Set the flag `--replication_factor=1` for test scenarios, as keeping the data three way replicated (default) is not necessary. Reducing that to 1 reduces space usage and increases performance. -- Use `TRUNCATE table1,table2,table3..tablen;` instead of CREATE TABLE, and DROP TABLE between test cases. diff --git a/docs/content/preview/develop/change-data-capture/using-logical-replication/_index.md b/docs/content/preview/develop/change-data-capture/using-logical-replication/_index.md index c0e7dea9479c..85a88583d7a1 100644 --- a/docs/content/preview/develop/change-data-capture/using-logical-replication/_index.md +++ b/docs/content/preview/develop/change-data-capture/using-logical-replication/_index.md @@ -83,7 +83,7 @@ For reference documentation, see [YugabyteDB Connector](./yugabytedb-connector/) ## Limitations -- LSN Comparisons Across Slots. +- Log Sequence Number ([LSN](../using-logical-replication/key-concepts/#lsn-type)) Comparisons Across Slots. In the case of YugabyteDB, the LSN  does not represent the byte offset of a WAL record. Hence, arithmetic on LSN and any other usages of the LSN making this assumption will not work. Also, currently, comparison of LSN values from messages coming from different replication slots is not supported. 
diff --git a/docs/content/preview/develop/change-data-capture/using-logical-replication/key-concepts.md b/docs/content/preview/develop/change-data-capture/using-logical-replication/key-concepts.md index 7a1a225cf13d..89c5f5ef922e 100644 --- a/docs/content/preview/develop/change-data-capture/using-logical-replication/key-concepts.md +++ b/docs/content/preview/develop/change-data-capture/using-logical-replication/key-concepts.md @@ -22,7 +22,7 @@ The YugabyteDB logical replication feature uses [PostgreSQL Logical Replication] A replication slot represents a stream of changes that can be replayed to a client in the order they were made on the origin server. Each slot streams a sequence of changes from a single database. -In logical replication, the fundamental unit of data transmission is a transaction. A logical slot emits each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting LSN falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. +In logical replication, the fundamental unit of data transmission is a transaction. A logical slot emits each change just once in normal operation. 
The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting Log Sequence Number ([LSN](#lsn-type)) falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. For more information, refer to [Replication slots](https://www.postgresql.org/docs/15/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS) in the PostgreSQL documentation. diff --git a/docs/content/preview/develop/change-data-capture/using-logical-replication/monitor.md b/docs/content/preview/develop/change-data-capture/using-logical-replication/monitor.md index b99df3588928..4702b6bc4b6e 100644 --- a/docs/content/preview/develop/change-data-capture/using-logical-replication/monitor.md +++ b/docs/content/preview/develop/change-data-capture/using-logical-replication/monitor.md @@ -66,7 +66,7 @@ Provides a list of all replication slots that currently exist on the database cl | active_pid | integer | The process ID of the session using this slot if the slot is currently actively being used. `NULL` if no replication process is ongoing. | | xmin | xid | The oldest transaction that this slot needs the database to retain. | | catalog_xmin | xid | Not applicable for YSQL. Always set to xmin. | -| restart_lsn | pg_lsn | The LSN of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. 
| +| restart_lsn | pg_lsn | The Log Sequence Number ([LSN](../key-concepts/#lsn-type)) of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. | | confirmed_flush_lsn | pg_lsn | The LSN up to which the logical slot's consumer has confirmed receiving data. Data older than this is not available anymore. Transactions with commit LSN lower than the `confirmed_flush_lsn` are not available anymore. | | yb_stream_id | text | UUID of the CDC stream | | yb_restart_commit_ht | int8 | A uint64 representation of the commit Hybrid Time corresponding to the `restart_lsn`. This can be used by the client (like YugabyteDB connector) to perform a consistent snapshot (as of the `consistent_point`) in the case when a replication slot already exists. | diff --git a/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md b/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md index 5a002e877290..2c8789a9a317 100644 --- a/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md +++ b/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md @@ -482,7 +482,7 @@ No default. Specifies the criteria for performing a snapshot when the connector starts: * `initial` - The connector performs a snapshot only when no offsets have been recorded for the logical server name. -* `never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the YugabyteDB logical replication slot was created on the server. 
The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL. +* `never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored Log Sequence Number ([LSN](../key-concepts/#lsn-type)) in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the YugabyteDB logical replication slot was created on the server. The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL. * `initial_only` - The connector performs an initial snapshot and then stops, without processing any subsequent changes. Default: `initial` diff --git a/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md b/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md index 54e7bf92489b..6f5d6c1c7262 100644 --- a/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md +++ b/docs/content/preview/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md @@ -39,7 +39,7 @@ The connector produces a change event for every row-level insert, update, and de YugabyteDB normally purges write-ahead log (WAL) segments after some period of time. This means that the connector does not have the complete history of all changes that have been made to the database. Therefore, when the YugabyteDB connector first connects to a particular YugabyteDB database, it starts by performing a consistent snapshot of each of the configured tables. After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made. 
This way, the connector starts with a consistent view of all of the data, and does not omit any changes that were made while the snapshot was being taken. -The connector is tolerant of failures. As the connector reads changes and produces events, it records the Log Sequence Number (LSN) for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. +The connector is tolerant of failures. As the connector reads changes and produces events, it records the Log Sequence Number ([LSN](../key-concepts/#lsn-type)) for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. {{< tip title="Use UTF-8 encoding" >}} diff --git a/docs/content/preview/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md b/docs/content/preview/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md index fe07900fb8a2..898cd01f38a9 100644 --- a/docs/content/preview/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md +++ b/docs/content/preview/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md @@ -122,7 +122,7 @@ The YugabyteDB gRPC Connector typically spends the vast majority of its time str The connector keeps polling for changes and whenever there is a change, the connector processes them, converts them to a specific format (Protobuf or JSON in the case of the Debezium plugin) and writes them on an output stream, which can then be consumed by clients. -The connector acts as a YugabyteDB client. When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the LSN of the event. 
The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. +The connector acts as a YugabyteDB client. When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the Log Sequence Number ([LSN](../../using-logical-replication/key-concepts/#lsn-type)) of the event. The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. Periodically, Kafka Connect records the most recent offset in another Kafka topic. The offset indicates source-specific position information that Debezium includes with each event. diff --git a/docs/content/preview/develop/learn/transactions/acid-transactions-ysql.md b/docs/content/preview/develop/learn/transactions/acid-transactions-ysql.md index 7af80637c90c..e1792a29dceb 100644 --- a/docs/content/preview/develop/learn/transactions/acid-transactions-ysql.md +++ b/docs/content/preview/develop/learn/transactions/acid-transactions-ysql.md @@ -73,7 +73,7 @@ YugabyteDB supports three kinds of isolation levels to support different applica | Level | Description | | :---- | :---------- | | [Repeatable Read (Snapshot)](../../../../explore/transactions/isolation-levels/#snapshot-isolation) | Only the data that is committed before the transaction began is visible to the transaction. Effectively, the transaction sees the snapshot of the database as of the start of the transaction. 
{{}}Applications using this isolation level should be designed to [retry](../transactions-retries-ysql#client-side-retry) on serialization failures.{{}} | -| [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation){{}} | Each statement of the transaction sees the latest data committed by any concurrent transaction just before the execution of the statement. If another transaction has modified a row related to the current transaction, the current transaction waits for the other transaction to commit or rollback its changes. {{}} The server internally waits and retries on conflicts, so applications [need not retry](../transactions-retries-ysql#automatic-retries) on serialization failures.{{}} | +| [Read Committed](../../../../explore/transactions/isolation-levels/#read-committed-isolation) | Each statement of the transaction sees the latest data committed by any concurrent transaction just before the execution of the statement. If another transaction has modified a row related to the current transaction, the current transaction waits for the other transaction to commit or roll back its changes. {{}} The server internally waits and retries on conflicts, so applications [need not retry](../transactions-retries-ysql#automatic-retries) on serialization failures.{{}} | | [Serializable](../../../../explore/transactions/isolation-levels/#serializable-isolation) | This is the strictest isolation level and has the effect of all transactions being executed in a serial manner, one after the other rather than in parallel. 
{{}} Applications using this isolation level should be designed to [retry](../transactions-retries-ysql/#client-side-retry) on serialization failures.{{}} | {{}} diff --git a/docs/content/preview/develop/postgresql-compatibility.md b/docs/content/preview/develop/postgresql-compatibility.md index 84a85ea7a459..2f62d8ab02d6 100644 --- a/docs/content/preview/develop/postgresql-compatibility.md +++ b/docs/content/preview/develop/postgresql-compatibility.md @@ -17,21 +17,18 @@ rightNav: YugabyteDB is a [PostgreSQL-compatible](https://www.yugabyte.com/tech/postgres-compatibility/) distributed database that supports the majority of PostgreSQL syntax. YugabyteDB is methodically expanding its features to deliver PostgreSQL-compatible performance that can substantially improve your application's efficiency. -To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. Here are the features that are part of the EPCM mode. +To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. The following features are part of EPCM. 
| Feature | Flag/Configuration Parameter | EA | GA | | :--- | :--- | :--- | :--- | -| [Read committed](#read-committed) | [yb_enable_read_committed_isolation](../../reference/configuration/yb-tserver/#ysql-default-transaction-isolation) | {{}} | | +| [Read committed](#read-committed) | [yb_enable_read_committed_isolation](../../reference/configuration/yb-tserver/#ysql-default-transaction-isolation) | {{}} | {{}} | | [Wait-on-conflict](#wait-on-conflict-concurrency) | [enable_wait_queues](../../reference/configuration/yb-tserver/#enable-wait-queues) | {{}} | {{}} | | [Cost based optimizer](#cost-based-optimizer) | [yb_enable_base_scans_cost_model](../../reference/configuration/yb-tserver/#yb-enable-base-scans-cost-model) | {{}} | | | [Batch nested loop join](#batched-nested-loop-join) | [yb_enable_batchednl](../../reference/configuration/yb-tserver/#yb-enable-batchednl) | {{}} | {{}} | | [Ascending indexing by default](#default-ascending-indexing) | [yb_use_hash_splitting_by_default](../../reference/configuration/yb-tserver/#yb-use-hash-splitting-by-default) | {{}} | | | [YugabyteDB bitmap scan](#yugabytedb-bitmap-scan) | [yb_enable_bitmapscan](../../reference/configuration/yb-tserver/#yb-enable-bitmapscan) | {{}} | {{}} | | [Efficient communication
between PostgreSQL and DocDB](#efficient-communication-between-postgresql-and-docdb) | [pg_client_use_shared_memory](../../reference/configuration/yb-tserver/#pg-client-use-shared-memory) | {{}} | {{}} | - -| Planned Feature | Flag/Configuration Parameter | EA | -| :--- | :--- | :--- | -| [Parallel query](#parallel-query) | | Planned | +| [Parallel query](#parallel-query) | [yb_enable_parallel_append](../../reference/configuration/yb-tserver/#yb-enable-parallel-append) | {{}} | v2025.1 | ## Feature availability @@ -110,7 +107,7 @@ Default ascending indexing provides feature compatibility and is the default in Configuration parameter: `yb_enable_bitmapscan=true` -Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing AND and OR conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). +Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing `AND` and `OR` conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). ### Efficient communication between PostgreSQL and DocDB @@ -118,13 +115,21 @@ Configuration parameter: `pg_client_use_shared_memory=true` Enable more efficient communication between YB-TServer and PostgreSQL using shared memory. This feature provides improved performance parity. 
-## Planned features +### Parallel query + +{{< note title="Note" >}} -The following features are planned for EPCM in future releases. +Parallel query is {{}} in v2024.2.3 but has not yet been added to EPCM. -### Parallel query +{{< /note >}} -Enables the use of PostgreSQL [parallel queries](https://www.postgresql.org/docs/15/parallel-query.html). Using parallel queries, the query planner can devise plans that leverage multiple CPUs to answer queries faster. Parallel query provides feature compatibility and improved performance parity. +Configuration parameters: `yb_enable_parallel_append=true` `yb_parallel_range_rows` + +Enables the use of [PostgreSQL parallel queries](https://www.postgresql.org/docs/15/parallel-query.html). Using parallel queries, the query planner can devise plans that leverage multiple CPUs to answer queries faster. Currently, YugabyteDB supports parallel query for colocated tables. Support for hash- and range-sharded tables is planned. Parallel query provides feature compatibility and improved performance parity. + +{{}} +To learn about parallel queries, see [Parallel queries](../../explore/ysql-language-features/advanced-features/parallel-query/). +{{}} ## Enable EPCM diff --git a/docs/content/preview/explore/transactions/isolation-levels.md b/docs/content/preview/explore/transactions/isolation-levels.md index bb3e036c3374..22fab24ba2fd 100644 --- a/docs/content/preview/explore/transactions/isolation-levels.md +++ b/docs/content/preview/explore/transactions/isolation-levels.md @@ -35,11 +35,11 @@ YugabyteDB supports three isolation levels in the transactional layer: - Serializable - Snapshot -- Read committed {{}} +- Read committed The default isolation level for the YSQL API is effectively Snapshot (that is, the same as PostgreSQL's `REPEATABLE READ`) because, by default, Read committed, which is the YSQL API and PostgreSQL _syntactic_ default, maps to Snapshot isolation. 
-To enable Read committed (currently in [Early Access](/preview/releases/versioning/#feature-maturity)), you must set the YB-TServer flag `yb_enable_read_committed_isolation` to `true`. By default this flag is `false` and the Read committed isolation level of the YugabyteDB transactional layer falls back to the stricter Snapshot isolation (in which case `READ COMMITTED` and `READ UNCOMMITTED` of YSQL also in turn use Snapshot isolation). +To enable Read committed, you must set the YB-TServer flag `yb_enable_read_committed_isolation` to `true`. By default this flag is `false` and the Read committed isolation level of the YugabyteDB transactional layer falls back to the stricter Snapshot isolation (in which case `READ COMMITTED` and `READ UNCOMMITTED` of YSQL also in turn use Snapshot isolation). {{< tip title="Tip" >}} @@ -57,8 +57,8 @@ The following table shows the mapping between the PostgreSQL isolation levels in | PostgreSQL Isolation | YugabyteDB Equivalent | Dirty Read | Non-repeatable Read | Phantom Read | Serialization Anomaly | | :------------------- | :------------------------ | :--------- | :------------------ | :----------- | :-------------------- | -| Read uncommitted | Read Committed {{}} | Allowed, but not in YSQL | Possible | Possible | Possible | -| Read committed | Read Committed {{}} | Not possible | Possible | Possible | Possible | +| Read uncommitted | Read Committed | Allowed, but not in YSQL | Possible | Possible | Possible | +| Read committed | Read Committed | Not possible | Possible | Possible | Possible | | Repeatable read | Snapshot | Not possible | Not possible | Allowed, but not in YSQL | Possible | | Serializable | Serializable | Not possible | Not possible | Not possible | Not possible | @@ -352,7 +352,7 @@ SELECT * FROM example; ## Read committed isolation -{{}}Read committed isolation is the same as Snapshot isolation, except that every statement in the transaction is aware of all data that has been committed before it has been 
issued (this implicitly means that the statement will see a consistent snapshot). In other words, each statement works on a new snapshot of the database that includes everything that has been committed before the statement is issued. Conflict detection is the same as in Snapshot isolation. +Read committed isolation is the same as Snapshot isolation, except that every statement in the transaction is aware of all data that has been committed before it has been issued (this implicitly means that the statement will see a consistent snapshot). In other words, each statement works on a new snapshot of the database that includes everything that has been committed before the statement is issued. Conflict detection is the same as in Snapshot isolation. Consider an example of transactions' behavior under the Read committed isolation level. diff --git a/docs/content/preview/explore/ysql-language-features/advanced-features/parallel-query.md b/docs/content/preview/explore/ysql-language-features/advanced-features/parallel-query.md new file mode 100644 index 000000000000..893e5bbc8efb --- /dev/null +++ b/docs/content/preview/explore/ysql-language-features/advanced-features/parallel-query.md @@ -0,0 +1,39 @@ +--- +title: Parallel queries +linkTitle: Parallel queries +description: Parallel queries in YSQL +tags: + feature: early-access +menu: + preview: + identifier: advanced-features-parallel-query + parent: advanced-features + weight: 600 +type: docs +--- + +YugabyteDB supports the use of [PostgreSQL parallel queries](https://www.postgresql.org/docs/15/parallel-query.html). Using parallel queries, the [query planner](../../../../architecture/query-layer/planner-optimizer/) can devise plans that leverage multiple CPUs to answer queries faster. + +Currently, YugabyteDB supports parallel queries for [colocated tables](../../../colocation/); support for hash- and range-sharded tables is planned. + +To enable and configure parallel queries, set the following configuration parameters. 
+ +| Parameter | Description | Default | +| :--- | :--- | :--- | +| yb_enable_parallel_append | Enables the planner's use of parallel append plans. To enable parallel query, set this to true. | false | +| yb_parallel_range_rows | The number of rows to plan per parallel worker. To enable parallel query, set this to a value other than 0. (Recommended: 10000) | 0 | +| yb_parallel_range_size | Approximate size of parallel range for DocDB relation scans. | 1MB | + +In addition, you can use the following PostgreSQL configuration parameters to configure parallel queries: + +- Optimize the number of workers used by the parallel query. + - [max_parallel_workers](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-WORKERS) + - [max_parallel_workers_per_gather](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-WORKERS-PER-GATHER) + - [max_parallel_maintenance_workers](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-MAINTENANCE-WORKERS) +- Optimize cost of parallel plan to achieve the optimal plan. + - [parallel_setup_cost](https://www.postgresql.org/docs/15/runtime-config-query.html#GUC-PARALLEL-SETUP-COST) + - [parallel_tuple_cost](https://www.postgresql.org/docs/15/runtime-config-query.html#GUC-PARALLEL-TUPLE-COST) +- Enable or disable the query planner's use of hash-join plan types with parallel hash. Has no effect if hash-join plans are not also enabled. The default is on. + - [enable_parallel_hash](https://www.postgresql.org/docs/15/runtime-config-query.html#RUNTIME-CONFIG-QUERY-ENABLE) + +For more information, refer to [How Parallel Query Works](https://www.postgresql.org/docs/15/how-parallel-query-works.html) in the PostgreSQL documentation. 
diff --git a/docs/content/preview/explore/ysql-language-features/pg-extensions/_index.md b/docs/content/preview/explore/ysql-language-features/pg-extensions/_index.md index c7a921bbf5ae..19ea5dc4d446 100644 --- a/docs/content/preview/explore/ysql-language-features/pg-extensions/_index.md +++ b/docs/content/preview/explore/ysql-language-features/pg-extensions/_index.md @@ -36,6 +36,8 @@ YugabyteDB supports the following [PostgreSQL modules](https://www.postgresql.or | Module | Description | | :----- | :---------- | | [auto_explain](extension-auto-explain/) | Provides a means for logging execution plans of slow statements automatically. | +| cube | Implements a data type cube for representing multidimensional cubes.
For more information, see [cube](https://www.postgresql.org/docs/15/cube.html) in the PostgreSQL documentation. | +| earthdistance | Provides two different approaches to calculating great circle distances on the surface of the Earth.
For more information, see [earthdistance](https://www.postgresql.org/docs/15/earthdistance.html) in the PostgreSQL documentation. | | [file_fdw](extension-file-fdw/) | Provides the foreign-data wrapper file_fdw, which can be used to access data files in the server's file system. | | [fuzzystrmatch](extension-fuzzystrmatch/) | Provides several functions to determine similarities and distance between strings. | | hstore | Implements the hstore data type for storing sets of key-value pairs in a single PostgreSQL value.
For more information, see [hstore](https://www.postgresql.org/docs/15/hstore.html) in the PostgreSQL documentation. | @@ -55,7 +57,7 @@ YugabyteDB supports the following additional extensions, some of which you must |
Extension
|
Status
| Description | | :-------- | :----- | :---------- | -| [Anonymizer](extension-pganon/) {{}}| Pre-bundled | Mask or replace personally identifiable information (PII) or commercially sensitive data in a database. | +| [Anonymizer](extension-pganon/) {{}}| Pre-bundled | Mask or replace personally identifiable information (PII) or commercially sensitive data in a database. | | [HypoPG](extension-hypopg/) | Pre-bundled | Create hypothetical indexes to test whether an index can increase performance for problematic queries without consuming any actual resources. | | Orafce | Pre-bundled | Provides compatibility with Oracle functions and packages that are either missing or implemented differently in YugabyteDB and PostgreSQL. This compatibility layer can help you port your Oracle applications to YugabyteDB.
For more information, see the [Orafce](https://github.com/orafce/orafce) documentation. | | [PGAudit](../../../secure/audit-logging/audit-logging-ysql/) | Pre-bundled | The PostgreSQL Audit Extension (pgaudit) provides detailed session and/or object audit logging via the standard PostgreSQL logging facility. | diff --git a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pganon.md b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pganon.md index 4a07dc4c6e02..6f75a279fc00 100644 --- a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pganon.md +++ b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pganon.md @@ -4,7 +4,7 @@ headerTitle: PostgreSQL Anonymizer extension linkTitle: Anonymizer description: Using the PostgreSQL Anonymizer extension in YugabyteDB tags: - feature: tech-preview + feature: early-access menu: preview: identifier: extension-pganon @@ -21,13 +21,13 @@ YugabyteDB uses v1.3.1 of PostgreSQL Anonymizer. ## Enable Anonymizer -To enable the Anonymizer extension, add `anon` to `shared_preload_libraries` in the PostgreSQL server configuration parameters using the YB-TServer [--ysql_pg_conf_csv](../../../../reference/configuration/yb-tserver/#ysql-pg-conf-csv) flag: +While in early access, to enable the Anonymizer extension, you set the YB-TServer `--enable_pg_anonymizer` flag to true. For example, using [yugabyted](../../../../reference/configuration/yugabyted/), you would do the following: ```sh ---ysql_pg_conf_csv=shared_preload_libraries=anon +./bin/yugabyted start --tserver_flags="enable_pg_anonymizer=true" ``` -Note that modifying `shared_preload_libraries` requires restarting the YB-TServer. +Note that modifying `--enable_pg_anonymizer` requires restarting the YB-TServer. 
## Customize Anonymizer diff --git a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pgvector.md b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pgvector.md index 91cbb9a3a8c2..80bcac87249d 100644 --- a/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pgvector.md +++ b/docs/content/preview/explore/ysql-language-features/pg-extensions/extension-pgvector.md @@ -215,6 +215,6 @@ A higher `ef_construction` value provides faster recall at the cost of index bui ## Learn more - Tutorial: [Build and Learn](/preview/tutorials/build-and-learn/) -- Tutorial: [Build scalable generative AI applications with Azure OpenAI and YugabyteDB](/preview/tutorials/azure/azure-openai/) +- Tutorials: [Build scalable generative AI applications with YugabyteDB](/preview/tutorials/ai/) - [PostgreSQL pgvector: Getting Started and Scaling](https://www.yugabyte.com/blog/postgresql-pgvector-getting-started/) - [Multimodal Search with PostgreSQL pgvector](https://www.yugabyte.com/blog/postgresql-pgvector-multimodal-search/) diff --git a/docs/content/preview/faq/yugabyte-platform.md b/docs/content/preview/faq/yugabyte-platform.md index 32d005eac4a5..b52479262f54 100644 --- a/docs/content/preview/faq/yugabyte-platform.md +++ b/docs/content/preview/faq/yugabyte-platform.md @@ -49,7 +49,7 @@ You install YugabyteDB Anywhere using a standalone installer that you download f {{< note title="Replicated end of life" >}} -YugabyteDB Anywhere was previously installed using Replicated. However, YugabyteDB Anywhere ended support for Replicated installation at the end of 2024. You can migrate existing Replicated YugabyteDB Anywhere installations using YBA Installer. See [Migrate from Replicated](../../yugabyte-platform/install-yugabyte-platform/migrate-replicated/). +YugabyteDB Anywhere was previously installed using Replicated. However, YugabyteDB Anywhere ended support for Replicated installation at the end of 2024. 
You can migrate existing Replicated YugabyteDB Anywhere installations using YBA Installer. See [Migrate from Replicated](/v2.20/yugabyte-platform/install-yugabyte-platform/migrate-replicated/). {{< /note >}} diff --git a/docs/content/preview/launch-and-manage/monitor-and-alert/xcluster-monitor.md b/docs/content/preview/launch-and-manage/monitor-and-alert/xcluster-monitor.md index c768fd88f8cd..501e1325f232 100644 --- a/docs/content/preview/launch-and-manage/monitor-and-alert/xcluster-monitor.md +++ b/docs/content/preview/launch-and-manage/monitor-and-alert/xcluster-monitor.md @@ -47,6 +47,7 @@ Use the [xcluster status](../../../reference/configuration/yugabyted/#status-1) ``` Example output: + ```output Outbound xCluster Replications: @@ -64,7 +65,6 @@ Outbound xCluster Replications: +------------------------------------------------------------------------------------------+ No Inbound xCluster replications found for this cluster. - ``` @@ -127,13 +127,13 @@ In transactional xCluster replication setups, the current xCluster safe time is
- ```sh ./bin/yugabyted xcluster status \ [--replication_id ] ``` Example output: + ```output No Outbound xCluster replications found for this cluster. Inbound xCluster Replications: @@ -153,6 +153,7 @@ Inbound xCluster Replications: | : Safe Time Skew(micro secs): 0.81 | +------------------------------------------------------------------------------------------+ ``` +
@@ -165,6 +166,7 @@ Inbound xCluster Replications: ``` Example output: + ```output [ { diff --git a/docs/content/preview/manage/data-migration/migrate-from-postgres.md b/docs/content/preview/manage/data-migration/migrate-from-postgres.md index eefdef4873fe..c42c3443b9d5 100644 --- a/docs/content/preview/manage/data-migration/migrate-from-postgres.md +++ b/docs/content/preview/manage/data-migration/migrate-from-postgres.md @@ -247,8 +247,8 @@ For more details, see [Live migration with fall-back](/preview/yugabyte-voyager/ When porting an existing PostgreSQL application to YugabyteDB you can follow a set of best practices to get the best out of your new deployment. -{{}} -For a full list of tips and tricks for high performance and availability, see [Best practices](../../../develop/best-practices-ysql/). +{{}} +For a full list of tips and tricks for high performance and availability, see [Best practices](../../../develop/best-practices-develop/). {{}} ### Retry transactions on conflicts diff --git a/docs/content/preview/reference/configuration/operating-systems.md b/docs/content/preview/reference/configuration/operating-systems.md index b67c257e2b59..d89fa8d32a36 100644 --- a/docs/content/preview/reference/configuration/operating-systems.md +++ b/docs/content/preview/reference/configuration/operating-systems.md @@ -21,6 +21,7 @@ Unless otherwise noted, operating systems are supported by all supported version | :--------------- | :------------- | :------------- | :---- | | AlmaLinux 8 | {{}} | {{}} | | | AlmaLinux 9 | {{}} | {{}} | Default for YugabyteDB Anywhere-deployed nodes | +| Amazon Linux 2023 | {{}} | {{}} | Supported in v2024.2.3 and later | | Oracle Linux 8 | {{}} | | | | Red Hat Enterprise Linux 8 | {{}} | | | | Red Hat Enterprise Linux 8 CIS Hardened | {{}} | | | diff --git a/docs/content/preview/reference/configuration/yb-tserver.md b/docs/content/preview/reference/configuration/yb-tserver.md index e7c37a824d67..9b70c69a78a7 100644 --- 
a/docs/content/preview/reference/configuration/yb-tserver.md +++ b/docs/content/preview/reference/configuration/yb-tserver.md @@ -767,13 +767,13 @@ Specifies the default transaction isolation level. Valid values: `SERIALIZABLE`, `REPEATABLE READ`, `READ COMMITTED`, and `READ UNCOMMITTED`. -Default: `READ COMMITTED` {{}} +Default: `READ COMMITTED` -Read Committed support is currently in [Early Access](/preview/releases/versioning/#feature-maturity). [Read Committed Isolation](../../../explore/transactions/isolation-levels/) is supported only if the YB-TServer flag `yb_enable_read_committed_isolation` is set to `true`. By default this flag is `false` and in this case the Read Committed isolation level of the YugabyteDB transactional layer falls back to the stricter Snapshot Isolation (in which case `READ COMMITTED` and `READ UNCOMMITTED` of YSQL also in turn use Snapshot Isolation). +[Read Committed Isolation](../../../explore/transactions/isolation-levels/) is supported only if the YB-TServer flag `yb_enable_read_committed_isolation` is set to `true`. By default this flag is `false` and in this case the Read Committed isolation level of the YugabyteDB transactional layer falls back to the stricter Snapshot Isolation (in which case `READ COMMITTED` and `READ UNCOMMITTED` of YSQL also in turn use Snapshot Isolation). ##### --yb_enable_read_committed_isolation -{{}} Enables Read Committed Isolation. By default this flag is false and in this case `READ COMMITTED` (and `READ UNCOMMITTED`) isolation level of YSQL fall back to the stricter [Snapshot Isolation](../../../explore/transactions/isolation-levels/). See [--ysql_default_transaction_isolation](#ysql-default-transaction-isolation) flag for more details. +Enables Read Committed Isolation. By default this flag is false and in this case `READ COMMITTED` (and `READ UNCOMMITTED`) isolation level of YSQL fall back to the stricter [Snapshot Isolation](../../../explore/transactions/isolation-levels/). 
See [--ysql_default_transaction_isolation](#ysql-default-transaction-isolation) flag for more details. Default: `false` @@ -1812,13 +1812,13 @@ Default: 1024 ##### yb_enable_batchednl -{{}} Enable or disable the query planner's use of batched nested loop join. +Enable or disable the query planner's use of batched nested loop join. Default: true ##### yb_enable_base_scans_cost_model -{{}} Enables the YugabyteDB cost model for Sequential and Index scans. When enabling this parameter, you must run ANALYZE on user tables to maintain up-to-date statistics. +{{}} Enables the YugabyteDB cost model for Sequential and Index scans. When enabling this parameter, you must run ANALYZE on user tables to maintain up-to-date statistics. When enabling the cost based optimizer, ensure that [packed row](../../../architecture/docdb/packed-rows) for colocated tables is enabled by setting `ysql_enable_packed_row_for_colocated_table = true`. diff --git a/docs/content/preview/reference/get-started-guide.md b/docs/content/preview/reference/get-started-guide.md index ce0c75b7625e..fa3dd531bd8b 100644 --- a/docs/content/preview/reference/get-started-guide.md +++ b/docs/content/preview/reference/get-started-guide.md @@ -124,7 +124,7 @@ Find resources for getting started, migrating existing databases, using your dat [Distributed PostgreSQL Essentials for Developers: Hands-on Course](https://www.youtube.com/watch?v=rqJBFQ-4Hgk) : Build a scalable and fault-tolerant movie recommendation service. -[Best practices](../../develop/best-practices-ysql/) +[Best practices](../../develop/best-practices-develop/) : Tips and tricks to build applications for high performance and availability. 
[Drivers and ORMs](../../drivers-orms/) diff --git a/docs/content/preview/releases/yba-releases/v2.25.md b/docs/content/preview/releases/yba-releases/v2.25.md index 127a6110cd99..a853689ab83d 100644 --- a/docs/content/preview/releases/yba-releases/v2.25.md +++ b/docs/content/preview/releases/yba-releases/v2.25.md @@ -15,6 +15,191 @@ What follows are the release notes for all releases in the YugabyteDB Anywhere ( For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). +## v2.25.2.0 - May 20, 2025 {#v2.25.2.0} + +**Build:** `2.25.2.0-b359` + +### Download + + + +### Change log + +
+ View the detailed changelog + +### Improvements + +* Ensures unique zone names in each provider to avoid confusion and enhance clarity in the UI. PLAT-16367 +* Automatically deletes associated backup policies when a universe is removed. PLAT-17197 +* Reduces failover task execution time by skipping the `UpdateConsistencyCheck` subtask. PLAT-17037 +* Displays aggregated table replication status as the namespace status based on severity. PLAT-17273 +* Enables connection pooling during universe creation with new flags. PLAT-16688 +* Allows custom configuration of GCP connection draining timeout. PLAT-17356 +* Enables LDAP URL validation to support IPv6 addresses. PLAT-17180 +* Modifies PITR endpoints to return both taskUUID and pitrUUID. PLAT-16805 +* Adds a linter to the YBA CLI project for enhanced code formatting. PLAT-16887 +* Allows setting a custom timeout for `DeleteReplicationOnSource` during failover. PLAT-17038 +* Speeds up failover by skipping `createTransferXClusterCertsRemoveTasks` on the source universe. PLAT-17039 +* Enables optional `enable-pitr` flag for scheduled backups and corrects PITR command help text. PLAT-17031,PLAT-17058 + +### Bug fixes + +* Removes "Alerts are snoozed" text from the Health widget. PLAT-15744 +* Adds a bootstrap summary to the DR config creation modal to clarify which tables will be bootstrapped. PLAT-15973 +* Enables viewing specific TServer metrics on Kubernetes by adjusting metric query processing. PLAT-16268 +* Changes default label for tserver/master metrics from `HOSTNAME` to `EXPORTED_INSTANCE`. PLAT-16268 +* Refreshes KMS tokens at 70% TTL and hourly via YBA backend. PLAT-16290 +* Supports health checks for multiple installed NTP services. PLAT-16709 +* Now supports `awsHostedZoneName` in AWS provider edit payload to prevent failures. PLAT-16723 +* Switches SSL certificate verification to use fingerprint comparison, enhancing compatibility and reducing task failures. 
PLAT-16726 +* Ensures master statefulsets are not deployed in read replica clusters to avoid confusion and potential errors. PLAT-11348,PLAT-16727 +* Disables clock drift check for Kubernetes clusters and when disabled by config. PLAT-16819 +* Ensures the Metrics page in YBA handles proxy settings correctly. PLAT-16868 +* Enhances database health checks and process management for better stability and performance. PLAT-14999,PLAT-16895,PLAT-15742,PLAT-16197 +* Ensures all cloudInfo fields are merged in YBA UI before edit requests, preventing mischaracterized edits. PLAT-16924 +* Enables force deletion even if `DeleteBootstrapIds` subtask fails. PLAT-16982 +* Enhances RR cluster deletion by making it retryable, abortable, and classifying it as a placement modification task. PLAT-16991 +* Enhances node agent to anticipate certificate expiration and enable prompt renewal. PLAT-17056 +* Adds retry for disk mount/unmount during OS patching and ensures volume attachment before VM start. PLAT-17094 +* Re-enables node safety checks in YBM, ensuring nodes are safe to take down. PLAT-17097 +* Re-disables the cluster consistency check for YBM dual-NIC configurations. PLAT-17097 +* Enhances node agent installation for manual provisioning in YNP to be idempotent. PLAT-17141 +* Enables conditional validation for AWS keys based on IAM role settings. PLAT-17192 +* Enables clearer metrics and alerts for backup deletions. PLAT-17251 +* Ensures TLS toggle and cert rotation manage `YBC` flags on dedicated masters. PLAT-17472 +* Enhances Kubernetes support for Prometheus backups and restores, including retaining PostgreSQL dumps on restore. PLAT-8626 +* Switches the default YugabyteDB managed cloud image back to AlmaLinux 8.9. PLAT-15311 +* Ensures Kubernetes operator correctly handles storage configurations without setting default S3 attributes. PLAT-16760 +* Fixes Azure resource deletion by correctly reading the error code field. 
PLAT-16769 +* Ensures instance types exist before node addition in on-premises providers. PLAT-16810 +* Restores using KMS now function correctly due to improved field annotations in YBA CLI. PLAT-16811,PLAT-16783 +* Enables upgrading universes without unintended server cert rotation. PLAT-16812 +* Fixes the issue where changing timezone doesn't update on zoomed metrics graphs. PLAT-16833 +* Upgrades Prometheus in YBA installer to version 3.2.1, enhancing security. PLAT-16872 +* Upgrades Prometheus in helm charts to version 3.2.1, enhancing security. PLAT-16872 +* Upgrades PostgreSQL to version 14.17 to address critical security vulnerabilities. PLAT-16873 +* Upgrades key dependencies for enhanced security against critical vulnerabilities. PLAT-16873,PLAT-16874,PLAT-16876 +* Upgrades azcopy to version 10.28.0 to enhance security and performance. PLAT-16893 +* Upgrades dependencies to address security vulnerabilities in Netty, Json-smart, and Mina-core, ensuring increased safety against potential attacks. PLAT-16894 +* Prevents YBA crash loop caused by invalid OIDC configuration settings. PLAT-16905 +* Fixes issue where prometheus-based alerts for clock drift were not triggering. PLAT-16984 +* Fixes script error to correctly handle the 10th argument during PostgreSQL restore. PLAT-16990 +* Speeds up Azure blob deletion and handles backups more efficiently. PLAT-17040 +* Fixes the directory path for installing Clockbound binaries. PLAT-17135 +* Enhances the restore function to properly filter keyspaces during a single keyspace restore. PLAT-17146 +* Ensures K8s Helm override form correctly submits pre-check requests. PLAT-17184 +* Ensures `SetupYNP` only prepares the node agent package without creating an entry. PLAT-17194 +* Fixes issues with creating universes and editing read replicas when primary cluster payload is missing. PLAT-17224 +* Ensures Ansible provisioning validation works on Ubuntu by updating the scripting method. 
PLAT-17349 +* Enables retrying `CreateUniverse` for on-prem nodes by modifying preflight checks. PLAT-17368 +* Ensures YBA HA promotion success even if it fails midway after a restore. PLAT-17369 +* Ensures only `Running` tables are added to xCluster replication edits. PLAT-17387 +* Ensures node updates during tasks won't overwrite live data with stale information. PLAT-17405 +* Disables background node agent installer by default, but tracks universes needing migration. PLAT-17435,PLAT-17449 +* Resolves issue where xCluster edit command incorrectly removes tables from replication. PLAT-17521 +* Retries failed CREATE TABLESPACE queries up to 3 times to ensure success. PLAT-14388 +* Enables TLS certificate verification by default in the YBA CLI, adds `insecure` and `ca-cert` flags. PLAT-16083 +* Allows S3 bucket access through both global and private endpoints using the new `globalBucketAccess` field. PLAT-16571 +* Allows deleting Kubernetes universes even when paused. PLAT-16808 +* Enables Kubernetes-based backup and restore for Prometheus in YugabyteDB. PLAT-16824 +* Ensures `dedicatedNodes` is set to true for all Kubernetes universes. PLAT-16827 +* Enables more flexible regex matching for S3 Host Base domains. PLAT-16842 +* Blocks creation of cron-based universes in YNP to prevent health check failures. PLAT-16879 +* Simplifies the AsyncTask interface in the node agent, reducing method count. PLAT-16886 +* Ensures crontab binary exists before disabling services on Amazon Linux. PLAT-16902 +* Adds a refresh button to the slow queries UI for easier data updates. PLAT-16917 +* Fixes configuration display and saving issues for migrated universes from 2.20 to 2024.2. PLAT-16918 +* Enables scraping of node agent metrics through YBA proxy endpoint. PLAT-16939 +* Fixes UUID comparison in manual incremental backup creation. PLAT-16953 +* Appends `node_ip` to the config file to prevent race conditions. 
PLAT-16960 +* Fixes errors in health checks when changing node IPs manually. PLAT-16963 +* Groups all prechecks into a single subtask group for better user experience. PLAT-16965 +* Removes duplicate case in switch statement to fix compilation errors. PLAT-16974 +* Enhances PerfAdvisor by ignoring new fields and supporting custom temp directories. PLAT-14028,PLAT-17020 +* Fixes incorrect data-test-id for Full Move button and adds translation to Run Prechecks. PLAT-17034 +* Reduces UI flickering in task tabs during database upgrades. PLAT-17057 +* Fixes deadlock issue in backups by using sequential streams instead of parallel streams. PLAT-17063 +* Moves YSQL server health checks to after cluster configuration updates during universe creation. PLAT-17085 +* Ensures YSQL health checks run successfully after cluster configuration updates during universe creation. PLAT-17085 +* Ensures Prometheus data directory script runs properly using `sh` and moves directories correctly. PLAT-17091 +* Enables `xCluster` creation only with specified table UUIDs despite new flags. PLAT-17105 +* Fixes xCluster creation in YBA CLI by updating client to handle bootstrap tables UUID. PLAT-17105 +* Sends HTTP 529 response when `tasks_list` API encounters exceptions. PLAT-17111 +* Allows specifying full URNs for Azure vnet/subnet to improve resource grouping. PLAT-17115 +* Enables correct THP parameter settings in Ansible and YNP provisioning. PLAT-171678,PLAT-17171,PLAT-17167 +* Ensures core dump file generation pattern matches the one from Ansible playbooks. PLAT-17201 +* Enables server control via RPC to node agent, gated by a global runtime feature flag. PLAT-17216 +* Enhances cluster consistency checks to handle multiple IP addresses per node. PLAT-17222 +* Speeds up upgrade processes by moving pre-checks to asynchronous tasks. PLAT-17238 +* Ensures alert for orphan masters is raised correctly in specific cases. 
PLAT-17257 +* Adds metrics to track and alert on node agent installation failures. PLAT-17274 +* Fixes issue where adding a node incorrectly re-creates existing nodes in async clusters. PLAT-17311 +* Writes PG upgrade check logs to a temporary file for better error parsing. PLAT-17418 +* Enables attach-detach script to work with YBA on HTTPS platforms. PLAT-9692 +* Allows the TlsToggle task to retry with consistent intent settings. PLAT-11187 +* Ensures masters and TServers are verified to belong to the correct universe after startup. PLAT-11696 +* Ensures xCluster deletion can proceed by using either source or target universe UUID when available. PLAT-13785 +* Ensures timezone dropdown defaults to the set preference after clearing or refreshing. PLAT-16606,PLAT-16705 +* Fixes inconsistent `useTimeSync` setting for K8s and OnPrem universes. PLAT-16749 +* Allows empty fields in Cert Manager Issuer during K8s setup. PLAT-16759,PLAT-16758 +* Ensures `semanage fcontext` runs regardless of SELinux mode to prevent node-agent issues. PLAT-16762 +* Restores `semanage fcontext` execution regardless of SELinux mode. PLAT-16762 +* Enables RunApiTriggeredHooks to correctly mark updateSucceeded as true. PLAT-16839 +* Extracts `node_exporter` based on architecture and enhances Python support. PLAT-16871 +* Ensures tag changes are saved and visible in audit logs. PLAT-16875 +* Fixes node state accuracy during resize task retries. PLAT-16916 +* Blocks cron-based universe creation when Ansible provisioning is disabled. PLAT-16925 +* Ensures subtask details update correctly when main tasks complete. PLAT-16961 +* Fixes the display of TServer label for disk volume stats in K8s environments. PLAT-16964 +* Ensures the task banner updates with new tasks on launch by maintaining universe state. PLAT-16970 +* Ensures correct scheduling of incremental backups by updating full backup times first. 
PLAT-16972 +* Removes YEDIS option from CREATE and EDIT modes in the UI, ensuring a cleaner interface. PLAT-17015,PLAT-16983 +* Updates the xCluster version threshold to `2024.1.3.0-b104` on the YBA UI to ensure accuracy in displaying semi-automatic mode availability. PLAT-17045 +* Enhances backup and restore by reconfiguring YBC on all queryable nodes, not just `Running` or `ToBeRemoved`. PLAT-17252 +* Adds YugabyteDB package support to the YNP module. PLAT-17260 +* Allows configuring the SSHD daemon via YNP for custom SSH ports. PLAT-17283 +* Corrects counting of failed tables for DR error banners. PLAT-17348 +* Allows configuring the timeout for PostgreSQL upgrade checks, defaulting to 600 seconds. PLAT-17473 +* Adds Kubernetes overrides to API examples for creating universes. PLAT-8019 +* Adds commands to edit and delete read replica clusters in YBA CLI. PLAT-12842 +* Enables using `yba universe describe` outputs as templates for `yba universe create`. PLAT-16360 +* Adds CLI commands to list, describe, download, and delete support bundles. PLAT-16362 +* Enables the creation of support bundles via the YBA CLI. PLAT-16363 +* Corrects API notations for Point-in-Time Recovery operations. PLAT-16364 +* Enhances YBA CLI with comprehensive alert management commands. PLAT-16365 +* Adds CLI commands for managing alert channels and destinations. PLAT-16366 +* Prompts users for confirmation if an existing config file will be overwritten. PLAT-16617 +* Ignores consistency checks on retries when finding a TServer fails. PLAT-16667 +* Adds commands to manage alert maintenance windows. PLAT-16696 +* Prevents failures in OperatorUtils by not running ConfigBuilder during initialization. PLAT-16882 +* Adds endpoint to list backup directories for selected storage config. PLAT-16900 +* Adds DELETE node command to YBA CLI for managing universe nodes. PLAT-16903 +* Adds prechecks-only functionality for Kubernetes upgrades and edits. 
PLAT-17019 +* Adds pull secrets and node selector rules to customer creation jobs. PLAT-17026 +* Adds CLI support for creating and managing user groups. PLAT-17032 +* Enhances the `describe` command output spacing for better readability. PLAT-17096 +* Adds instance type commands to all cloud service providers in CLI. PLAT-17099 +* Switches AWSUtil from parallel streams to regular streams to avoid thread exhaustion. PLAT-17102 +* Adds commands to refresh KMS configurations from YBA CLI. PLAT-17131 +* Adds a YBA CLI command for configuring YCQL in existing universes. PLAT-17137 +* Enhances data persistence by copying PG restore dump files to `/opt/yugabyte/yugaware/data` in Kubernetes environments. PLAT-17138 +* Fixes cert-manager certificate names and SAN entries for MCS. PLAT-17142,GH-163 +* Reverts erroneous method changes to fix Azure Private DNS in universe creation/deletion. PLAT-17152 +* Adds support for new statuses in `GetReplicationStatus` RPC, enhancing xCluster replication monitoring. PLAT-17230 +* Ensures correct permissions on /run/user with a new precheck. PLAT-17246 +* Resolves the date conversion bug in the get JWT endpoint. PLAT-17261 +* Ensures PYTHON_EXECUTABLE is set for ntpd service checks in clock-skew configuration. PLAT-17524 + +
+ ## v2.25.1.0 - March 21, 2025 {#v2.25.1.0} **Build:** `2.25.1.0-b381` diff --git a/docs/content/preview/releases/yba-releases/v2024.2.md b/docs/content/preview/releases/yba-releases/v2024.2.md index 5d5aa18f9e04..c73e2798adf3 100644 --- a/docs/content/preview/releases/yba-releases/v2024.2.md +++ b/docs/content/preview/releases/yba-releases/v2024.2.md @@ -15,6 +15,138 @@ What follows are the release notes for all releases in the **YugabyteDB Anywhere For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). +## v2024.2.3.0 - May 16, 2025 {#v2024.2.3.0} + +**Build:** `2024.2.3.0-b116` + +**Third-party licenses:** [YugabyteDB](https://downloads.yugabyte.com/releases/2024.2.3.0/yugabytedb-2024.2.3.0-b116-third-party-licenses.html), [YugabyteDB Anywhere](https://downloads.yugabyte.com/releases/2024.2.3.0/yugabytedb-anywhere-2024.2.3.0-b116-third-party-licenses.html) + +### Download + + + +### Highlights + +This release brings significant enhancements focused on performance, flexibility, and operational improvements: + +**Private CA support for Kubernetes Deployments** {{}} + +For Kubernetes-based universes using encryption-in-transit, this release adds support for `aws-privateca-issuer`, simplifying [certificate management with AWS Private CA](/stable/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes/#including-the-common-name). + +**IAM role support for GCP Backups** {{}} + +Managing [backups to Google Cloud Storage (GCS)](/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage/) for VM-based universes is now easier. YugabyteDB Anywhere can now leverage its own IAM role for authentication to GCS, streamlining the configuration process. + +**Deprecation of cron on DB nodes** {{}} + +The use of [cron to start YB services](/stable/yugabyte-platform/upgrade/prepare-to-upgrade/#cron-based-universes) on DB nodes has been deprecated. Instead, use systemd. 
YBA now flags any existing DB nodes using cron with a warning containing remediation instructions. + +### Change log + +
+ View the detailed changelog + +### Improvements + +* Automatically deletes associated backup policies when a universe is removed. PLAT-17197 +* Enables connection pooling during universe creation using new flags. PLAT-16688 +* Allows custom configuration of GCP connection draining timeout. PLAT-17356 +* Enables LDAP URL validation to support IPv6 addresses. PLAT-17180 +* Adds a linter to the YBA CLI project; use `make fmt` for formatting. PLAT-16887 + +### Bug fixes + +* Enhances KMS token renewal at 70% TTL and adds hourly refresh. PLAT-16290 +* Supports multiple NTP services for health checks. PLAT-16709 +* Allows uploading `.cer` certificates to YBA without checksum validation errors. PLAT-16726 +* Disables clock drift check for Kubernetes clusters and when configured. PLAT-16819 +* Enhances database health checks and process management for better stability and performance. PLAT-16197,PLAT-14999,PLAT-15742,PLAT-16895 +* Enables retry and abort options for RR cluster deletion tasks. PLAT-16991 +* Automatically renews node agent certificates ahead of expiry. PLAT-17056 +* Ensures node agent installation is repeatable and error-resistant during manual provisioning. PLAT-17141 +* Enables clearer metrics and alerts for backup deletions. PLAT-17251 +* Upgrades BouncyCastle library to mitigate vulnerabilities. PLAT-13849 +* Fixes Azure resource deletion by correctly reading the error code field. PLAT-16769 +* Ensures instance types exist before node addition in on-premises providers. PLAT-16810 +* Restores using KMS now function correctly due to improved field annotations in YBA CLI. PLAT-16811,PLAT-16783 +* Ensures correct certificate issuer settings during universe upgrades using certManager. PLAT-16812 +* Fixes the issue where changing timezone doesn't update on zoomed metrics graphs. PLAT-16833 +* Upgrades Prometheus in YBA installer to version 3.2.1, enhancing security. PLAT-16872 +* Upgrades Prometheus in helm charts to version 3.2.1, enhancing security. 
PLAT-16872 +* Upgrades PostgreSQL to version 14.17 to address critical security vulnerabilities. PLAT-16873 +* Upgrades key dependencies for enhanced security against critical vulnerabilities. PLAT-16876,PLAT-16874,PLAT-16873 +* Upgrades azcopy to version 10.28.0 to enhance security and performance. PLAT-16893 +* Upgrades address security vulnerabilities in Netty, Json-smart, and Mina-core, ensuring increased safety against potential attacks. PLAT-16894 +* Prevents YBA crash loop caused by invalid OIDC configuration settings. PLAT-16905 +* Fixes clock drift metric reporting to ensure alert triggers. PLAT-16984 +* Ensures unique names for databases during specific restores. PLAT-17146 +* Fixes issues with creating universes and editing read replicas due to missing primary cluster payloads. PLAT-17224 +* Fixes universe creation failure in on-prem tests by ensuring node agent cleanup. PLAT-17292 +* Ensures Ansible provisioning validation works on Ubuntu by updating the scripting method. PLAT-17349 +* Enables retrying `CreateUniverse` for on-prem nodes by modifying preflight checks. PLAT-17368 +* Ensures YBA HA promotion success even if it fails midway after a restore. PLAT-17369 +* Skips THP setting on CentOS 7 and AL2 to enhance stability. PLAT-17545 +* Fixes disk IOPS and throughput reset issue during universe edits. PLAT-16661,PLAT-16724 +* Fixes Helm error to successfully apply multiple `nodeSelector` values in overrides. PLAT-16727 +* Adds flexible regex matching for S3 Host Base domains. PLAT-16842 +* Fixes node agent precheck failures and streamlines AsyncTask interface for RPC tasks. PLAT-15566,PLAT-16886 +* Ensures the `crontab` binary exists before disabling services on Amazon Linux 2023 AMI. PLAT-16902 +* Adds a refresh button to the slow queries UI for easier data updates. PLAT-16917 +* Fixes configuration display and saving issues for migrated universes from 2.20 to 2024.2. 
PLAT-16918 +* Enhances PerfAdvisor by ignoring new fields and supporting custom temp directories. PLAT-17020,PLAT-14028 +* Fixes deadlock issue in backups by using sequential streams instead of parallel streams. PLAT-17063 +* Enables xCluster creation to require `bootstrapParams.tables` despite `allowBootstrap` addition. PLAT-17105 +* Allows specifying full URN for Azure vnet and subnet for resource flexibility. PLAT-17115 +* Enables correct setting of THP parameters during provisioning with Ansible and YNP. PLAT-171678,PLAT-17171,PLAT-17167 +* Fixes issue where adding a new node to an async cluster incorrectly re-creates existing nodes. PLAT-17337 +* Fixes node addition in async clusters without recreating existing nodes. PLAT-17311,PLAT-17337 +* Ensures playbooks for AmazonLinux:2023 work smoothly in YBA 2024.2. PLAT-17364 +* Enhances XCluster deletion by using source universe UUID when the target is absent. PLAT-13785 +* Allows empty fields in Cert Manager Issuer during K8s setup. PLAT-16759,PLAT-16758 +* Ensures backup schedules correctly update times for full and incremental backups. PLAT-16972 +* Updates the client-side xCluster version check to `2024.1.3.0-b104` to align with backend validations. PLAT-17045 +* Ensures node states update correctly during resize tasks with retries. PLAT-17189,PLAT-16916 +* Enables creation and editing of xCluster configurations via YBA CLI. PLAT-16358 +* Corrects API annotations for Point-in-Time Recovery operations. PLAT-16364 +* Resolves the date conversion bug in the get JWT endpoint. PLAT-17261 + +
+ +## v2024.2.2.4 - May 19, 2025 {#v2024.2.2.4} + +**Build:** `2024.2.2.4-b2` + +**Third-party licenses:** [YugabyteDB](https://downloads.yugabyte.com/releases/2024.2.2.4/yugabytedb-2024.2.2.4-b2-third-party-licenses.html), [YugabyteDB Anywhere](https://downloads.yugabyte.com/releases/2024.2.2.4/yugabytedb-anywhere-2024.2.2.4-b2-third-party-licenses.html) + +### Download + + + +### Change log + +
+ View the detailed changelog + +### Bug fixes + +* Allows specifying full URNs for Azure vnet/subnet to improve resource grouping. PLAT-17115 + +
+ ## v2024.2.2.3 - May 6, 2025 {#v2024.2.2.3} **Build:** `2024.2.2.3-b1` @@ -105,7 +237,7 @@ If you previously had OIDC configured for YugabyteDB Anywhere, check the **Scope ### New features * *More convenience when changing time zones* - when viewing metrics or charts, switching time zones is now more conveniently located on the same web page, rather than several clicks away. -* *Linux updates on YBA via boot-disk-replacement* - As an alternative to in-place patching, Linux updates (for security, or for upgrades) to YBA can now be performed by replacing the YBA VM boot disk, reinstalling YBA, and re-attaching the data disk. +* *Linux updates on YBA via boot-disk-replacement* - As an alternative to in-place patching, Linux updates (for security, or for upgrades) to YBA can now be performed by replacing the YBA VM boot disk, reinstalling YBA, and re-attaching the data disk. * *YBA CLI* - a new YBA command-line which enables script-based automation. Now available in Early Access. {{}} * *Connection Pooling (for Kubernetes)* - K8s support for optional server-side Connection Pooling enables more connections per cluster, more efficient memory usage, and faster first connection (and therefore first query) latencies. {{}} diff --git a/docs/content/preview/releases/ybdb-releases/v2.25.md b/docs/content/preview/releases/ybdb-releases/v2.25.md index b13c75ed3b67..fb157cfc77f5 100644 --- a/docs/content/preview/releases/ybdb-releases/v2.25.md +++ b/docs/content/preview/releases/ybdb-releases/v2.25.md @@ -15,6 +15,296 @@ What follows are the release notes for the YugabyteDB v2.25 release series. Cont For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). +## v2.25.2.0 - May 20, 2025 {#v2.25.2.0} + +**Build:** `2.25.2.0-b359` + +### Downloads + + + +**Docker:** + +```sh +docker pull yugabytedb/yugabyte:2.25.2.0-b359 +``` + +### Change log + +
+ View the detailed changelog + +### New features + +* yugabyted UI now displays xCluster replication details for the source and destination universe. {{}} + +### Improvements + +#### YSQL + +* Enhances nested loop joins by rechecking pushability of conditions and renames relevant function to reduce confusion. {{}} +* Restores CREATE permission on the public schema to `yb_db_admin`. {{}} +* Exempts walsender from YSQL backend check to prevent index creation delays. {{}} +* Enables viewing TCMalloc heap snapshots for PG backend processes via new YSQL functions. {{}} +* Enhances `yb_servers` function to include `universe_uuid` for better cluster identification. {{}} +* Fixes comment linting issues to handle non-word characters. {{}} +* Enhances ASH data retrieval in query diagnostics using the SPI framework. {{}} +* Allows customization of `ybhnsw` index creation options in YSQL. {{}} +* Integrates new data types and functions from pgvector 0.8.0 into YSQL. {{}} +* Enables on-demand logging and enhanced catalog cache statistics tracking. {{}} +* Enables conditional checks for role existence in `ysql_dump` outputs with the `dump_role_checks` flag. {{}} +* Removes the check that the first operation in a plain session must set the read time. {{}} +* Enhances code consistency in `ybgate_api.h` by matching PostgreSQL style. {{}} +* Consolidates multiple suppression flags into one for cleaner `pg_regress` outputs. {{}} +* Refactors `PgDocReadOp` to enhance modularity by isolating sampling logic into `PgDocSampleOp`. {{}} +* Enables `ALTER TYPE ... SET SCHEMA` support for orafce extension upgrades. {{}} +* Enhances `pg_stat_get_progress_info` by adding new fields. {{}} +* Eliminates unnecessary workaround in `ALTER TABLE` operations related to constraint handling. {{}} +* Reinstates checks for `ash_metadata` in PgClient RPC requests with added code explanations. {{}} +* Re-adds `bitmap_scans_distinct` test to ensure consistent behavior. 
{{}} +* Adds support for datetime and UUID type pushdown in mixed mode. {{}},{{}} +* Organizes YSQL code by splitting function definitions into a new file. {{}} +* Enables expression pushdown for MOD, LIKE, ASCII, SUBSTRING in mixed mode upgrades. {{}} +* Disables AutoAnalyze during the entire PG15 upgrade to ensure stability. {{}} +* Enforces naming conventions for distinguishing YugabyteDB-specific files. {{}} +* Aligns `CurrentMemoryContext` handling more closely with PostgreSQL updates. {{}} +* Enhances compatibility with PostgreSQL numeric tests by refining data ordering and simplifying table structures. {{}} +* Maintains workaround in `pg_dump` to support upgrades with `pg_stat_statements`. {{}} +* Ensures consistent transaction path settings for single-shard operations. {{}} +* Merges PostgreSQL 15.12 improvements into YugabyteDB, enhancing database compatibility and stability. {{}} +* Allows users to adjust `ybhnsw.ef_search` for HNSW index searches in YSQL. {{}} +* Automatically maps `hnsw` to `ybhnsw` in `CREATE INDEX` statements for seamless index creation. {{}} +* Recommends changing isolation level to read committed to avoid errors during concurrent inserts. {{}} +* Excludes PostgreSQL-owned code from `bad_variable_declaration_spacing` lint rule. {{}} +* Adds `server_type` option to differentiate foreign servers in `postgres_fdw`. {{}} +* Renames `switch_fallthrough` to `yb_switch_fallthrough` for consistency. {{}} +* Enables the PostgreSQL anonymizer extension via the `enable_pg_anonymizer` flag. {{}} +* Enhances error reporting by including index names for missing rows. {{}} +* Displays rows removed by YugabyteDB index recheck in execution plans. {{}} +* Aligns `get_relation_constraint_attnos` function to use correct flag. {{}} +* Disallows setting `ysql_select_parallelism` to zero to prevent errors. {{}} +* Removes `pg_strtouint64` and adopts `strtoull` or `strtou64` for consistency. 
{{}} +* Aligns YSQL more closely with upstream PostgreSQL, reducing discrepancies and streamlining changes. {{}} +* Logs now detail the cause and context of read restart errors for better troubleshooting. {{}} +* Limits output buffer to 8kB to ensure compatibility with certain clients. {{}} +* Enhances TServer by adding support for garbage collection of invalidation messages, reducing memory usage. {{}} +* Increases the timeout for detecting `pg_yb_catalog_version` mode from 10 seconds to 20 seconds. {{}} +* Enhances `pg_stats` with length and bounds histograms for better query planning. {{}} +* Fixes build failures and enhances memory usage reporting with TCMalloc stats. {{}} + +#### YCQL + +* Allows setting NULL in YCQL JSONB column values using UPDATE statements. {{}} + +#### DocDB + +* Enables placing intermediate CA certificates directly in the server cert file for node-to-node encryption. {{}} +* Tracks ByteBuffer memory usage with `MemTracker`. {{}} +* Allows dynamic adjustment of `rocksdb_compact_flush_rate_limit_bytes_per_sec` across all tablets. {{}} +* Selects geographically closest TServer for faster clone operations. {{}} +* Switches most builds to clang 19, enhancing code safety and addressing new warnings. {{}} +* Introduces block-based data organization in `YbHnsw` for efficient memory management during data loading and unloading. {{}} +* Enables manual compaction of vector index chunks in Vector LSM. {{}} +* Ensures vector index backfill reads from the indexed table at the correct time. {{}} +* Upgrades protobuf to version 21.12 for better C++23 compatibility. {{}} +* Updates codebase to C++23, enhancing compatibility and performance. {{}} +* Enables sequence replication in xCluster by default, removing the need for a flag. {{}} +* Adds logging for vector index search stats when `vector_index_dump_stats` flag is true. {{}} +* Ensures consistent bootstrapping of vector indexes after a TServer restart. 
{{}} +* Enhances handling of expired snapshots by retrying deletion tasks automatically. {{}} +* Ensures vector index contains all entries from the indexed table. {{}} +* Adds detailed cluster balancer warnings to the master UI page. {{}} +* Adds tombstones to obsolete vector IDs, reducing queries to the main table. {{}} +* Displays cluster balancer tasks on the master UI page. {{}} +* Adds annotations to prevent compiler reordering in shared memory interactions. {{}} +* Uses non-concurrent mode by default for creating vector indexes to streamline processes. {{}} +* Adds safeguard to pause replication after repeated DDL failures. {{}} +* Enhances xCluster DDL replication by adjusting `yb_read_time` usage and silencing related warnings. {{}} +* Renames `docdb::VectorIndex` to `docdb::DocVectorIndex` to eliminate name confusion. {{}} +* Allows specific compaction and flush for vector indexes via `yb-admin` commands. {{}} +* Adds `yb-ts-cli compact_vector_index` command for tablet-specific vector index compaction. {{}} +* Adds `automatic_mode` flag to `create_checkpoint` for simpler xCluster setup. {{}} +* Enables dropping vector indexes in DocDB. {{}} +* Displays replication mode in the master UI `Outbound Replication Groups` section. {{}} +* Enhances vector index compaction with a new deletion API and clearer naming conventions. {{}} +* Automatic mode now always requires bootstrapping to ensure OID consistency. {{}} +* Reuses threads to enhance connection efficiency in shared memory communication. {{}} +* Enhances vector index query stats logging and adds new metrics tracking. {{}} +* Enhances monitoring by using thread pool names for thread categorization. {{}} +* Simplifies navigation and modification of master async RPC tasks code. {{}} +* Introduces idle timeouts in `rpc::ThreadPool` to automatically adjust thread counts based on activity, enhancing resource efficiency. 
{{}} +* Switches to `MPSCQueue` for enhanced single-consumer performance and simpler maintenance. {{}} +* Adds support for the DocumentDB extension v0.102-0 to enhance database functionality. {{}} +* Simplifies xCluster DDL replication tests by removing bidirectional roles. {{}} +* Allows setting `ybhnsw.ef_search` to customize search expansion factor. {{}} +* Adds paginated querying for vector index operations. {{}} +* Cancels vector index compaction during VectorLSM shutdown. {{}} +* Enables cloning of vector indexes in databases. {{}} +* Enables consistent backup and restore for vector indexes. {{}} +* Speeds up ExternalMiniCluster tests by directly triggering master elections. {{}} +* Deprecates the `load_balancer_count_move_as_add` flag to simplify cluster balancing. {{}} +* Removes `master_replication` from `master_fwd.h` to optimize file parsing times. {{}} + +#### CDC + +* Enhances CDC streaming by advancing restart time in idle periods, supported by the new flag `cdcsdk_update_restart_time_interval_secs`. {{}} +* Reduces logging frequency for certain CDC errors to avoid clutter. {{}} +* Sets `wal_status` in `pg_replication_slots` based on CDC consumption timing. {{}} +* Corrects flag value conversion to ensure accurate update intervals for CDC restart times. {{}} +* Blocks table drops if they are part of a publication to prevent replication issues. {{}} +* Reduces the default `yb_walsender_poll_sleep_duration_empty_ms` flag value to 10 ms to speed up replication in sparse workloads. {{}} +* Increases log visibility for netty errors by changing levels from `DEBUG` to `WARN`. {{}} + +#### yugabyted + +* Removes `psutil` dependency in `yugabyted` for better compatibility. {{}} + +### Bug fixes + +#### YSQL + +* Reduces XID usage by generating one per `REFRESH MATERIALIZED VIEW CONCURRENTLY` operation. {{}} +* Renames on unique constraints now update associated DocDB table names. {{}} +* Reduces read restart errors during concurrent disjoint key writes. 
{{}} +* Avoids unnecessary catalog version bumps during in-place materialized view refreshes. {{}} +* Disables index-only scans on copartitioned indexes. {{}} +* Introduces custom SQL error codes for better error handling across processes. {{}} +* Fixes crashes when using `yb_get_range_split_clause` with partitioned tables. {{}} +* Fixes incorrect error message related to "INSERT ON CONFLICT" under concurrent transactions. {{}} +* Corrects batched read behavior for mixed immediate and deferred FK constraints. {{}} +* Reduces latency after DDL changes by using catalog version for cache invalidation. {{}} +* Refines cost model tuning using server-side execution times for more accurate query optimization. {{}} +* Removes redundant `yb_cdc_snapshot_read_time` field, simplifying snapshot management. {{}} +* Enables geolocation costing in the new cost model using the `yb_enable_geolocation_costing` flag. {{}} +* Fixes flaky behavior in Connection Manager when handling prepared statements. {{}} +* Disables fast-path transactions for bulk loads on colocated tables by default. {{}} +* Refactors the FK cache handling in YSQL for cleaner code structure. {{}} +* Optimizes cost modeling for primary index scans to assume sequential disk block fetching. {{}} +* Ensures accurate detection of duplicate entries during fast-path transactions. {{}} +* Enables setting follower reads YSQL parameters at connection time. {{}} +* Resolves multiple issues in tuple-to-string utility functions. {{}} +* Ensures stable operation of refresh materialized view during major upgrades. {{}} +* Uses auto-generated OID symbols for `pg_proc` entries. {{}} +* Displays the `initdb` log file path on stdout for easier debugging. {{}} +* Ensures consistent data during fast-path `COPY` operations on tables with unique indexes. {{}} +* Organizes tests into separate files for better clarity and maintenance. {{}} +* Enhances query planning for inherited and partitioned tables with more efficient path usage. 
{{}} +* Ensures PostgreSQL compilation only executes necessary tasks by correctly handling `MAKELEVEL`. {{}} +* Prevents database crashes by blocking index creation on dimensionless vector columns. {{}} +* Fixes upgrade issues for partitioned tables by reverting `relam` settings. {{}} +* Prevents crash by excluding NULL values from vector indices. {{}} +* Enhances index scans and partition pruning for BOOLEAN conditions. {{}} +* Ensures correct behavior of YbBitmapIndexScan upon rescan by updating pushdown expressions. {{}} +* Eliminates erroneous colocation data in `indexdef` for copartitioned indexes. {{}} +* Adds unit test to handle `SELECT` errors in incremental cache refresh. {{}} +* Fixes regression bug in handling incremental cache refresh across concurrent DDL sessions. {{}} +* Restores and repositions a critical statement to its intended location in the planner. {{}} +* Enables selective whole row retrieval for DELETE on partitioned tables with varying configurations. {{}} +* Corrects estimations for inner table scans in Batched Nested Loop Joins. {{}} +* Fixes integer overflow in BNL cost estimation, preventing negative values. {{}} +* Prevents incorrect sharing of query limits in subplan executions. {{}},{{}} +* Adds a YSQL configuration parameter to customize negative catalog caching. {{}},{{}} +* Ensures the `vmodule` flag is respected by the postgres process. {{}} +* Adds `liblz4.1.dylib` to macOS `yugabyte-client` package for successful deployment. {{}} +* Enables `ANALYZE` to collect accurate stats for parent-level of partitioned tables. {{}} +* Prevents crashes by handling non-variable expressions in single-row updates or deletes. {{}} +* Adds a safeguard to prevent crashes during NULL vector index scans. {{}} +* Enhances stability by initializing vector index scan costs to prevent undefined behavior. {{}} +* Prevents relcache reference leaks in `yb_get_ybctid_width`. 
{{}} +* Fixes port conflict issue when setting `pgsql_proxy_bind_address` in dual NIC setups. {{}} +* Addresses "Duplicate table" error by ensuring unique OID allocation during restores. {{}} +* Ensures YSQL dumps set `colocated = false` for non-colocated databases during backups. {{}} +* Reduces default RPC message size limit for better data handling. {{}} +* Enhances `yb_index_check` to verify materialized view indexes' consistency. {{}} +* Ensures `ysql_dump` maintains enum sort order during backup and restore. {{}} +* Ensures accurate data return from index scans by correctly fetching needed values for rechecks. {{}} +* Ensures `path->rows` reflects accurate row estimates in scans, avoiding incorrect overwrites. {{}} +* Prevents "Duplicate table" errors by not computing `relfilenode_htab` during initdb. {{}} +* Switches from `now` to `clock_timestamp` for recording invalidation message time. {{}} +* Ignores the `SPLIT` option when creating a partitioned table. {{}} +* Renames YSQL metric prefixes for clarity and maintains compatibility with old names. {{}} +* Updates description for `ysql_yb_enable_ash` flag. {{}} +* Allows restoration of old backups with enum types without errors by reverting to warnings and auto-generated OIDs. {{}} +* Logs odd `pg_enum` OID during restore if `sortorder` is missing, enhancing troubleshooting. {{}} +* Restores the call to `ScheduleCheckObjectIdAllocators` inadvertently removed. {{}} +* Fixes a use-after-free bug in ysql_dump by copying tablegroup_name. {{}} +* Allows `yb_binary_restore` to be set by `ybdbadmin` for vector extension creation. {{}} + +#### DocDB + +* Resolves issue where tables could get indefinitely stuck in HIDING state. {{}} +* Prevents creation of tablespaces with duplicate placement blocks. {{}} +* Prevents crashes by ensuring non-null frontiers during transaction apply after a TServer restart. {{}} +* Fixes load balancing for rewritten tables in colocated databases. 
{{}} +* Prevents deadlocks in PG sessions when using shared memory, enhancing stability. {{}} +* Fixes crashes in ProcessSupervisor when unable to restart a process. {{}} +* Ensures yb-admin commands respect user-specified timeouts for table flushes and compactions. {{}} +* Enhances transactional xCluster accuracy by using majority replicated OpId for ApplySafeTime calculations. {{}} +* Ensures accurate `WaitForReplicationDrain` behavior by not mislabeling tablets as drained. {{}} +* Enables cloning databases to any time in the snapshot schedule retention period. {{}} +* Fixes handling of `db_max_flushing_bytes` to properly limit memory usage under high write loads. {{}} +* Prevents unbounded growth of the `recently_applied_map` by not adding read-only transactions, conserving memory. {{}} +* Enables cloning databases with sequences to earlier states without errors. {{}} +* Prevents false conflict detection in snapshot isolation operations. {{}} +* Fixes lock order for vector index creation to prevent deadlocks. {{}} +* Allows xCluster to handle `UNKNOWN` state TableInfos gracefully. {{}} +* Disables `-Wmisleading-indentation` warnings in GCC to prevent increased compile times. {{}} +* Adjusts shared memory address range to 0x350000000000-0x3f0000000000 to avoid conflicts. {{}} +* Ensures continuous leader lease revocation by supporting multiple old leader leases. {{}} +* Prevents potential deadlocks by ensuring table locks follow ID order during namespace copies. {{}} +* Separates thread pools for vector index backfill and inserts to avoid deadlocks. {{}} +* Escapes non-printable characters in UI and file outputs. {{}} +* Fixes logging of partition keys for new child tablets post-split. {{}} +* Fixes flush failure reporting during shutdown to prevent data loss. {{}} +* Ensures accurate data tracking during master leader transitions by handling chunked tablet reports efficiently. {{}} +* Ensures system tables are correctly removed during deletions or upgrades. 
{{}} +* Reduces thread contention by using a lock-free queue for thread management. {{}} +* Enhances local RPC call handling to execute in the same thread when possible. {{}} +* Stops tracking transactions when `use_bootstrap_intent_ht_filter` is set to false, preventing memory leaks. {{}} +* Ensures `Slice::ToDebugString` respects the `max_len` setting for hex outputs. {{}} +* Prevents `yb-admin` crashes by correctly handling argument count for `create_database_snapshot`. {{}} +* Improves error handling for shared memory operations in DocDB. {{}} +* Removes 60-second timeout upper bound on admin RPCs and adds new `yb_client_admin_rpc_timeout_sec` flag. {{}} +* Prevents deadlocks during background compaction and transaction loading. {{}} +* Fixes the issue of recording "query id 0" in Active Session History samples. {{}} +* Blocks nonconcurrent index creation on xCluster replicated tables. {{}} +* Prevents master process crashes by fixing an iteration modification issue in TriggerDdlVerificationIfNeeded. {{}} +* Reverts non-printable character handling to fix tests and API scraping issues. {{}} +* Prevents crashes during vector index flush on shutdown. {{}} + +#### CDC + +* Ensures only relevant `COMMIT` records are streamed, fixing gRPC connector crashes. {{}} +* Prevents CDC crashes by logging a warning for dropped indexes on colocated tables. {{}} +* Prevents data loss by not streaming records during transaction load. {{}} +* Ensures reliable CDC stream functionality during index creation, preventing schema packing errors. {{}} + +#### yugabyted + +* Checks for chrony service before enabling clockbound during node start. {{}} +* Preserves the universe key locally after enabling EAR for recovery scenarios. {{}} + +
+ ## v2.25.1.0 - March 21, 2025 {#v2.25.1.0} **Build:** `2.25.1.0-b381` diff --git a/docs/content/preview/releases/ybdb-releases/v2024.1.md b/docs/content/preview/releases/ybdb-releases/v2024.1.md index 8ca09a28bea9..84fda1dbbb1d 100644 --- a/docs/content/preview/releases/ybdb-releases/v2024.1.md +++ b/docs/content/preview/releases/ybdb-releases/v2024.1.md @@ -18,16 +18,16 @@ What follows are the release notes for the YugabyteDB 2024.1 release series. Con For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). {{}} -YugabyteDB 2024.1.0.0 and newer releases do not support v7 Linux versions (CentOS7, Red Hat Enterprise Linux 7, Oracle Enterprise Linux 7.x), Amazon Linux 2, and Ubuntu 18. If you're currently using one of these Linux versions, upgrade to a supported OS version before installing YugabyteDB v2024.1.0. Refer to [Operating system support](/stable/reference/configuration/operating-systems/) for the complete list of supported operating systems. +YugabyteDB 2024.1.0.0 and newer releases do not support v7 Linux versions (CentOS7, Red Hat Enterprise Linux 7, Oracle Enterprise Linux 7.x), Amazon Linux 2, and Ubuntu 18. If you're currently using one of these Linux versions, upgrade to a supported OS version before installing YugabyteDB v2024.1.0. Refer to [Operating system support](/v2024.1/reference/configuration/operating-systems/) for the complete list of supported operating systems. {{}} {{< tip title="New memory division settings available" >}} -YugabyteDB uses [memory division flags](/stable/reference/configuration/yb-master/#memory-division-flags) to specify how memory should be divided between different processes (for example, [YB-TServer](/stable/architecture/yb-tserver/) versus [YB-Master](/stable/architecture/yb-master/)) on a YugabyteDB node as well as within processes. Using these flags, you can better allocate memory for PostgreSQL, making it more suitable for a wider range of use cases. 
+YugabyteDB uses [memory division flags](/v2024.1/reference/configuration/yb-master/#memory-division-flags) to specify how memory should be divided between different processes (for example, [YB-TServer](/v2024.1/architecture/yb-tserver/) versus [YB-Master](/v2024.1/architecture/yb-master/)) on a YugabyteDB node as well as within processes. Using these flags, you can better allocate memory for PostgreSQL, making it more suitable for a wider range of use cases. -For _new_ v2024.1.x universes, if you are expecting to use any nontrivial amount of [YSQL](/stable/api/ysql/), it is strongly recommended to set [‑‑use_memory_defaults_optimized_for_ysql](/stable/reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql). This changes the memory division defaults to better values for YSQL usage, and optimizes memory for the node size. +For _new_ v2024.1.x universes, if you are expecting to use any nontrivial amount of [YSQL](/v2024.1/api/ysql/), it is strongly recommended to set [‑‑use_memory_defaults_optimized_for_ysql](/v2024.1/reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql). This changes the memory division defaults to better values for YSQL usage, and optimizes memory for the node size. -If you are _upgrading_ a universe, you may want to instead review your memory division settings and adjust them if desired; see [best practices](/stable/develop/best-practices-ysql/#minimize-the-number-of-tablets-you-need). +If you are _upgrading_ a universe, you may want to instead review your memory division settings and adjust them if desired; see [best practices](/v2024.1/develop/best-practices-ysql/#minimize-the-number-of-tablets-you-need). 
In future releases, the memory division settings will be used to determine how many tablet replicas a YB-TServer can safely support; this information will power new alerts warning you about overloading nodes with too many tablet replicas and allow blocking operations that would create too many tablet replicas. @@ -561,17 +561,17 @@ docker pull yugabytedb/yugabyte:2024.1.2.0-b77 ### New features -* [Semi-automatic transactional xCluster setup](/stable/deploy/multi-dc/async-replication/async-replication-transactional/). Provides operationally simpler setup and management of YSQL transactional xCluster replication, as well as simpler steps for performing DDL changes. {{}} +* [Semi-automatic transactional xCluster setup](/v2024.1/deploy/multi-dc/async-replication/async-replication-transactional/). Provides operationally simpler setup and management of YSQL transactional xCluster replication, as well as simpler steps for performing DDL changes. {{}} -* [Kubernetes readiness probe](/stable/deploy/kubernetes/single-zone/oss/helm-chart/#readiness-probes). Added readiness probes for TServer and Master pods in YugabyteDB, supporting custom or default configurations, thereby enhancing stability by ensuring YSQL/YCQL and YB-Master pods are ready before traffic is routed. {{}} +* [Kubernetes readiness probe](/v2024.1/deploy/kubernetes/single-zone/oss/helm-chart/#readiness-probes). Added readiness probes for TServer and Master pods in YugabyteDB, supporting custom or default configurations, thereby enhancing stability by ensuring YSQL/YCQL and YB-Master pods are ready before traffic is routed. {{}} * yugabyted * [Voyager assessment visualisation in yugabyted UI](/preview/yugabyte-voyager/migrate/assess-migration/). Yugabyted UI provides a dashboard to allow the users to effectively plan the migrations based on the complexity and also be able to monitor the progress of each migration - * [Backup/restore support with TLS enabled](/stable/reference/configuration/yugabyted/#backup). 
In secure mode, yugabyted cluster supports taking full backup/restores. {{}} + * [Backup/restore support with TLS enabled](/v2024.1/reference/configuration/yugabyted/#backup). In secure mode, yugabyted cluster supports taking full backup/restores. {{}} - * [xCluster support](/stable/reference/configuration/yugabyted/#set-up-xcluster-replication-between-clusters). yugabyted enables native support for setting up xCluster between two yugabyted deployed clusters. {{}} + * [xCluster support](/v2024.1/reference/configuration/yugabyted/#set-up-xcluster-replication-between-clusters). yugabyted enables native support for setting up xCluster between two yugabyted deployed clusters. {{}} ### Change log @@ -676,14 +676,14 @@ docker pull yugabytedb/yugabyte:2024.1.1.0-b137 **PostgreSQL Logical Replication Protocol Support** {{}} -We're excited to announce in the 2024.1.1.0 release support for the PostgreSQL Logical Replication Protocol for Change Data Capture (CDC), in addition to the existing native [gRPC Replication protocol](/stable/develop/change-data-capture/using-yugabytedb-grpc-replication/). -This feature allows you to manage CDC streams using [Publications](https://www.postgresql.org/docs/11/sql-createpublication.html) and [Replication Slots](https://www.postgresql.org/docs/11/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS), similar to native PostgreSQL. Additionally, a [new connector](/stable/develop/change-data-capture/using-logical-replication/get-started/#get-started-with-yugabytedb-connector) is introduced that utilizes the logical replication protocol to consume the CDC streams via [Replication slots](https://www.postgresql.org/docs/current/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS). 
+We're excited to announce in the 2024.1.1.0 release support for the PostgreSQL Logical Replication Protocol for Change Data Capture (CDC), in addition to the existing native [gRPC Replication protocol](/v2024.1/develop/change-data-capture/using-yugabytedb-grpc-replication/). +This feature allows you to manage CDC streams using [Publications](https://www.postgresql.org/docs/11/sql-createpublication.html) and [Replication Slots](https://www.postgresql.org/docs/11/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS), similar to native PostgreSQL. Additionally, a [new connector](/v2024.1/develop/change-data-capture/using-logical-replication/get-started/#get-started-with-yugabytedb-connector) is introduced that utilizes the logical replication protocol to consume the CDC streams via [Replication slots](https://www.postgresql.org/docs/current/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS). -For more information, refer to [logical replication protocol](/stable/develop/change-data-capture/using-logical-replication/). +For more information, refer to [logical replication protocol](/v2024.1/develop/change-data-capture/using-logical-replication/). ### New features -* [Automated SQL/CQL Shell binary](/stable/api/ysqlsh/#installation). Along with full binary, added separate downloadable SQL/CQL Shell binary. +* [Automated SQL/CQL Shell binary](/v2024.1/api/ysqlsh/#installation). Along with full binary, added separate downloadable SQL/CQL Shell binary. * [Voyager assessment visualisation in yugabyted UI](/preview/yugabyte-voyager/migrate/assess-migration/). 
Yugabyted UI provides a dashboard to allow the users to effectively plan the migrations based on the complexity and also be able to monitor the progress of each migration {{}} @@ -842,52 +842,52 @@ We're pleased to announce the early access of the new [Enhanced Postgres Compati **Rollback after upgrade** -Rolling back to the pre-upgrade version if you're not satisfied with the upgraded version is now {{}}. Refer to the [Rollback phase](/stable/manage/upgrade-deployment/#b-rollback-phase) for more information. +Rolling back to the pre-upgrade version if you're not satisfied with the upgraded version is now {{}}. Refer to the [Rollback phase](/v2024.1/manage/upgrade-deployment/#b-rollback-phase) for more information. ### New features -* [yugabyted](/stable/reference/configuration/yugabyted/) - * Set preferred regions. The preferred region handles all read and write requests from clients. Use the [yugabyted configure data_placement](/stable/reference/configuration/yugabyted/#data-placement) command to specify preferred regions for clusters. +* [yugabyted](/v2024.1/reference/configuration/yugabyted/) + * Set preferred regions. The preferred region handles all read and write requests from clients. Use the [yugabyted configure data_placement](/v2024.1/reference/configuration/yugabyted/#data-placement) command to specify preferred regions for clusters. * Connection management integration. With connection management enabled, the **Nodes** page of yugabyted UI displays the split of physical and logical connections. - * [Docker-based deployments](/stable/reference/configuration/yugabyted/#create-a-multi-region-cluster-in-docker). Improves the yugabyted Docker user experience for RF-3 deployments and docker container/host restarts. - * Simplified [PITR configuration](/stable/reference/configuration/yugabyted/#restore). - * Perform all admin operations using a [pass through mechanism](/stable/reference/configuration/yugabyted/#admin-operation) to execute yb-admin commands. 
+ * [Docker-based deployments](/v2024.1/reference/configuration/yugabyted/#create-a-multi-region-cluster-in-docker). Improves the yugabyted Docker user experience for RF-3 deployments and docker container/host restarts. + * Simplified [PITR configuration](/v2024.1/reference/configuration/yugabyted/#restore). + * Perform all admin operations using a [pass through mechanism](/v2024.1/reference/configuration/yugabyted/#admin-operation) to execute yb-admin commands. * DocDB Availability * Speed up local bootstrap. Faster rolling upgrades and restarts by minimizing table bootstrap time. * Hardening Raft. Reduced time window for re-tryable requests by honoring write RPC timeouts. -* [Batched nested loop joins](/stable/architecture/query-layer/join-strategies/#batched-nested-loop-join-bnl). A join execution strategy that is an improvement on Nested Loop joins that sends one request to the inner table per batch of outer table tuples instead of once per individual outer table tuple. +* [Batched nested loop joins](/v2024.1/architecture/query-layer/join-strategies/#batched-nested-loop-join-bnl). A join execution strategy that is an improvement on Nested Loop joins that sends one request to the inner table per batch of outer table tuples instead of once per individual outer table tuple. -* [Tablet splitting on range-sharded tables](/stable/architecture/docdb-sharding/tablet-splitting/#range-sharded-tables). Optimized the tablet split thresholds to speed up data ingestion. +* [Tablet splitting on range-sharded tables](/v2024.1/architecture/docdb-sharding/tablet-splitting/#range-sharded-tables). Optimized the tablet split thresholds to speed up data ingestion. -* [Catalog Caching](/stable/reference/configuration/yb-tserver/#catalog-flags). Reduce master requests during PostgreSQL system catalog refresh by populating YB-TServer catalog cache. {{}} +* [Catalog Caching](/v2024.1/reference/configuration/yb-tserver/#catalog-flags). 
Reduce master requests during PostgreSQL system catalog refresh by populating YB-TServer catalog cache. {{}} -* [Catalog Caching](/stable/reference/configuration/yb-tserver/#ysql-yb-toast-catcache-threshold). Use TOAST compression to reduce PG catalog cache memory. Compressed catalog tuples when storing in the PG catalog cache to reduce the memory consumption. {{}} +* [Catalog Caching](/v2024.1/reference/configuration/yb-tserver/#ysql-yb-toast-catcache-threshold). Use TOAST compression to reduce PG catalog cache memory. Compressed catalog tuples when storing in the PG catalog cache to reduce the memory consumption. {{}} -* [Index backfill](/stable/explore/ysql-language-features/indexes-constraints/index-backfill/) stability improvements. Ensure timely notification to all nodes and PostgreSQL backends before initiating index backfill to prevent missing entries during index creation. +* [Index backfill](/v2024.1/explore/ysql-language-features/indexes-constraints/index-backfill/) stability improvements. Ensure timely notification to all nodes and PostgreSQL backends before initiating index backfill to prevent missing entries during index creation. * Support for CDC with atomic DDL. In case of DDL being rolled back, CDC will not send records with rolled back schema. -* [Wait-On Conflict Concurrency Control](/stable/architecture/transactions/concurrency-control/#wait-on-conflict). Cross-tablet fairness in resuming "waiters". Resume waiters in a consistent order across tablets, when a set of transactions simultaneously wait on more than one intent/lock on various tablets. +* [Wait-On Conflict Concurrency Control](/v2024.1/architecture/transactions/concurrency-control/#wait-on-conflict). Cross-tablet fairness in resuming "waiters". Resume waiters in a consistent order across tablets, when a set of transactions simultaneously wait on more than one intent/lock on various tablets. 
* YSQL - * [Cost-based optimizer](/stable/reference/configuration/yb-tserver/#yb-enable-base-scans-cost-model). Added support for cost based optimizer for YSQL. {{}} - * [DDL concurrency](/stable/reference/configuration/yb-tserver/#ddl-concurrency-flags). Support for isolating DDLs per database. Specifically, a DDL in one database does not cause catalog cache refreshes or aborts transactions due to breaking change in another database. - * [DDL atomicity](/stable/reference/configuration/yb-tserver/#ddl-atomicity-flags). Ensures that YSQL DDLs are fully atomic between YSQL and DocDB layers, that is in case of any errors, they are fully rolled back, and in case of success they are applied fully. Currently, such inconsistencies are rare but can happen. - * [ALTER TABLE support](/stable/api/ysql/the-sql-language/statements/ddl_alter_table/#add-column-if-not-exists-column-name-data-type-constraint-constraints). Adds support for the following variants of ALTER TABLE ADD COLUMN: + * [Cost-based optimizer](/v2024.1/reference/configuration/yb-tserver/#yb-enable-base-scans-cost-model). Added support for cost based optimizer for YSQL. {{}} + * [DDL concurrency](/v2024.1/reference/configuration/yb-tserver/#ddl-concurrency-flags). Support for isolating DDLs per database. Specifically, a DDL in one database does not cause catalog cache refreshes or aborts transactions due to breaking change in another database. + * [DDL atomicity](/v2024.1/reference/configuration/yb-tserver/#ddl-atomicity-flags). Ensures that YSQL DDLs are fully atomic between YSQL and DocDB layers, that is in case of any errors, they are fully rolled back, and in case of success they are applied fully. Currently, such inconsistencies are rare but can happen. + * [ALTER TABLE support](/v2024.1/api/ysql/the-sql-language/statements/ddl_alter_table/#add-column-if-not-exists-column-name-data-type-constraint-constraints). 
Adds support for the following variants of ALTER TABLE ADD COLUMN: * with a SERIAL data type * with a volatile DEFAULT * with a PRIMARY KEY - * [Lower latency for large scans with size-based fetching](/stable/reference/configuration/yb-tserver/#ysql-yb-fetch-size-limit). A static size based fetch limit value to control how many rows can be returned in one request from DocDB. {{}} + * [Lower latency for large scans with size-based fetching](/v2024.1/reference/configuration/yb-tserver/#ysql-yb-fetch-size-limit). A static size-based fetch limit value to control how many rows can be returned in one request from DocDB. {{}} -* [Tablet limits](/stable/architecture/docdb-sharding/tablet-splitting/#tablet-limits). Depending on the available nodes and resources such as memory and CPU, YugabyteDB can automatically limit the total number of tables that can be created to ensure that the system can be stable and performant. +* [Tablet limits](/v2024.1/architecture/docdb-sharding/tablet-splitting/#tablet-limits). Depending on the available nodes and resources such as memory and CPU, YugabyteDB can automatically limit the total number of tables that can be created to ensure that the system can be stable and performant. -* Truncate support with [PITR](/stable/manage/backup-restore/point-in-time-recovery/). The TRUNCATE command is now allowed for databases with PITR enabled. +* Truncate support with [PITR](/v2024.1/manage/backup-restore/point-in-time-recovery/). The TRUNCATE command is now allowed for databases with PITR enabled. * DocDB memory tracking enhancements. Memory tracking in DocDB to account for 90% of memory used. -* [Enhanced Explain Analyze output](/stable/explore/query-1-performance/explain-analyze/). Explain Analyze when used with DIST option will also show the rows read from the storage layer, which can help diagnosing the query performance. +* [Enhanced Explain Analyze output](/v2024.1/explore/query-1-performance/explain-analyze/). 
Explain Analyze when used with DIST option will also show the rows read from the storage layer, which can help diagnose query performance. * Upgrade OpenSSL to 3.0.8 from 1.1.1. OpenSSL 1.1.1 is out of support. This feature upgrades YugabyteDB to FIPS compliant OpenSSL version 3.0.8. diff --git a/docs/content/preview/releases/ybdb-releases/v2024.2.md b/docs/content/preview/releases/ybdb-releases/v2024.2.md index 8869ab2e8c54..7b9fea256230 100644 --- a/docs/content/preview/releases/ybdb-releases/v2024.2.md +++ b/docs/content/preview/releases/ybdb-releases/v2024.2.md @@ -17,6 +17,228 @@ What follows are the release notes for the YugabyteDB 2024.2 release series. Con For an RSS feed of all release series, point your feed reader to the [RSS feed for releases](../index.xml). +## v2024.2.3.0 - May 16, 2025 {#v2024.2.3.0} + +**Build:** `2024.2.3.0-b116` + +**Third-party licenses:** [YugabyteDB](https://downloads.yugabyte.com/releases/2024.2.3.0/yugabytedb-2024.2.3.0-b116-third-party-licenses.html), [YugabyteDB Anywhere](https://downloads.yugabyte.com/releases/2024.2.3.0/yugabytedb-anywhere-2024.2.3.0-b116-third-party-licenses.html) + +### Downloads + + + +**Docker:** + +```sh +docker pull yugabytedb/yugabyte:2024.2.3.0-b116 +``` + +### Highlights + +This release brings significant enhancements focused on performance, flexibility, and operational improvements: + +**Enhanced Query Performance with Parallel Queries** {{}} + +Experience faster query execution on **colocated tables** with the introduction of [PostgreSQL's Parallel Queries](/stable/explore/ysql-language-features/advanced-features/parallel-query/). This allows the database to leverage multiple CPU cores for quicker results. Support for sharded tables is coming soon. 
+ +**Greater Flexibility in Cross-Cluster Replication** {{}} + +Gain more control with the ability to temporarily enable write operations on xCluster Standby universes and read/write access on replicas in SQL [transactional mode](/stable/architecture/docdb-replication/async-replication/#transactional). This exception-based access, configurable via a session-level configuration parameter, accommodates specific operational needs and tool integrations. + +**Enhanced Data Privacy with PostgreSQL Anonymizer** {{}} + +YugabyteDB now includes the [PostgreSQL Anonymizer extension](/stable/explore/ysql-language-features/pg-extensions/extension-pganon/), which allows you to mask or replace sensitive data (PII or commercially sensitive information) directly in the database. + +### Change log + +
+ View the detailed changelog + +### Improvements + +#### YSQL + +* Exempts walsender from YSQL backend checks to prevent index creation timeouts. {{}} +* Enables viewing TCMalloc heap snapshots for PG backend processes via new YSQL functions. {{}} +* Enhances `yb_servers` function to include `universe_uuid` for better cluster identification. {{}} +* Enables pushdown of UUID and datetime types in mixed mode. {{}},{{}} +* Enables function pushdown for MOD, LIKE, ASCII, SUBSTRING in mixed mode upgrades. {{}} +* Adds `enable_pg_anonymizer` flag for enabling the PostgreSQL anonymizer extension. {{}} +* Displays index name for missing index row errors in `yb_index_check`. {{}} +* Logs now detail the cause and context of read restart errors for better troubleshooting. {{}} +* Limits output buffer to 8kB to ensure compatibility with certain clients. {{}} +* Enables manual skipping of index backfill in YSQL with `CREATE INDEX NONCONCURRENTLY`. {{}} +* Enables viewing length and bounds histograms in `pg_stats`. {{}} + +#### YCQL + +* Allows setting `NULL` values in YCQL JSONB columns. {{}} + +#### DocDB + +* Allows using intermediate CA certs in server cert files for node-to-node encryption. {{}} +* Tracks ByteBuffer memory usage with `MemTracker`. {{}} +* Allows adjusting `rocksdb_compact_flush_rate_limit_bytes_per_sec` without restarting the tablet server. {{}} +* Cloning now uses the geographically closest TServer, reducing timeouts. {{}} +* Enhances handling of expired snapshots by retrying deletion tasks automatically. {{}} +* Enables tracking of thread metrics by specific thread pool names for enhanced monitoring. {{}} +* Enhances maintainability by consolidating tablespace validation logic into `TablespaceParser`. {{}} + +#### CDC + +* Reduces logging frequency for certain CDC errors to avoid clutter. {{}} +* Advances CDC restart time even without new writes, controlled by `cdcsdk_update_restart_time_interval_secs` flag. 
{{}} +* Enhances CDC restart time updates with more accurate timing control via the `cdcsdk_update_restart_time_interval_secs` flag. {{}} +* Prevents table drop if it's part of a publication during logical replication. {{}} +* Reduces `yb_walsender_poll_sleep_duration_empty_ms` from 1 second to 10 ms to speed up replication in sparse workloads. {{}} + +#### yugabyted + +* Displays Voyager version in two lines and uses camel case in the UI labels. {{}} +* Prevents empty issue types or names in Voyager UI. {{}} + +### Bug fixes + +#### YSQL + +* Reduces XID usage by generating one per `REFRESH MATERIALIZED VIEW CONCURRENTLY` operation. {{}} +* Skips catalog version bump during in-place materialized view refresh. {{}} +* Reduces read restart errors on concurrent writes to disjoint keys. {{}} +* Enables testing of SECURITY LABEL with dummy_seclabel. {{}} +* Ensures correct column mapping for partitioned tables during `INSERT ON CONFLICT` updates. {{}},{{}} +* Sets query ID for background workers earlier to ensure correct tracking. {{}} +* Enhances estimation of result tuple size in edge cases, preventing division by zero errors. {{}} +* Fixes memory leaks in the PostgreSQL anonymizer extension and optimizes test scheduling. {{}} +* Fixes incorrect role name quoting in `ysql_dumpall`. {{}} +* Ensures stable behavior in Connection Manager when using multiple connections. {{}} +* Fixes `\d` command for indexes with spaces in their names. {{}} +* Fixes issues with `INSERT ON CONFLICT DO UPDATE` to prevent secondary index corruption. {{}} +* Modifies `ysql_dumpall` to combine `CREATE ROLE` and `ALTER ROLE` for effective role management. {{}} +* Ensures stable operation of refresh materialized view during major upgrades. {{}} +* Refactors FK cache management into a separate class for cleaner architecture. {{}} +* Enhances index scans and partition pruning for BOOLEAN conditions. {{}} +* Optimizes deferred FK constraint checks by reading ybctids at transaction end. 
{{}} +* Enables setting follower reads YSQL configuration parameters anytime during a connection. {{}} +* Fixes the error handling in INSERT ON CONFLICT statements when using read-committed transactions. {{}} +* Enhances accuracy of seek-next estimation for index scans and paged results. {{}} +* Refines cost model tuning using server-side execution times for more accurate query optimization. {{}} +* Prevents crashes by handling non-variable expressions in single-row updates or deletes. {{}} +* Prevents relcache reference leaks in `yb_get_ybctid_width`. {{}} +* Optimizes cost modeling for primary index scans to assume sequential disk block fetching. {{}} +* Introduces custom SQL error codes for better error handling in parallel queries. {{}} +* Lowers default RPC message size limit to prevent overflow. {{}} +* Enhances `yb_index_check` to verify materialized view indexes' consistency. {{}} +* Enables ANALYZE command to gather top-level statistics for partitioned tables. {{}} +* Corrects estimations for inner table scans in Batched Nested Loop Joins. {{}} +* Fixes integer overflow in BNL cost estimation, preventing negative values. {{}} +* Prevents unnecessary aborts of top-level transactions when only a subtransaction conflicts. {{}} +* Fixes inaccurate row estimates in `yb_cost_bitmap_table_scan` by removing incorrect overwrites. {{}} +* Fixes port conflict handling between Connection Manager and PostgreSQL. {{}} +* Prevents restoring non-colocated databases as colocated during backup. {{}} +* Adds a new YSQL configuration parameter `yb_neg_catcache_ids` to customize negative caching for catalog caches. {{}},{{}} +* Preserves enum sort order during backup and restore processes. {{}} +* Enables geolocation costing in the new cost model with `yb_enable_geolocation_costing`. {{}} +* Corrects the description for `ysql_yb_enable_ash` flag. {{}} +* Ensures successful restoration of enum types from old backups by changing errors to warnings. 
{{}} +* Logs now include odd `pg_enum` OID during restore to better trace issues with missing sortorder values. {{}} +* Restores the call to `ScheduleCheckObjectIdAllocators` inadvertently removed. {{}} +* Fixes a use-after-free bug in ysql_dump by copying tablegroup_name. {{}} +* Separates read-after-commit tests into their own file. {{}} + +#### DocDB + +* Fixes load balancing for rewritten tables in colocated databases. {{}} +* Ensures tables no longer get stuck in HIDING state during failures. {{}} +* Prevents resource waste by checking disk space before remote bootstrap operations. {{}} +* Fixes deadlock in tablet servers using shared memory, preventing crashes. {{}} +* Fixes a crash when `ProcessSupervisor` cannot restart a process. {{}} +* Ensures yb-admin commands respect user-specified timeouts for table flushes and compactions. {{}} +* Enhances transactional xCluster accuracy by using majority replicated OpId for ApplySafeTime calculations. {{}} +* Ensures accurate `WaitForReplicationDrain` behavior by not mislabeling tablets as drained. {{}} +* Enables cloning databases to any past time in the schedule's retention period. {{}} +* Fixes handling of `db_max_flushing_bytes` to properly limit memory usage under high write loads. {{}} +* Prevents unbounded growth of the `recently_applied_map` by not adding read-only transactions, conserving memory. {{}} +* Prevents data loss by ensuring databases are flushed before updating flush markers. {{}} +* Prevents false conflict detection in snapshot isolation operations. {{}} +* Enables successful cloning of sequences to historical times by extending the `yb_disable_catalog_version_check` effect. {{}} +* Enhances system table deletion during `ysql_upgrade` by ensuring clean removal from `SysTableInfoPB`. {{}} +* Enhances leader lease handling to support multiple revisions during rapid leadership changes. 
{{}} +* Stops tracking recently applied transactions if `use_bootstrap_intent_ht_filter` is set to false, reducing memory usage. {{}} +* Prevents `yb-admin` crashes by correctly handling argument count for `create_database_snapshot`. {{}} +* Prevents deadlocks during background compaction and transaction loading. {{}} +* Improves error handling for shared memory operations in DocDB. {{}} +* Removes 60-second timeout upper bound on admin RPCs and adds new `yb_client_admin_rpc_timeout_sec` flag. {{}} + +#### CDC + +* Ensures only relevant transactional records are streamed, preventing `COMMIT` record errors. {{}} +* Prevents CDC crashes by logging a warning for dropped indexes on colocated tables. {{}} +* Prevents data loss by not streaming records during transaction load. {{}} +* Ensures reliable CDC stream functionality during index creation, preventing schema packing errors. {{}} + +#### yugabyted + +* Checks for chrony service before enabling clockbound during node start. {{}} +* Preserves the universe key locally after enabling EAR for recovery scenarios. {{}} + +
+ +## v2024.2.2.4 - May 19, 2025 {#v2024.2.2.4} + +**Build:** `2024.2.2.4-b2` + +**Third-party licenses:** [YugabyteDB](https://downloads.yugabyte.com/releases/2024.2.2.4/yugabytedb-2024.2.2.4-b2-third-party-licenses.html), [YugabyteDB Anywhere](https://downloads.yugabyte.com/releases/2024.2.2.4/yugabytedb-anywhere-2024.2.2.4-b2-third-party-licenses.html) + +### Downloads + + + +**Docker:** + +```sh +docker pull yugabytedb/yugabyte:2024.2.2.4-b2 +``` + +This is a YugabyteDB Anywhere only release, with no changes to YugabyteDB. + ## v2024.2.2.3 - May 6, 2025 {#v2024.2.2.3} **Build:** `2024.2.2.3-b1` @@ -146,17 +368,17 @@ docker pull yugabytedb/yugabyte:2024.2.2.1-b6 #### YSQL -* Reduces XID usage by generating one per `REFRESH MATERIALIZED VIEW CONCURRENTLY` operation. {{}} -* Fixes issues with `INSERT ON CONFLICT DO UPDATE` to prevent secondary index corruption. {{}} -* Ensures stable operation of refresh materialized view during major upgrades. {{}} +* Reduces XID usage by generating one per `REFRESH MATERIALIZED VIEW CONCURRENTLY` operation. {{}} +* Fixes issues with `INSERT ON CONFLICT DO UPDATE` to prevent secondary index corruption. {{}} +* Ensures stable operation of refresh materialized view during major upgrades. {{}} #### DocDB -* Fixes load balancing for rewritten tables in colocated databases. {{}} +* Fixes load balancing for rewritten tables in colocated databases. {{}} #### yugabyted -* Checks for chrony service before enabling clockbound during node start. {{}} +* Checks for chrony service before enabling clockbound during node start. {{}} @@ -496,7 +718,7 @@ YugabyteDB uses [memory division flags](/stable/reference/configuration/yb-maste Also turned on by default is `--enforce_tablet_replica_limits`, which enforces tablet replica limits based on the memory allocated to per-tablet overhead across the universe. 
When turned on, any CREATE TABLE request that would create too many tablets returns an error, and tablet splitting is also blocked if the result would be too many tablets. -In addition, YugabyteDB will alert you if your system currently has too many tablets. If you are *upgrading* a universe, you may want to review your memory division settings and adjust them if desired; see [Best practices](/stable/develop/best-practices-ysql/). +In addition, YugabyteDB will alert you if your system currently has too many tablets. If you are *upgrading* a universe, you may want to review your memory division settings and adjust them if desired; see [Best practices](/stable/develop/best-practices-develop/data-modeling-perf/#minimize-the-number-of-tablets-you-need). ### New features diff --git a/docs/content/preview/tutorials/AI/_index.md b/docs/content/preview/tutorials/AI/_index.md index 3e36896d3cf7..1c566902f28e 100644 --- a/docs/content/preview/tutorials/AI/_index.md +++ b/docs/content/preview/tutorials/AI/_index.md @@ -6,46 +6,57 @@ description: How to Develop Applications with AI and YugabyteDB image: headcontent: Add a scalable and highly-available database to your AI projects type: indexpage +showRightNav: true cascade: unversioned: true --- -{{}} +## Retrieval-augmented generation +{{}} {{}} + title="Similarity Search using Azure AI" + body="Build a scalable generative AI application using YugabyteDB as the database backend." + href="azure-openai/" + icon="/images/tutorials/azure/icons/OpenAI-Icon.svg">}} {{}} + title="Similarity Search using Google Vertex AI" + body="Deploy generative AI applications using Google Vertex AI and YugabyteDB." 
+ href="google-vertex-ai/" + icon="/images/tutorials/google/icons/Google-Vertex-AI-Icon.svg">}} +{{}} + +## Vector basics + +{{}} {{}} {{}} +{{}} -{{}} +## Agentic, multiple data sources, and multi-step reasoning + +{{}} {{}} + title="Use a knowledge base using Llama-Index" + body="Build a scalable RAG (Retrieval-Augmented Generation) app using LlamaIndex and OpenAI." + href="ai-llamaindex-openai/" + icon="/images/tutorials/ai/icons/llamaindex-icon.svg">}} +{{}} {{}} diff --git a/docs/content/preview/tutorials/AI/ai-langchain-openai.md b/docs/content/preview/tutorials/AI/ai-langchain-openai.md index 64c9b993b0fd..9dc8034d49e2 100644 --- a/docs/content/preview/tutorials/AI/ai-langchain-openai.md +++ b/docs/content/preview/tutorials/AI/ai-langchain-openai.md @@ -1,14 +1,14 @@ --- title: How to Develop LLM Apps with LangChain, OpenAI and YugabyteDB -headerTitle: LangChain and OpenAI -linkTitle: LangChain and OpenAI +headerTitle: Query without SQL using LangChain +linkTitle: Query without SQL - LangChain description: Learn to build context-aware LLM applications using LangChain and OpenAI. image: /images/tutorials/ai/icons/langchain-icon.svg headcontent: Query your database using natural language menu: preview_tutorials: identifier: tutorials-ai-langchain-openai - parent: tutorials-ai + parent: tutorials-ai-agentic weight: 60 type: docs --- @@ -287,4 +287,4 @@ LangChain provides a powerful toolkit to application developers seeking LLM inte For more information about LangChain, see the [LangChain documentation](https://python.langchain.com/docs/get_started/introduction). -If you would like to learn more on integrating OpenAI with YugabyteDB, check out the [Azure OpenAI](../../azure/azure-openai/) tutorial. +If you would like to learn more on integrating OpenAI with YugabyteDB, check out the [Azure OpenAI](../azure-openai/) tutorial. 
diff --git a/docs/content/preview/tutorials/AI/ai-llamaindex-openai.md b/docs/content/preview/tutorials/AI/ai-llamaindex-openai.md index fb28da9d176d..6e92abb4fbef 100644 --- a/docs/content/preview/tutorials/AI/ai-llamaindex-openai.md +++ b/docs/content/preview/tutorials/AI/ai-llamaindex-openai.md @@ -1,14 +1,14 @@ --- title: How to Develop RAG Apps with LlamaIndex, OpenAI and YugabyteDB -headerTitle: Build RAG applications with LlamaIndex, OpenAI, and YugabyteDB -linkTitle: LlamaIndex and OpenAI +headerTitle: Talk to a database and knowledge base +linkTitle: Knowledge base - LlamaIndex description: Learn to build RAG applications using LlamaIndex and OpenAI. image: /images/tutorials/ai/icons/llamaindex-icon.svg headcontent: Use YugabyteDB as the database backend for RAG applications menu: preview_tutorials: identifier: tutorials-ai-llamaindex-openai - parent: tutorials-ai + parent: tutorials-ai-agentic weight: 60 type: docs --- diff --git a/docs/content/preview/tutorials/AI/ai-localai.md b/docs/content/preview/tutorials/AI/ai-localai.md index 9c7e7145c21d..477ad9b66cb2 100644 --- a/docs/content/preview/tutorials/AI/ai-localai.md +++ b/docs/content/preview/tutorials/AI/ai-localai.md @@ -1,14 +1,14 @@ --- title: How to Develop LLM Apps with LocalAI and YugabyteDB -headerTitle: Build LLM applications using LocalAI and YugabyteDB -linkTitle: LocalAI +headerTitle: Similarity search using LocalAI +linkTitle: Similarity search - LocalAI description: Learn to build LLM applications using LocalAI. 
image: /images/tutorials/ai/icons/localai-icon.svg headcontent: Use YugabyteDB as the database backend for LLM applications menu: preview_tutorials: identifier: tutorials-ai-localai - parent: tutorials-ai + parent: tutorials-ai-vector weight: 60 type: docs --- diff --git a/docs/content/preview/tutorials/AI/ai-ollama.md b/docs/content/preview/tutorials/AI/ai-ollama.md index 9c90cf9b0e9c..916c3953643c 100644 --- a/docs/content/preview/tutorials/AI/ai-ollama.md +++ b/docs/content/preview/tutorials/AI/ai-ollama.md @@ -1,14 +1,14 @@ --- title: How to Develop AI Apps Locally with Ollama and YugabyteDB -headerTitle: Build applications with locally-hosted embedding models using Ollama and YugabyteDB -linkTitle: Ollama +headerTitle: Similarity search using Ollama +linkTitle: Similarity search - Ollama description: Learn to build LLM applications using Ollama. image: /images/tutorials/ai/icons/ollama-icon.svg headcontent: Use YugabyteDB as the database backend for LLM applications menu: preview_tutorials: identifier: tutorials-ai-ollama - parent: tutorials-ai + parent: tutorials-ai-vector weight: 60 type: docs --- diff --git a/docs/content/preview/tutorials/azure/azure-openai.md b/docs/content/preview/tutorials/AI/azure-openai.md similarity index 98% rename from docs/content/preview/tutorials/azure/azure-openai.md rename to docs/content/preview/tutorials/AI/azure-openai.md index 7ee708cc28ea..4949e5a2b44e 100644 --- a/docs/content/preview/tutorials/azure/azure-openai.md +++ b/docs/content/preview/tutorials/AI/azure-openai.md @@ -1,14 +1,16 @@ --- title: Build Scalable Generative AI Applications with Azure OpenAI and YugabyteDB -headerTitle: Build scalable generative AI applications with Azure OpenAI and YugabyteDB -linkTitle: Azure OpenAI +headerTitle: Similarity search using Azure OpenAI +linkTitle: Similarity search - Azure description: Build scalable generative AI applications with Azure OpenAI and YugabyteDB image: /images/tutorials/azure/icons/OpenAI-Icon.svg 
headcontent: Use YugabyteDB as the database backend for Azure OpenAI applications +aliases: + - /tutorials/azure/azure-openai/ menu: preview_tutorials: identifier: tutorials-azure-openai - parent: tutorials-azure + parent: tutorials-ai-rag weight: 40 type: docs --- @@ -304,4 +306,4 @@ With the help of the PostgreSQL pgvector extension, YugabyteDB enhances the scal To learn more about additional updates to YugabyteDB with release 2.19, check out [Dream Big, Go Bigger: Turbocharging PostgreSQL](https://www.yugabyte.com/blog/postgresql-turbocharging/). -To learn how to run this application using Google Cloud, see [Build scalable generative AI applications with Google Vertex AI and YugabyteDB](../../google/google-vertex-ai/). +To learn how to run this application using Google Cloud, see [Build scalable generative AI applications with Google Vertex AI and YugabyteDB](../google-vertex-ai/). diff --git a/docs/content/preview/tutorials/google/google-vertex-ai.md b/docs/content/preview/tutorials/AI/google-vertex-ai.md similarity index 97% rename from docs/content/preview/tutorials/google/google-vertex-ai.md rename to docs/content/preview/tutorials/AI/google-vertex-ai.md index fb187003cc22..de07a7340bac 100644 --- a/docs/content/preview/tutorials/google/google-vertex-ai.md +++ b/docs/content/preview/tutorials/AI/google-vertex-ai.md @@ -1,14 +1,16 @@ --- title: Build Scalable Generative AI Applications with Google Vertex AI and YugabyteDB -headerTitle: Build scalable generative AI applications with Google Vertex AI and YugabyteDB -linkTitle: Google Vertex AI +headerTitle: Similarity search using Google Vertex AI +linkTitle: Similarity search - Google Vertex description: Build scalable generative AI applications with Google Vertex AI and YugabyteDB image: /images/tutorials/google/icons/Google-Vertex-AI-Icon.svg headcontent: Use YugabyteDB as the database backend for Google Vertex AI applications +aliases: + - /tutorials/google/google-vertex-ai/ menu: preview_tutorials: 
identifier: tutorials-google-vertex-ai - parent: tutorials-google + parent: tutorials-ai-rag weight: 40 type: docs --- @@ -201,4 +203,4 @@ The Google Vertex AI service simplifies the process of designing, building, and With the help of the PostgreSQL pgvector extension, YugabyteDB enhances the scalability of these applications by distributing data and embeddings across a cluster of nodes, facilitating similarity searches on a large scale. -To learn how to run this application using Azure, see [Build scalable generative AI applications with Azure OpenAI and YugabyteDB](../../azure/azure-openai/). +To learn how to run this application using Azure, see [Build scalable generative AI applications with Azure OpenAI and YugabyteDB](../azure-openai/). diff --git a/docs/content/preview/tutorials/azure/_index.md b/docs/content/preview/tutorials/azure/_index.md index ade8962b0d60..82703a9f5302 100644 --- a/docs/content/preview/tutorials/azure/_index.md +++ b/docs/content/preview/tutorials/azure/_index.md @@ -35,7 +35,7 @@ type: indexpage {{}} {{}} {{}} diff --git a/docs/content/preview/tutorials/quick-start/docker.md b/docs/content/preview/tutorials/quick-start/docker.md index cc6952075c6d..0b1c6f195657 100644 --- a/docs/content/preview/tutorials/quick-start/docker.md +++ b/docs/content/preview/tutorials/quick-start/docker.md @@ -153,12 +153,11 @@ In the preceding `docker run` command, the data stored in YugabyteDB does not pe {{< note title="Note" >}} -When you start yugabyted with a non-default `--base_dir` value, you must set the `--base_dir` flag for every subsequent yugabyted command. -For example, to get the status of the cluster you just created, you would enter `bin/yugabyted status --base_dir=/home/yugabyte/yb_data`. +By default, yugabyted uses the `/home/user/var` directory. When you start yugabyted with a non-default `--base_dir` value, you must set the `--base_dir` flag for every subsequent yugabyted command. 
+For example, to get the status of the cluster you just created, you would enter `bin/yugabyted status --base_dir=/home/yugabyte/yb_data`. {{< /note >}} - ## Connect to the database The cluster you have created consists of two processes: diff --git a/docs/content/preview/yugabyte-cloud/_index.md b/docs/content/preview/yugabyte-cloud/_index.md index add89b1373cc..098ad8ec82f1 100644 --- a/docs/content/preview/yugabyte-cloud/_index.md +++ b/docs/content/preview/yugabyte-cloud/_index.md @@ -30,7 +30,7 @@ YugabyteDB Managed is now YugabyteDB Aeon! [Learn more](https://www.yugabyte.com {{< sections/2-boxes >}} {{< sections/bottom-image-box title="Sign up to create a Sandbox cluster" - description="Sign up, log in, and follow the built-in tutorial to create your first cluster and build a sample application. No credit card required." + description="Sign up, log in, and follow the built-in tutorial to create your first cluster, and build a sample application. No credit card required." buttonText="Sign up" buttonTarget="_blank" buttonUrl="https://cloud.yugabyte.com/signup?utm_medium=direct&utm_source=docs&utm_campaign=YBM_signup" diff --git a/docs/content/preview/yugabyte-platform/administer-yugabyte-platform/anywhere-rbac.md b/docs/content/preview/yugabyte-platform/administer-yugabyte-platform/anywhere-rbac.md index 9255636e000a..571f7ad0611f 100644 --- a/docs/content/preview/yugabyte-platform/administer-yugabyte-platform/anywhere-rbac.md +++ b/docs/content/preview/yugabyte-platform/administer-yugabyte-platform/anywhere-rbac.md @@ -14,11 +14,11 @@ type: docs YugabyteDB Anywhere uses a role-based access control (RBAC) model to manage access to your YugabyteDB Anywhere instance (whether via the UI or the REST API). 
Using roles, you can enforce the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege) (PoLP) by ensuring that users have the precise permissions needed to fulfill their roles while mitigating the risk of unauthorized access or accidental breaches. A role defines a set of permissions that determine what features can be accessed by account users who have been assigned that role. -RBAC is also available with fine-grained control over access to universes. Fine-grained RBAC is {{}}; during Early Access, by default fine-grained RBAC is not enabled. See [Manage users](#manage-users). +RBAC is also available with fine-grained control over access to universes. Fine-grained RBAC is {{}}; during Early Access, by default fine-grained RBAC is not enabled. See [Manage users](#manage-users). ## Users and roles -As a Super Admin or Admin, you can invite new users and manage existing users for your YugabyteDB Anywhere instance. How you add and modify users varies depending on whether you have enabled [fine-grained RBAC](#fine-grained-rbac) {{}}. You can only assign, create, and modify custom roles if fine-grained RBAC is enabled. +As a Super Admin or Admin, you can invite new users and manage existing users for your YugabyteDB Anywhere instance. How you add and modify users varies depending on whether you have enabled [fine-grained RBAC](#fine-grained-rbac) {{}}. You can only assign, create, and modify custom roles if fine-grained RBAC is enabled. A user can interact with a YugabyteDB Anywhere instance via the UI or [REST API](../../anywhere-automation/anywhere-api/). 
diff --git a/docs/content/preview/yugabyte-platform/alerts-monitoring/anywhere-export-configuration.md b/docs/content/preview/yugabyte-platform/alerts-monitoring/anywhere-export-configuration.md index 5a8cbe619286..a309e1868bbd 100644 --- a/docs/content/preview/yugabyte-platform/alerts-monitoring/anywhere-export-configuration.md +++ b/docs/content/preview/yugabyte-platform/alerts-monitoring/anywhere-export-configuration.md @@ -30,7 +30,7 @@ For information on how to export logs from a universe using an export configurat ## Prerequisites -Export configuration is {{}}. To enable export configuration management, set the **Enable DB Audit Logging** Global Configuration option (config key `yb.universe.audit_logging_enabled`) to true. Refer to [Manage runtime configuration settings](../../administer-yugabyte-platform/manage-runtime-config/). Note that only a Super Admin user can modify Global configuration settings. The flag can't be turned off if audit logging is enabled on a universe. +Export configuration is {{}}. To enable export configuration management, set the **Enable DB Audit Logging** Global Configuration option (config key `yb.universe.audit_logging_enabled`) to true. Refer to [Manage runtime configuration settings](../../administer-yugabyte-platform/manage-runtime-config/). Note that only a Super Admin user can modify Global configuration settings. The flag can't be turned off if audit logging is enabled on a universe. ## Best practices diff --git a/docs/content/preview/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md b/docs/content/preview/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md index d7a33ef5d3a7..5bacb5ec5cd8 100644 --- a/docs/content/preview/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md +++ b/docs/content/preview/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md @@ -26,11 +26,13 @@ You can configure AWS S3 and S3-compatible storage as your backup target. 
By default, the option to use S3 path style access is not available. To ensure that you can use this feature, navigate to `https:///features` and enable the **enablePathStyleAccess** option. {{< /note >}} +### Create an AWS backup configuration + To configure S3 storage, do the following: 1. Navigate to **Integrations** > **Backup** > **Amazon S3**. -1. Click **Create S3 Backup** to access the configuration form shown in the following illustration: +1. Click **Create S3 Backup**. ![S3 Backup](/images/yp/cloud-provider-configuration-backup-aws.png) @@ -104,38 +106,12 @@ To create a GCP backup configuration, do the following: 1. Enter the URI of your GCS bucket in the **GCS Bucket** field. For example, `gs://gcp-bucket/test_backups`. -1. Select **Use GCP IAM** if you're using [GKE service account](#gke-service-account-based-iam-gcp-iam) for backup and restore. +1. Select **Use GCP IAM** to use the YugabyteDB Anywhere instance's Identity Access Management (IAM) role for the GCS backup. -1. Enter the credentials for your account in JSON format in the **GCS Credentials** field. +1. If **Use GCP IAM** is disabled, enter the credentials for your account in JSON format in the **GCS Credentials** field. 1. Click **Save**. -### GKE service account-based IAM (GCP IAM) - -Google Kubernetes Engine (GKE) uses a concept known as "Workload Identity" to provide a secure way to allow a Kubernetes service account (KSA) in your GKE cluster to act as an IAM service account so that your Kubernetes universes can access GCS for backups. - -In GKE, each pod can be associated with a KSA. The KSA is used to authenticate and authorize the pod to interact with other Google Cloud services. An IAM service account is a Google Cloud resource that allows applications to make authorized calls to Google Cloud APIs. - -Workload Identity links a KSA to an IAM account using annotations in the KSA. 
Pods that use the configured KSA automatically authenticate as the IAM service account when accessing Google Cloud APIs. - -By using Workload Identity, you avoid the need for manually managing service account keys or tokens in your applications running on GKE. This approach enhances security and simplifies the management of credentials. - -- To enable GCP IAM when installing YugabyteDB Anywhere, refer to [Enable GKE service account-based IAM](../../install-yugabyte-platform/install-software/kubernetes/#enable-gke-service-account-based-iam). - -- To enable GCP IAM during universe creation, refer to [Configure Helm overrides](../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides). - -- To upgrade an existing universe with GCP IAM, refer to [Upgrade universes for GKE service account-based IAM support](../../manage-deployments/edit-helm-overrides/#upgrade-universes-for-gke-service-account-based-iam). - -**Prerequisites** - -- The GKE cluster hosting the pods should have Workload Identity enabled. The worker nodes of this GKE cluster should have the GKE metadata server enabled. - -- The IAM service account, which is used to annotate the KSA, should have sufficient permissions to read, write, list, and delete objects in GCS. - -- The KSA, which is annotated with the IAM service account, should be present in the same namespace where the pod resources for YugabyteDB Anywhere and YugabyteDB universes are expected. If you have multiple namespaces, each namespace should include the annotated KSA. - -For instructions on setting up Workload Identity, see [Use Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) in the GKE documentation. 
- ## Network File System You can configure Network File System (NFS) as your backup target, as follows: diff --git a/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/gcp.md b/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/gcp.md index f72fe0d84649..809fdc53c425 100644 --- a/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/gcp.md +++ b/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/gcp.md @@ -44,7 +44,7 @@ Before you can deploy universes using YugabyteDB Anywhere (YBA), you must create When deploying a universe, YBA uses the provider configuration settings to do the following: - Create instances on GCP using the following: - - your cloud provider credentials + - your GCP service account credentials - specified regions and availability zones (this can be a subset of those specified in the provider configuration) - a Linux image - optionally, an [instance template](#gcp-instance-templates) @@ -53,7 +53,7 @@ When deploying a universe, YBA uses the provider configuration settings to do th ## Prerequisites -- Cloud provider credentials. YBA uses your credentials to automatically provision and de-provision instances that run YugabyteDB. An instance for YugabyteDB includes a compute instance, as well as local or remote disk storage attached to the compute instance. +- GCP service account credentials. YBA uses your credentials to automatically provision and de-provision instances that run YugabyteDB. An instance for YugabyteDB includes a compute instance, as well as local or remote disk storage attached to the compute instance. For more information on setting up a GCP service account, refer to [Cloud permissions to deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). 
@@ -77,8 +77,6 @@ To create a GCP provider: The create provider process includes generating a new VPC, a network, subnetworks in all available regions, as well as a new firewall rule, VPC peering for network connectivity, and a custom SSH key pair for YugabyteDB Anywhere-to-YugabyteDB connectivity. -Now you are ready to create a YugabyteDB universe on GCP. - ### View and edit providers To view a provider, select it in the list of GCP Configs to display the **Overview**. @@ -99,6 +97,7 @@ To edit the provider, select **Config Details**, make changes, and click **Apply If `new-project-yb` is a new GCE project, the backend request fails and you will be notified that you can't change the GCE project for an in-use provider. - Regions - You can add regions and zones to an in-use provider. Note that you cannot edit existing region details, delete a region if any of the region's zones are in use, or delete zones that are in use. +- Linux version catalog To view the universes created using the provider, select **Universes**. @@ -112,11 +111,17 @@ Enter a Provider name. The Provider name is an internal tag used for organizing ### Cloud Info -If your YBA instance is not running inside GCP, you need to supply YBA with credentials to the desired GCP project by uploading a configuration file. To do this, set **Credential Type** to **Upload Service Account config** and proceed to upload the JSON file that you obtained when you created your service account, as described in [Cloud permissions](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). +**Credential Type**. YBA requires the ability to create VMs in GCP. To do this, you can do one of the following: + +- If your YBA instance is not running inside GCP, you need to supply YBA with credentials to the desired GCP project by uploading a configuration file. 
+ + Set **Credential Type** to **Upload Service Account config** and upload the JSON file that you obtained when you created your service account, as described in [Cloud permissions](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). + +- If your YBA instance is running inside GCP, the preferred method for authentication to the GCP APIs is to add a service account role to the GCP instance running YBA and then configure YBA to use the instance's service account. -If your YBA instance is running inside GCP, the preferred method for authentication to the GCP APIs is to add a service account role to the GCP instance running YBA and then configure YBA to use the instance's service account. To do this, set **Credential Type** to **Use service account from this YBA host's instance**. + Set **Credential Type** to **Use service account from this YBA host's instance**. -### VPC Setup +#### VPC Setup Specify the VPC to use for deploying YugabyteDB nodes. diff --git a/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/kubernetes.md b/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/kubernetes.md index 4a87fdf96b60..6e4958548040 100644 --- a/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/kubernetes.md +++ b/docs/content/preview/yugabyte-platform/configure-yugabyte-platform/kubernetes.md @@ -50,7 +50,7 @@ Before you create a Kubernetes provider, perform the following: - Create a `yugabyte-platform-universe-management` service account. - Create a `kubeconfig` file of the service account you created to configure access to the Kubernetes cluster. -Refer to [To deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes/). +See [To deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes/). ## Configure Kubernetes @@ -124,7 +124,7 @@ Continue configuring your Kubernetes provider by clicking **Add region** and com 1. 
Complete the **Overrides** field using one of the provided [options](#overrides). If you do not specify anything, YBA uses defaults specified inside the Helm chart. For additional information, see [Open source Kubernetes](../../../deploy/kubernetes/single-zone/oss/helm-chart/). -1. If you are using [Kubernetes cert-manager](https://cert-manager.io) to manage TLS certificates, specify the issuer kind, enter the issuer name, and optionally provide the issuer group. For more information, refer to [Enable encryption in transit](../../security/enable-encryption-in-transit/add-certificate-kubernetes/). +1. If you are using [Kubernetes cert-manager](https://cert-manager.io) to manage TLS certificates, specify the issuer kind, enter the issuer name, and optionally provide the issuer group. For more information, refer to [Add certificates](../../security/enable-encryption-in-transit/add-certificate-kubernetes/). If required, add a new zone by clicking **Add Zone**, as your configuration may have multiple zones. @@ -379,6 +379,17 @@ tserver: The Kubernetes `labels` are key-value pairs attached to objects. The `labels` are used to specify identifying attributes of objects that are meaningful and relevant to you. For more information, see [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the Kubernetes documentation. 
+### Use common name with cert-manager + +If your certificate issuer (for example, `aws-privateca-issuer`) requires the certificate to include the common name, set the following override: + +```yml +tls: + certManager: + certificates: + commonNameRequired: true +``` + ### Preflight check Preflight check overrides, such as DNS address resolution, disk IO, available port, ulimit: diff --git a/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md b/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md index 2d4761a4b3e3..ed680ff5dbf8 100644 --- a/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md +++ b/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md @@ -159,7 +159,7 @@ If there are any errors in your overrides definitions, a detailed error message #### GKE service account -If you want to enable [GKE service account-based IAM](../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam) for backup and restore using GCS at the universe level, add the following overrides: +If you want to enable [GKE service account-based IAM](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam) for backup and restore using GCS at the universe level, add the following overrides: ```yaml tserver: diff --git a/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone.md b/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone.md index 508cf261b70b..7c8ea6f86270 100644 --- a/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone.md +++ b/docs/content/preview/yugabyte-platform/create-deployments/create-universe-multi-zone.md @@ -138,6 +138,12 @@ Enable Connection Pooling Enable Systemd Services : To use cron instead of systemd for
managing nodes, you can disable systemd services. This is not recommended. +{{< warning title="cron-based support deprecated" >}} + +cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 and later. To upgrade to v2025.2 or later, all your universes must be using systemd. Universes that use a cloud provider configuration will be upgraded to systemd automatically. To upgrade on-premises cron-based universes, navigate to the universe and choose **Actions > Upgrade to Systemd**. + +{{< /warning >}} + Override Deployment Ports : To customize the [ports used for the universe](../../prepare/networking/), select the **Override Deployment Ports** option and enter the custom port numbers for the services you want to change. Any value from `1024` to `65535` is valid, as long as it doesn't conflict with anything else running on nodes to be provisioned. diff --git a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/installer.md b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/installer.md index abdfcc60fada..2dd6aae2716c 100644 --- a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/installer.md +++ b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/installer.md @@ -282,7 +282,7 @@ To use the data disk with a new installation, do the following: ### Reconfigure -You can use YBA Installer to reconfigure an installed YBA instance. +You can use YBA Installer to make changes to an installed YBA instance. To reconfigure an installation, edit the configuration file with your changes, and then run the command as follows: @@ -297,8 +297,8 @@ For more information, refer to [Configuration options](#configuration-options). YBA Installer provides basic service management, with `start`, `stop`, and `restart` commands. Each of these can be performed for all the services (`platform`, `postgres`, and `prometheus`), or any individual service.
```sh -sudo yba-ctl [start, stop, reconfigure] -sudo yba-ctl [start, stop, reconfigure] prometheus +sudo yba-ctl [start, stop, restart] +sudo yba-ctl [start, stop, restart] prometheus ``` In addition to the state changing operations, you can use the `status` command to show the status of all YugabyteDB Anywhere services, in addition to other information such as the log and configuration location, versions of each service, and the URL to access the YugabyteDB Anywhere UI. @@ -437,6 +437,8 @@ YBA Installer [automatically generates](#configure-yba-installer) the file when | sudo | opt/yba-ctl/ | | non-sudo | ~/opt/yba-ctl/ | +To make changes to an existing installation, edit the configuration file with your changes and run the [reconfigure](#reconfigure) command. Note that some settings (marked with {{}}) cannot be changed after installation. + Note that the file must include all fields. Optional fields may be left blank. ### Configure YBA Installer diff --git a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md index beba5482bd2b..9c283464da35 100644 --- a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md +++ b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md @@ -518,7 +518,7 @@ In addition, it is recommended to set a large initial storage size, because resi If you are using Google Cloud Storage (GCS) for backups, you can enable GKE service account-based IAM (GCP IAM) so that Kubernetes universes can access GCS. -Before enabling GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam). +Before enabling GCP IAM, ensure you have the prerequisites. 
Refer to [GCP IAM](../../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam). To enable GCP IAM, provide the following additional Helm values during installation to a version which supports this feature (v2.18.4 or later): diff --git a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md b/docs/content/preview/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md deleted file mode 100644 index 1e296c7cf65d..000000000000 --- a/docs/content/preview/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md +++ /dev/null @@ -1,350 +0,0 @@ - - -Choose the type of YugabyteDB Anywhere installation. - -{{< tabpane text=true >}} - - {{% tab header="Docker-based" lang="docker" %}} - -For a Docker-based installation, YugabyteDB Anywhere uses [Replicated scheduler](https://www.replicated.com/) for software distribution and container management. You need to ensure that the host can pull containers from the [Replicated Docker Registries](https://help.replicated.com/docs/native/getting-started/docker-registries/). - -Replicated installs a compatible Docker version if it is not pre-installed on the host. The currently supported Docker version is 20.10.n. - - {{% /tab %}} - - {{% tab header="Kubernetes-based" lang="kubernetes" %}} - -For a Kubernetes-based installation, you need to ensure that the host can pull container images from the [Quay.io](https://quay.io/) container registry. For details, see [Pull and push YugabyteDB Docker images to private container registry](#pull-and-push-yugabytedb-docker-images-to-private-container-registry). - -In addition, you need to ensure that core dumps are enabled and configured on the underlying Kubernetes node. For details, see [Specify ulimit and remember the location of core dumps](#specify-ulimit-and-remember-the-location-of-core-dumps). 
- - - -#### Specify ulimit and remember the location of core dumps - -The core dump collection in Kubernetes requires special care due to the fact that `core_pattern` is not isolated in cgroup drivers. - -You need to ensure that core dumps are enabled on the underlying Kubernetes node. Running the `ulimit -c` command in a Kubernetes pod or node must produce a large non-zero value or the `unlimited` value as an output. For more information, see [How to enable core dumps](https://www.ibm.com/support/pages/how-do-i-enable-core-dumps). - -To be able to locate your core dumps, you should be aware of the fact that the location to which core dumps are written depends on the sysctl `kernel.core_pattern` setting. For more information, see [Linux manual: core(5)](https://man7.org/linux/man-pages/man5/core.5.html#:~:text=Naming%20of%20core%20dump%20files). - -To inspect the value of the sysctl in a Kubernetes pod or node, execute the following: - -```sh -cat /proc/sys/kernel/core_pattern -``` - -If the value of `core_pattern` contains a `|` pipe symbol (for example, `|/usr/share/apport/apport -p%p -s%s -c%c -d%d -P%P -u%u -g%g -- %E`), the core dump is being redirected to a specific collector on the underlying Kubernetes node, with the location depending on the exact collector. To be able to retrieve core dump files in case of a crash in the Kubernetes pod, it is important that you understand where these files are written. - -If the value of `core_pattern` is a literal path of the form `/var/tmp/core.%p`, no action is required on your part, as core dumps will be copied by the YugabyteDB node to the persistent volume directory `/mnt/disk0/cores` for future analysis. - -Note the following: - -- ulimits and sysctl are inherited from Kubernetes nodes and cannot be changed for an individual pod. -- New Kubernetes nodes might be using [systemd-coredump](https://www.freedesktop.org/software/systemd/man/systemd-coredump.html) to manage core dumps on the node. 
- -#### Pull and push YugabyteDB Docker images to private container registry - -Due to security concerns, some Kubernetes environments use internal container registries such as Harbor and Nexus. In this type of setup, YugabyteDB deployment must be able to pull images from and push images to a private registry. - -{{< note title="Note" >}} - -This is not a recommended approach for enterprise environments. You should ask the container registry administrator to add proxy cache to pull the source images to the internal registry automatically. This would allow you to avoid modifying the Helm chart or providing a custom registry inside the YugabyteDB Anywhere cloud provider. - -{{< /note >}} - -Before proceeding, ensure that you have the following: - -- Pull secret consisting of the user name and password or service account to access source (pull permission) and destination (push and pull permissions) container registries. -- Docker installed on a server (desktop) that can access both container registries. For installation instructions, see [Docker Desktop](https://www.docker.com/products/docker-desktop). - -Generally, the process involves the following: - -- Fetching the correct version of the YugabyteDB Helm chart whose `values.yaml` file describes all the image locations. -- Retagging images. -- Pushing images to the private container registry. -- Modifying the Helm chart values to point to the new private location. - -![img](/images/yp/docker-pull.png) - -You need to perform the following steps: - -1. Log in to [Quay.io](https://quay.io/) to access the YugabyteDB private registry using the user name and password provided in the secret `yaml` file. To find the `auth` field, use `base64 -d` to decode the data inside the `yaml` file twice. In this field, the user name and password are separated by a colon. For example, `yugabyte+:ZQ66Z9C1K6AHD5A9VU28B06Q7N0AXZAQSR`. 
- - ```sh - docker login -u “your_yugabyte_username” -p “yugabyte_provided_password” quay.io - - docker search yugabytedb # You should see images - ``` - -1. Fetch the YugabyteDB Helm chart on your desktop (install Helm on your desktop). Since the images in the `values.yaml` file may vary depending on the version, you need to specify the version you want to pull and push, as follows: - - ```sh - helm repo add yugabytedb https://charts.yugabyte.com - helm repo update - helm fetch yugabytedb/yugaware - - version= {{ version }} - tar zxvf yugaware-{{ version }}.tgz - cd yugaware - cat values.yaml - ``` - - ```properties - image: - commonRegistry: "" - repository: **quay.io/yugabyte/yugaware** - tag: **{{ version.build }}** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: quay.io - tag: **latest** - name: **yugabyte/thirdparty-deps** - prometheus: - registry: "" - tag: **{{ version.prometheus }}** - name: **prom/prometheus** - nginx: - registry: "" - tag: **{{ version.nginx }}** - name: nginx - ``` - -1. 
Pull images to your Docker Desktop, as follows: - - ```sh - docker pull quay.io/yugabyte/yugaware:{{ version.build }} - ``` - - ```output - xxxxxxxxx: Pulling from yugabyte/yugaware - c87736221ed0: Pull complete - 4d33fcf3ee85: Pull complete - 60cbb698a409: Pull complete - daaf3bdf903e: Pull complete - eb7b573327ce: Pull complete - 94aa28231788: Pull complete - 16c067af0934: Pull complete - 8ab1e7f695af: Pull complete - 6153ecb58755: Pull complete - c0f981bfb844: Pull complete - 6485543159a8: Pull complete - 811ba76b1d72: Pull complete - e325b2ff3e2a: Pull complete - c351a0ce1ccf: Pull complete - 73765723160d: Pull complete - 588cb609ac0b: Pull complete - af3ae7e64e48: Pull complete - 17fb23853f77: Pull complete - cb799d679e2f: Pull complete - Digest: sha256:0f1cb1fdc1bd4c17699507ffa5a04d3fe5f267049e0675d5d78d77fa632b330c - Status: Downloaded newer image for quay.io/yugabyte/yugaware:xxxxxx - quay.io/yugabyte/yugaware:xxxxxxx - ``` - - ```sh - docker pull quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```output - latest: Pulling from yugabyte/thirdparty-deps - c87736221ed0: Already exists - 4d33fcf3ee85: Already exists - 60cbb698a409: Already exists - d90c5841d133: Pull complete - 8084187ca761: Pull complete - 47e3b9f5c7f5: Pull complete - 64430b56cbd6: Pull complete - 27b03c6bcdda: Pull complete - ae35ebe6caa1: Pull complete - 9a655eedc488: Pull complete - Digest: sha256:286a13eb113398e1c4e63066267db4921c7644dac783836515a783cbd25b2c2a - Status: Downloaded newer image for quay.io/yugabyte/thirdparty-deps:latest - quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```sh - docker pull postgres:11.5 - ``` - - ```output - xxxxxx: Pulling from library/postgres - 80369df48736: Pull complete - b18dd0a6efec: Pull complete - 5c20c5b8227d: Pull complete - c5a7f905c8ec: Pull complete - 5a3f55930dd8: Pull complete - ffc097878b09: Pull complete - 3106d02490d4: Pull complete - 88d1fc513b8f: Pull complete - f7d9cc27056d: Pull complete - afe180d8d5fd: Pull complete - 
b73e04acbb5f: Pull complete - 1dba81bb6cfd: Pull complete - 26bf23ba2b27: Pull complete - 09ead80f0070: Pull complete - Digest: sha256:b3770d9c4ef11eba1ff5893e28049e98e2b70083e519e0b2bce0a20e7aa832fe - Status: Downloaded newer image for postgres:11.5 - docker.io/library/postgres: - ``` - - ```sh - docker pull prom/prometheus:v2.2.1 - ``` - - ```output - Image docker.io/prom/prometheus:v2.2.1 uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/ - aab39f0bc16d: Pull complete - a3ed95caeb02: Pull complete - 2cd9e239cea6: Pull complete - 0266ca3d0dd9: Pull complete - 341681dba10c: Pull complete - 8f6074d68b9e: Pull complete - 2fa612efb95d: Pull complete - 151829c004a9: Pull complete - 75e765061965: Pull complete - b5a15632e9ab: Pull complete - Digest: sha256:129e16b08818a47259d972767fd834d84fb70ca11b423cc9976c9bce9b40c58f - Status: Downloaded newer image for prom/prometheus: - docker.io/prom/prometheus: - ``` - - ```sh - docker pull nginx:1.17.4 - ``` - - ```output - 1.17.4: Pulling from library/nginx - 8d691f585fa8: Pull complete - 047cb16c0ff6: Pull complete - b0bbed1a78ca: Pull complete - Digest: sha256:77ebc94e0cec30b20f9056bac1066b09fbdc049401b71850922c63fc0cc1762e - Status: Downloaded newer image for nginx:1.17.4 - docker.io/library/nginx:1.17.4 - ``` - - ```sh - docker pull janeczku/go-dnsmasq:release-1.0.7 - ``` - - ```output - release-1.0.7: Pulling from janeczku/go-dnsmasq - 117f30b7ae3d: Pull complete - 504f1e14d6cc: Pull complete - 98e84d0ba41a: Pull complete - Digest: sha256:3a99ad92353b55e97863812470e4f7403b47180f06845fdd06060773fe04184f - Status: Downloaded newer image for janeczku/go-dnsmasq:release-1.0.7 - docker.io/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. 
Log in to your target container registry, as per the following example that uses Google Container Registry (GCR) : - - ```sh - docker login -u _json_key --password-stdin https://gcr.io < .ssh/my-service-account-key.json - ``` - -1. Tag the local images to your target registry, as follows: - - ```sh - docker images - ``` - - ```output - REPOSITORY TAG IMAGE ID CREATED SIZE - quay.io/yugabyte/yugaware 2.5.1.0-b153 **a04fef023c7c** 6 weeks ago 2.54GB - quay.io/yugabyte/thirdparty-deps latest **721453480a0f** 2 months ago 447MB - nginx 1.17.4 **5a9061639d0a** 15 months ago 126MB - postgres 11.5 **5f1485c70c9a** 15 months ago 293MB - prom/prometheus v2.2.1 **cc866859f8df** 2 years ago 113MB - janeczku/go-dnsmasq release-1.0.7 **caef6233eac4** 4 years ago 7.38MB - ``` - - ```sh - docker tag a04fef023c7c gcr.io/dataengineeringdemos/yugabyte/yugaware:2.5.1.0-b153 - docker tag 721453480a0f gcr.io/dataengineeringdemos/yugabyte/thirdparty-deps:latest - docker tag 5a9061639d0a gcr.io/dataengineeringdemos/yugabyte/nginx:1.17.4 - docker tag 5f1485c70c9a gcr.io/dataengineeringdemos/yugabyte/postgres:11.5 - docker tag cc866859f8df gcr.io/dataengineeringdemos/prom/prometheus:v2.2.1 - docker tag caef6233eac4 gcr.io/dataengineeringdemos/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. Push images to the private container registry, as follows: - - ```sh - docker push a04fef023c7c - docker push 721453480a0f - docker push 5a9061639d0a - docker push 5f1485c70c9a - docker push cc866859f8df - docker push caef6233eac4 - ``` - - ![img](/images/yp/docker-image.png) - -1. Modify the Helm chart `values.yaml` file. 
You can map your private internal repository URI to `commonRegistry` and use the folder or `project/image_name` and tags similar to the following: - - ```properties - image: - commonRegistry: "**gcr.io/dataengineeringdemos**" - repository: **“”** - tag: **2.5.1.0-b153** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: /yugabyte/thhirdparty-deps - tag: **latest** - name: **yugabyte/thirdparty-deps** - postgres: - registry: "yugabyte/postgres" - tag: 11.5 - name: **postgres** - prometheus: - registry: "prom/prometheus" - tag: **v2.2.1** - name: **prom/prometheus** - nginx: - registry: "yugabyte/nginx" - tag: **1.17.4** - name: nginx - dnsmasq: - registry: "janeczku/go-dnsmasq/" - tag: **release-1.0.7** - name: **janeczku/go-dnsmasq - ``` - -1. Install Helm chart or specify the container registry in YugabyteDB Anywhere cloud provider, as follows: - - ```sh - helm install yugaware **.** -f values.yaml - ``` - - {{% /tab %}} - - {{% tab header="Airgapped" lang="airgapped" %}} - -Installing YugabyteDB Anywhere on Airgapped hosts, without access to any Internet traffic (inbound or outbound) requires the following: - -- Whitelisting endpoints: to install Replicated and YugabyteDB Anywhere on a host with no Internet connectivity, you have to first download the binaries on a computer that has Internet connectivity, and then copy the files over to the appropriate host. In case of restricted connectivity, the following endpoints have to be whitelisted to ensure that they are accessible from the host marked for installation: - `https://downloads.yugabyte.com` - `https://download.docker.com` - -- Ensuring that Docker Engine version 20.10.n is available. If it is not installed, you need to follow the procedure described in [Installing Docker in airgapped](https://www.replicated.com/docs/kb/supporting-your-customers/installing-docker-in-airgapped/). 
-- Ensuring that the following ports are open on the YugabyteDB Anywhere host: - - 8800 – HTTP access to the Replicated UI - - 80 or 443 – HTTP and HTTPS access to the YugabyteDB Anywhere UI, respectively - - 22 – SSH -- Ensuring that the attached disk storage (such as persistent EBS volumes on AWS) is 100 GB minimum. -- Having YugabyteDB Anywhere airgapped install package. Contact Yugabyte Support for more information. -- Signing the Yugabyte license agreement. Contact Yugabyte Support for more information. - - {{% /tab %}} - -{{< /tabpane >}} diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/edit-config-flags.md b/docs/content/preview/yugabyte-platform/manage-deployments/edit-config-flags.md index f9b9fa7c77e4..7755ac4a684e 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/edit-config-flags.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/edit-config-flags.md @@ -24,7 +24,7 @@ For more information about the available configuration flags, see the following: ## Enhanced Postgres Compatibility -If your cluster database version is v2024.2 or later, you can enable early access features for PostgreSQL compatibility by navigating to the universe and clicking **Actions > Edit Postgres Compatibility**. For more information, refer to [Enhanced PostgreSQL Compatibility Mode](../../../develop/postgresql-compatibility/). +If your cluster database version is v2024.2 or later, you can enable early access features for PostgreSQL compatibility by navigating to the universe and clicking **Actions > More > Edit Postgres Compatibility**. For more information, refer to [Enhanced PostgreSQL Compatibility Mode](../../../develop/postgresql-compatibility/). {{}} Enabling Enhanced Postgres Compatibility sets several flags, and overrides any settings you may have set for the same flags on the **G-Flags** tab. The **G-Flags** tab will however continue to display the setting that you customized. 
diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/edit-helm-overrides.md b/docs/content/preview/yugabyte-platform/manage-deployments/edit-helm-overrides.md index d16391de1769..0e096f2bbf60 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/edit-helm-overrides.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/edit-helm-overrides.md @@ -29,7 +29,7 @@ To edit Kubernetes overrides, do the following: If you are using Google Cloud Storage (GCS) for backups, you can enable GKE service account-based IAM (GCP IAM) so that Kubernetes universes can access GCS. -Before upgrading a universe for GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam). +Before upgrading a universe for GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam). To upgrade an existing universe to use GCP IAM, do the following: diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/edit-universe.md b/docs/content/preview/yugabyte-platform/manage-deployments/edit-universe.md index 91a41f25766f..52937554d345 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/edit-universe.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/edit-universe.md @@ -63,7 +63,7 @@ While in Early Access, the feature is not available by default. To make connecti To enable or disable connection pooling on a universe: 1. Navigate to your universe. -1. Click **Actions > Edit Connection Pooling** to open the **Edit Connection Pooling** dialog. +1. Click **Actions > More > Edit Connection Pooling** to open the **Edit Connection Pooling** dialog. 1. Enable or disable the **Built-In Connection Pooling** option. 1. 
Optionally, you can change the YSQL API port (used by applications to connect to a universe) and the Internal YSQL Port, which is the port that the YugabyteDB internal PostgreSQL process listens on when connection pooling is enabled. It defaults to 6433 and is only required for local binding, not external connectivity. 1. Click **Apply Changes**. diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-nodes.md b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-nodes.md index 466b062b78fb..59675a150c8a 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-nodes.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-nodes.md @@ -75,10 +75,10 @@ For each node in the universe, use the following general procedure: - Inline patching - You modify the Linux OS binaries in place (for example, using yum). - Boot disk replacement - You create a separate new VM with a virtual disk containing the new Linux OS patch or upgrade, disconnect the virtual disk from the new VM, and use it to replace the DB node's boot disk. This is typically used with a hypervisor or public cloud. - If the node uses assisted or fully manual provisioning, after replacing the boot disk, re-provision the node by following the [manual provisioning steps](../../configure-yugabyte-platform/on-premises-script/). - Ensure that the node retains its IP addresses after the patching of the Linux OS. Also ensure that the existing data volumes on the node remain untouched by the OS patching mechanism. +1. After replacing the boot disk, re-provision the node by following the steps in [Automatically provision on-premises nodes](../../prepare/server-nodes-software/software-on-prem/). + 1. 
Re-provision the node using the following API command: ```shell diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software-prepare.md b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software-prepare.md index 3c16b0a6f350..e651d3a88b19 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software-prepare.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software-prepare.md @@ -16,6 +16,20 @@ type: docs If your universe is running on a [deprecated OS](../../../reference/configuration/operating-systems/), you need to update your OS before you can upgrade to the next major release of YugabyteDB. Refer to [Patch and upgrade the Linux operating system](../upgrade-nodes/). +## cron-based universes + +cron and root-level systemd have been deprecated in favor of user-level systemd with node agent for management of universe nodes. + +In particular, cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later. Before you will be able to upgrade to v2025.2 or later, all your universes must be using systemd. YugabyteDB Anywhere will automatically upgrade universes that use a cloud provider configuration to systemd. + +However, on-premises cron-based universes must be upgraded manually. To do this, in YugabyteDB Anywhere v2024.2.2 or later, navigate to the universe and choose **Actions>Upgrade to Systemd**. + +## Node agent + +YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later require universes to have node agent running on their nodes. Before you will be able to upgrade to v2025.2 or later, all your universes must be using node agent. + +YugabyteDB Anywhere will attempt to automatically update universes. 
If it is unable to update a universe, make sure the universe nodes satisfy the [prerequisites](../../prepare/server-nodes-software/) and re-try the install by clicking **Actions>Install Node Agent**. + ## Review major changes in previous YugabyteDB releases {{< warning title="For YugabyteDB upgrades in YugabyteDB Anywhere" >}} diff --git a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software.md b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software.md index ea873f01cb0a..10dc575ae394 100644 --- a/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software.md +++ b/docs/content/preview/yugabyte-platform/manage-deployments/upgrade-software.md @@ -55,18 +55,18 @@ When performing a database upgrade, do the following: title="Prepare to upgrade" body="Review changes that may affect your automation." href="../upgrade-software-prepare/" - icon="/images/section_icons/quick_start/install.png">}} + icon="fa-thin fa-diamond-exclamation">}} {{}} + icon="fa-thin fa-download">}} {{}} + icon="fa-thin fa-up-from-bracket">}} {{}} diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md index 92f21e5954a8..69656544c584 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. 
+{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to AWS using an [on-premises provider](../cloud-permissions-nodes/). +{{}} + ## AWS The following permissions are required for AWS. @@ -112,7 +116,7 @@ The following permissions are required for AWS. To grant the required access, you do one of the following: -- Create a service account with the permissions. You'll later provide YBA with the service account Access key ID and Secret Access Key when creating the provider. +- Create a service account with the permissions. You'll later provide YBA with the service account Access key ID and Secret Access Key when creating the AWS provider configuration. - Attach an IAM role with the required permissions to the EC2 VM instance where YugabyteDB Anywhere will be running. ### Service account diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md index 6d06c4bacda2..fa80c1c2eb56 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. +{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to Azure using an [on-premises provider](../cloud-permissions-nodes/). 
+{{}} + ## Azure ### Application and resource group diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md index 942bff2a2e1d..b2190ed5d06f 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. +{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to GCP using an [on-premises provider](../cloud-permissions-nodes/). +{{}} + ## GCP The [Compute Admin role](https://cloud.google.com/compute/docs/access/iam#compute.admin) permission is required on the GCP service account where you will deploy: @@ -63,7 +67,7 @@ To grant the required access, you must do the following: Then use one of the following methods: -- Obtain a file containing a JSON that describes the service account credentials. You will need to provide this file later to YBA. +- Obtain a file containing a JSON that describes the service account credentials. You will need to provide this file later when creating the GCP provider configuration. - [Attach the service account](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#using) to the GCP VM that will run YBA. 
| Save for later | To configure | @@ -82,3 +86,31 @@ If you will be using your own custom SSH keys, then ensure that you have them wh | Save for later | To configure | | :--- | :--- | | Custom SSH keys | [GCP provider configuration](../../../configure-yugabyte-platform/gcp/) | + +## GKE service account-based IAM (GCP IAM) + +Google Kubernetes Engine (GKE) uses a concept known as "Workload Identity" to provide a secure way to allow a Kubernetes service account ([KSA](https://kubernetes.io/docs/concepts/security/service-accounts/)) in your GKE cluster to act as an IAM service account so that your Kubernetes universes can access GCS for backups. + +In GKE, each pod can be associated with a KSA. The KSA is used to authenticate and authorize the pod to interact with other Google Cloud services. An IAM service account is a Google Cloud resource that allows applications to make authorized calls to Google Cloud APIs. + +Workload Identity links a KSA to an IAM account using annotations in the KSA. Pods that use the configured KSA automatically authenticate as the IAM service account when accessing Google Cloud APIs. + +By using Workload Identity, you avoid the need for manually managing service account keys or tokens in your applications running on GKE. This approach enhances security and simplifies the management of credentials. + +- To enable GCP IAM when installing YugabyteDB Anywhere on Kubernetes, refer to [Enable GKE service account-based IAM](../../../install-yugabyte-platform/install-software/kubernetes/#enable-gke-service-account-based-iam). + +- To enable GCP IAM during universe creation on Kubernetes, refer to [Configure Helm overrides](../../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides). + +- To enable GCP IAM for Google Cloud Storage backup configuration with Kubernetes, refer to [Configure backup storage](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage). 
+ +- To upgrade an existing universe with GCP IAM, refer to [Upgrade universes for GKE service account-based IAM support](../../../manage-deployments/edit-helm-overrides/#upgrade-universes-for-gke-service-account-based-iam). + +**Prerequisites** + +- The GKE cluster hosting the pods should have Workload Identity enabled. The worker nodes of this GKE cluster should have the GKE metadata server enabled. + +- The IAM service account, which is used to annotate the KSA, should have sufficient permissions to read, write, list, and delete objects in GCS. + +- The KSA, which is annotated with the IAM service account, should be present in the same namespace where the pod resources for YugabyteDB Anywhere and YugabyteDB universes are expected. If you have multiple namespaces, each namespace should include the annotated KSA. + +For instructions on setting up Workload Identity, see [Use Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) in the GKE documentation. diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md index bf24c69fa248..64317cd59ba8 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md @@ -48,7 +48,7 @@ type: docs ## On-premises -Because you are creating the VMs manually, nodes for on-premises providers don't require any cloud permissions. +Because you are creating the VMs manually (on a private cloud, bare metal, or cloud provider), nodes for on-premises providers don't require any cloud permissions. With an on-premises provider, permissions against your infrastructure are generally not needed to deploy VMs, modify VMs, and so on. 
diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md index 39f7569d0bb1..cd53d9f2f2b8 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md @@ -91,7 +91,12 @@ The Access key ID and Secret Access Key for the service account are used when cr When backing up to and/or restoring from GCP GCS, YBA and database nodes must be able to write to and read from the GCS storage bucket. -To grant the required access, create a GCP service account with [IAM roles for cloud storage](https://cloud.google.com/storage/docs/access-control/iam-roles) with the following permissions: +To grant the required access, you can do one of the following: + +- Provide a GCP service account with [IAM roles for cloud storage](https://cloud.google.com/storage/docs/access-control/iam-roles) with the required permissions. +- Create the VM instances (for both the YBA VM and the DB nodes VMs) with an IAM role that has the required permissions. + +The following permissions are required: ```sh roles/storage.admin @@ -101,9 +106,7 @@ The credentials for this account (in JSON format) are used when creating a backu | Save for later | To configure | | :--- | :--- | -| Storage service account JSON credentials | [Storage configuration](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage) for GCS | - -For database clusters deployed to GKE, you can alternatively assign the appropriate IAM roles to the YugabyteDB Anywhere VM and the YugabyteDB nodes. +| Service account JSON credentials | [Storage configuration](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage) for GCS |
diff --git a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md index 7a0d82c319a5..9f0879db79d3 100644 --- a/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md +++ b/docs/content/preview/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md @@ -63,6 +63,8 @@ Linux OS root permissions are required for the server, see [Servers for YBA](../ When installing YugabyteDB Anywhere on an AWS VM, no cloud permissions are required. +If you attach an IAM role with the appropriate permissions to the EC2 VM instance where YugabyteDB Anywhere will be running, you can use the YugabyteDB Anywhere instance's IAM role when setting up node servers and S3 backup. Refer to [Permissions to deploy nodes](../cloud-permissions-nodes-aws/) and [Permissions to back up and restore](../cloud-permissions-storage/). + Linux OS root permissions are required for the server, see [Servers for YBA](../../server-yba/).
@@ -71,6 +73,8 @@ Linux OS root permissions are required for the server, see [Servers for YBA](../ When installing YugabyteDB Anywhere on a GCP VM, no cloud permissions are required. +If you attach a service account with appropriate permissions to the GCP VM where YugabyteDB Anywhere will be running, you can use the YugabyteDB Anywhere instance's role when setting up node servers and GCS backup. Refer to [Permissions to deploy nodes](../cloud-permissions-nodes-gcp/) and [Permissions to back up and restore](../cloud-permissions-storage/). + Linux OS root permissions are required for the server, see [Servers for YBA](../../server-yba/). diff --git a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md index 9d2f9526e435..1198d61e9daa 100644 --- a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md +++ b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/_index.md @@ -51,7 +51,7 @@ YugabyteDB Anywhere requires the following additional software to be pre-install #### Python -Install Python 3.8 on the nodes. (If you are using [Legacy on-premises provisioning](software-on-prem-legacy/), Python 3.5-3.9 is supported, and 3.6 is recommended.) +Install Python 3.6-3.11 on the nodes. (If you are using [Legacy on-premises provisioning](software-on-prem-legacy/), Python 3.5-3.9 is supported, and 3.6 is recommended.) Install the Python SELinux package corresponding to your version of Python. You can use pip to do this. Ensure the version of pip matches the version of Python. 
diff --git a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md index a6686d51d0e1..87fbda62dd15 100644 --- a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md +++ b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md @@ -12,7 +12,7 @@ menu: type: docs --- -When deploying database clusters using a public cloud provider (AWS, GCP, or Azure), YugabyteDB Anywhere (YBA) creates cloud VMs directly. +When deploying database clusters using a public cloud provider configuration (AWS, GCP, or Azure), YugabyteDB Anywhere (YBA) creates cloud VMs directly. You have two options for provisioning the operating system: diff --git a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md index a6e38e6c88fe..2dd9fef48178 100644 --- a/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md +++ b/docs/content/preview/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md @@ -72,6 +72,12 @@ helm repo add prometheus-community https://prometheus-community.github.io/helm-c helm install -n kube-system --version 5.0.0 kube-state-metrics prometheus-community/kube-state-metrics ``` +## Install cert-manager + +You can use [cert-manager](https://cert-manager.io/) to manage certificates for your cluster. To use cert-manager to manage universe certificates, ensure that it is running on your Kubernetes cluster before you create your [Kubernetes provider configuration](../../../configure-yugabyte-platform/kubernetes/). + +For more information, refer to [Add certificates](../../../security/enable-encryption-in-transit/add-certificate-kubernetes/). 
+ ## Pull and push YugabyteDB Docker images to private container registry Due to security concerns, some Kubernetes environments use internal container registries such as Harbor and Nexus. In this type of setup, YugabyteDB deployment must be able to pull images from and push images to a private registry. diff --git a/docs/content/preview/yugabyte-platform/security/authorization-platform.md b/docs/content/preview/yugabyte-platform/security/authorization-platform.md index 15898d967cfe..0cd666151f34 100644 --- a/docs/content/preview/yugabyte-platform/security/authorization-platform.md +++ b/docs/content/preview/yugabyte-platform/security/authorization-platform.md @@ -40,7 +40,7 @@ Enter a password for the default database admin user (`yugabyte` for YSQL, and ` Save your password in a secure location. Your password is not stored in YugabyteDB Anywhere, and if you lose it, you won't be able to access the database. {{< /warning >}} -You can also enable and disable the endpoints and authorization, as well as rotate your admin user password, after deployment. Navigate to your universe, click **Actions**, and choose **Edit YSQL Configuration** or **Edit YCQL Configuration**. +You can also enable and disable the endpoints and authorization, as well as rotate your admin user password, after deployment. Navigate to your universe, click **Actions**, and choose **More** and **Edit YSQL Configuration** or **Edit YCQL Configuration**. To disable YSQL or YCQL authorization or rotate the password, you will need your `yugabyte` or `cassandra` database user password. 
diff --git a/docs/content/preview/yugabyte-platform/security/enable-encryption-at-rest.md b/docs/content/preview/yugabyte-platform/security/enable-encryption-at-rest.md index 9ee91ae61dbe..1373764ffac0 100644 --- a/docs/content/preview/yugabyte-platform/security/enable-encryption-at-rest.md +++ b/docs/content/preview/yugabyte-platform/security/enable-encryption-at-rest.md @@ -49,7 +49,7 @@ You enable encryption at rest during universe creation as follows: You enable encryption at rest on an existing universe as follows: -1. Navigate to your universe, click **Actions**, and choose **Edit Security > Encryption at Rest**. +1. Navigate to your universe, click **Actions**, and choose **More > Edit Security > Encryption at Rest**. 1. In the **Manage Encryption at Rest** dialog, toggle **Enable Encryption at Rest for this Universe**. @@ -84,7 +84,7 @@ If your configuration includes AWS KMS, the following occurs: after the universe You can disable encryption at rest for a universe as follows: 1. Navigate to the universe for which you want to rotate the keys. -2. Select **Actions > Edit Security > Encryption-at-Rest**. +2. Select **Actions > More > Edit Security > Encryption-at-Rest**. 3. In the **Manage Encryption at Rest** dialog, toggle **Enable Encryption at Rest for this Universe** and click **Apply**. To verify that encryption at rest is disabled, check the current cluster configuration for each node to see that it contains `encryption_enabled: false`. @@ -112,7 +112,7 @@ YugabyteDB Anywhere uses a KMS configuration to house the information about the You can change KMS configurations, and consequently the master keys used to encrypt the universe key, at any time. To accomplish this, do the following: 1. [Create a new KMS configuration](../create-kms-config/aws-kms/) with the new master key to use. -1. After the KMS configuration is successfully created, go to the encryption at rest-enabled universe, and select **Actions > Edit Security > Encryption at Rest**. +1. 
After the KMS configuration is successfully created, go to the encryption at rest-enabled universe, and select **Actions > More > Edit Security > Encryption at Rest**. 1. In the **Manage Encryption at Rest** dialog, choose the new KMS configuration from the **Key Management Service Config** list. 1. Click **Apply** to use the new KMS configuration and master key for envelope encryption. @@ -141,5 +141,5 @@ Once encryption is enabled with a new universe key, only new data is encrypted w To rotate the universe keys, perform the following: 1. Navigate to the universe for which you want to rotate the keys. -2. Select **Actions > Edit Security > Encryption at Rest**. +2. Select **Actions > More > Edit Security > Encryption at Rest**. 3. Select **Rotate Universe key** and click **Apply**. diff --git a/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/_index.md b/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/_index.md index 84d24f29b074..c14b5a213e4c 100644 --- a/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/_index.md +++ b/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/_index.md @@ -51,7 +51,7 @@ You enable Node-to-Node and Client-to-Node encryption in transit when you [creat You can also enable and disable encryption in transit for an existing universe as follows: 1. Navigate to your universe. -1. Click **Actions > Edit Security > Encryption in-Transit** to open the **Manage encryption in transit** dialog. +1. Click **Actions > More > Edit Security > Encryption in-Transit** to open the **Manage encryption in transit** dialog. 1. Enable or disable the **Enable encryption in transit for this Universe** option. 1. Click **Apply**. 
diff --git a/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md b/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md index 4d35866d1d09..6e1138626259 100644 --- a/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md +++ b/docs/content/preview/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md @@ -50,13 +50,24 @@ Add TLS certificates issued by the cert-manager as follows: ## Configure the provider -After the certificate is added to YugabyteDB Anywhere, configure the Kubernetes provider configuration by following instructions provided in [Configure region and zones](../../../configure-yugabyte-platform/kubernetes/#configure-region-and-zones). +After the certificate is added to YugabyteDB Anywhere, set up the Kubernetes provider configuration by following the instructions in [Configure region and zones](../../../configure-yugabyte-platform/kubernetes/#configure-region-and-zones). -In the **Add new region** dialog shown in the following illustration, you can specify the Issuer kind, Issuer name, and optionally the Issuer group for each zone. +When adding a region, you can specify the Issuer kind, Issuer name, and optionally the Issuer group for each zone. ![Add new region](/images/yp/security/kubernetes-cert-manager-add-region-2024-2-2.png) -If you need the certificate to include the common name, set the **Common Name Required for Certificates** Universe Runtime Configuration option (config key `yb.tls.cert_manager.common_name_required`) to true. Refer to [Manage runtime configuration settings](../../../administer-yugabyte-platform/manage-runtime-config/). When configured, YugabyteDB Anywhere sets the common name to the name of the service created for the pod. 
+### Including the common name + +If your certificate issuer (for example, for `aws-privateca-issuer`) requires the certificate to include the common name, set the following [override](../../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides) for the provider region: + +```yml +tls: + certManager: + certificates: + commonNameRequired: true +``` + +When configured, YugabyteDB Anywhere sets the common name to the name of the service created for the pod, and adds common name to the certificate request sent to cert-manager. ## Troubleshoot diff --git a/docs/content/preview/yugabyte-platform/upgrade/_index.md b/docs/content/preview/yugabyte-platform/upgrade/_index.md index 55501ce7d124..df85d609a868 100644 --- a/docs/content/preview/yugabyte-platform/upgrade/_index.md +++ b/docs/content/preview/yugabyte-platform/upgrade/_index.md @@ -13,43 +13,26 @@ type: indexpage Keep YugabyteDB Anywhere up to date for the latest fixes and improvements, and to be able to [upgrade your universes](../manage-deployments/upgrade-software/) to the latest version of YugabyteDB. You cannot upgrade a universe to a version of YugabyteDB that is later than the version of YugabyteDB Anywhere. -For information on which versions of YugabyteDB are compatible with your version of YugabyteDB Anywhere, see [YugabyteDB Anywhere releases](/preview/releases/yba-releases/). - You can upgrade YBA using the following methods: | Method | Using | Use If | | :--- | :--- | :--- | -| [YBA Installer](./upgrade-yp-installer/) | yba-ctl CLI | Your installation already uses YBA Installer. | -| [Replicated](./upgrade-yp-replicated/) | Replicated Admin Console | Your installation already uses Replicated.
Before you can migrate from a Replicated installation, upgrade to v2.20.1.3 or later using Replicated.
Note that you must migrate from Replicated to YBA Installer if you are upgrading YugabyteDB Anywhere to v2024.1 or later. | -| [Kubernetes](./upgrade-yp-kubernetes/) | Helm chart | You're deploying in Kubernetes. | - -If you are upgrading a YugabyteDB Anywhere installation with high availability enabled, follow the instructions provided in [Upgrade instances](../administer-yugabyte-platform/high-availability/#upgrade-instances). - -If you have upgraded YugabyteDB Anywhere to version 2.12 or later and [xCluster replication](../../explore/going-beyond-sql/asynchronous-replication-ysql/) for your universe was set up via yb-admin instead of the UI, follow the instructions provided in [Synchronize replication after upgrade](upgrade-yp-xcluster-ybadmin/). - -{{< note title="Upgrading YugabyteDB Anywhere on deprecated operating systems" >}} - -If you are running YugabyteDB Anywhere on a [deprecated OS](../../reference/configuration/operating-systems/), you will need to update your OS before you can upgrade YugabyteDB Anywhere to the next major release. - -{{< /note >}} -{{< note title="Upgrading YugabyteDB Anywhere on systems that have older versions of Python" >}} - -YugabyteDB Anywhere v25.1 and later requires Python v3.10-3.12. If you are running YugabyteDB Anywhere on a system with Python earlier than 3.10, you will need to update Python on your system before you can upgrade YugabyteDB Anywhere to v25.1 or later. - -{{< /note >}} +| YBA Installer | yba-ctl CLI | Your installation already uses YBA Installer. | +| Replicated | Replicated Admin Console | Your installation already uses Replicated.
Before you can migrate from a Replicated installation, upgrade to v2.20.1.3 or later using Replicated.
Note that you must migrate from Replicated to YBA Installer if you are upgrading YugabyteDB Anywhere to v2024.1 or later. | +| Kubernetes | Helm chart | You're deploying in Kubernetes. | {{}} + {{}} + {{}} - - {{}} + icon="fa-thin fa-up-from-bracket">}} {{}} diff --git a/docs/content/preview/yugabyte-platform/upgrade/prepare-to-upgrade.md b/docs/content/preview/yugabyte-platform/upgrade/prepare-to-upgrade.md new file mode 100644 index 000000000000..930d9bb53353 --- /dev/null +++ b/docs/content/preview/yugabyte-platform/upgrade/prepare-to-upgrade.md @@ -0,0 +1,46 @@ +--- +title: Prepare to upgrade YugabyteDB Anywhere +headerTitle: Prepare to upgrade YugabyteDB Anywhere +linkTitle: Prepare to upgrade +description: Review changes that may affect installation +menu: + preview_yugabyte-platform: + identifier: prepare-to-upgrade + parent: upgrade + weight: 50 +type: docs +--- + +For information on which versions of YugabyteDB are compatible with your version of YugabyteDB Anywhere, see [YugabyteDB Anywhere releases](/preview/releases/yba-releases/). + +## High availability + +If you are upgrading a YugabyteDB Anywhere installation with high availability enabled, follow the instructions provided in [Upgrade instances](../../administer-yugabyte-platform/high-availability/#upgrade-instances). + +## Operating system + +If you are running YugabyteDB Anywhere on a [deprecated OS](../../../reference/configuration/operating-systems/), you need to update your OS before you can upgrade YugabyteDB Anywhere to the next major release. + +## Python + +YugabyteDB Anywhere v25.1 and later requires Python v3.10-3.12. If you are running YugabyteDB Anywhere on a system with Python earlier than 3.10, you will need to update Python on your system before you can upgrade YugabyteDB Anywhere to v25.1 or later. + +## cron-based universes + +cron and root-level systemd have been deprecated in favor of user-level systemd with node agent for management of universe nodes. 
+ +In particular, cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later. Before you can upgrade to v2025.2 or later, all your universes must be using systemd. YugabyteDB Anywhere will automatically upgrade universes that use a cloud provider configuration to systemd. + +However, on-premises cron-based universes must be upgraded manually. To do this, in YugabyteDB Anywhere v2024.2.2 or later, navigate to the universe and choose **Actions>Upgrade to Systemd**. + +## Node provisioning + +[Legacy provisioning](../../prepare/server-nodes-software/software-on-prem-legacy/) workflows have been deprecated. Provision nodes for on-premises universes using the `node-agent-provision.sh` script. Refer to [Automatically provision on-premises nodes](../../prepare/server-nodes-software/software-on-prem/). + +YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later require that universes have node agent running on their nodes. Before you can upgrade to v2025.2 or later, all your universes must be using node agent. + +To upgrade a universe to node agent, first make sure the universe is not cron-based and, if necessary, [update the universe to systemd](#cron-based-universes). Then navigate to the universe and click **Actions>Install Node Agent**. If installation fails on a node, make sure the node satisfies the [prerequisites](../../prepare/server-nodes-software/) and retry the install. + +## xCluster + +If you have upgraded YugabyteDB Anywhere to version 2.12 or later and [xCluster replication](../../../explore/going-beyond-sql/asynchronous-replication-ysql/) for your universe was set up via yb-admin instead of the UI, follow the instructions provided in [Synchronize replication after upgrade](../upgrade-yp-xcluster-ybadmin/). 
diff --git a/docs/content/preview/yugabyte-voyager/known-issues/postgresql.md b/docs/content/preview/yugabyte-voyager/known-issues/postgresql.md index b570d90fecca..9e27e3feb7f3 100644 --- a/docs/content/preview/yugabyte-voyager/known-issues/postgresql.md +++ b/docs/content/preview/yugabyte-voyager/known-issues/postgresql.md @@ -9,57 +9,46 @@ menu: parent: known-issues weight: 101 type: docs -rightNav: - hideH3: true --- -Review limitations and implement suggested workarounds to successfully migrate data from PostgreSQL to YugabyteDB. - -## Contents - -- [Adding primary key to a partitioned table results in an error](#adding-primary-key-to-a-partitioned-table-results-in-an-error) -- [Index creation on partitions fail for some YugabyteDB builds](#index-creation-on-partitions-fail-for-some-yugabytedb-builds) -- [Creation of certain views in the rule.sql file](#creation-of-certain-views-in-the-rule-sql-file) -- [Create or alter conversion is not supported](#create-or-alter-conversion-is-not-supported) -- [GENERATED ALWAYS AS STORED type column is not supported](#generated-always-as-stored-type-column-is-not-supported) -- [Unsupported ALTER TABLE DDL variants in source schema](#unsupported-alter-table-ddl-variants-in-source-schema) -- [Storage parameters on indexes or constraints in the source PostgreSQL](#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql) -- [Foreign table in the source database requires SERVER and USER MAPPING](#foreign-table-in-the-source-database-requires-server-and-user-mapping) -- [Exclusion constraints is not supported](#exclusion-constraints-is-not-supported) -- [PostgreSQL extensions are not supported by target YugabyteDB](#postgresql-extensions-are-not-supported-by-target-yugabytedb) -- [Deferrable constraint on constraints other than foreign keys is not supported](#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported) -- [Data ingestion on XML data type is not 
supported](#data-ingestion-on-xml-data-type-is-not-supported) -- [GiST, BRIN, and SPGIST index types are not supported](#gist-brin-and-spgist-index-types-are-not-supported) -- [Indexes on some complex data types are not supported](#indexes-on-some-complex-data-types-are-not-supported) -- [Constraint trigger is not supported](#constraint-trigger-is-not-supported) -- [Table inheritance is not supported](#table-inheritance-is-not-supported) -- [%Type syntax is not supported](#type-syntax-is-not-supported) -- [GIN indexes on multiple columns are not supported](#gin-indexes-on-multiple-columns-are-not-supported) -- [Policies on users in source require manual user creation](#policies-on-users-in-source-require-manual-user-creation) -- [VIEW WITH CHECK OPTION is not supported](#view-with-check-option-is-not-supported) -- [UNLOGGED table is not supported](#unlogged-table-is-not-supported) -- [Hash-sharding with indexes on the timestamp/date columns](#hash-sharding-with-indexes-on-the-timestamp-date-columns) -- [Exporting data with names for tables/functions/procedures using special characters/whitespaces fails](#exporting-data-with-names-for-tables-functions-procedures-using-special-characters-whitespaces-fails) -- [Importing with case-sensitive schema names](#importing-with-case-sensitive-schema-names) -- [Unsupported datatypes by YugabyteDB](#unsupported-datatypes-by-yugabytedb) -- [Unsupported datatypes by Voyager during live migration](#unsupported-datatypes-by-voyager-during-live-migration) -- [XID functions is not supported](#xid-functions-is-not-supported) -- [REFERENCING clause for triggers](#referencing-clause-for-triggers) -- [BEFORE ROW triggers on partitioned tables](#before-row-triggers-on-partitioned-tables) -- [Advisory locks is not yet implemented](#advisory-locks-is-not-yet-implemented) -- [System columns is not yet supported](#system-columns-is-not-yet-supported) -- [XML functions is not yet supported](#xml-functions-is-not-yet-supported) -- [Large 
Objects and its functions are currently not supported](#large-objects-and-its-functions-are-currently-not-supported) -- [PostgreSQL 12 and later features](#postgresql-12-and-later-features) -- [MERGE command](#merge-command) -- [JSONB subscripting](#jsonb-subscripting) -- [Events Listen / Notify](#events-listen-notify) -- [Two-Phase Commit](#two-phase-commit) -- [DDL operations within the Transaction](#ddl-operations-within-the-transaction) -- [Hotspots with range-sharded timestamp/date indexes](#hotspots-with-range-sharded-timestamp-date-indexes) -- [Redundant indexes](#redundant-indexes) - -### Adding primary key to a partitioned table results in an error +When migrating data from PostgreSQL to YugabyteDB, you must address specific limitations and implement necessary workarounds. Some features, like table inheritance, certain DDL operations, and unique constraint types, are unsupported. You will also encounter compatibility issues with data types and functions. This page helps you navigate these challenges by offering advice on schema adjustments, handling unsupported features, and optimizing performance for a successful migration. + +## Data definition + +### Tables + +#### Table inheritance is not supported + +**GitHub**: [Issue #5956](https://github.com/yugabyte/yugabyte-db/issues/5956) + +**Description**: If you have table inheritance in the source database, it will error out in the target as it is not currently supported in YugabyteDB: + +```output +ERROR: INHERITS not supported yet +``` + +**Workaround**: Currently, there is no workaround. 
+ +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE public.cities ( + name text, + population real, + elevation integer +); + +CREATE TABLE public.capitals ( + state character(2) NOT NULL +) +INHERITS (public.cities); +``` + +--- + +#### Adding primary key to a partitioned table results in an error **GitHub**: [Issue #612](https://github.com/yugabyte/yb-voyager/issues/612) @@ -104,159 +93,7 @@ PARTITION BY LIST (region); --- -### Index creation on partitions fail for some YugabyteDB builds - -**GitHub**: [Issue #14529](https://github.com/yugabyte/yugabyte-db/issues/14529) - -**Description**: If you have a partitioned table with indexes on it, the migration will fail with an error for YugabyteDB `2.15` or `2.16` due to a regression. - -Note that this is fixed in release [2.17.1.0](../../../releases/ybdb-releases/end-of-life/v2.17/#v2.17.1.0). - -**Workaround**: N/A - -**Example** - -An example schema on the source database is as follows: - -```sql -DROP TABLE IF EXISTS list_part; - -CREATE TABLE list_part (id INTEGER, status TEXT, arr NUMERIC) PARTITION BY LIST(status); - -CREATE TABLE list_active PARTITION OF list_part FOR VALUES IN ('ACTIVE'); - -CREATE TABLE list_archived PARTITION OF list_part FOR VALUES IN ('EXPIRED'); - -CREATE TABLE list_others PARTITION OF list_part DEFAULT; - -INSERT INTO list_part VALUES (1,'ACTIVE',100), (2,'RECURRING',20), (3,'EXPIRED',38), (4,'REACTIVATED',144), (5,'ACTIVE',50); - -CREATE INDEX list_ind ON list_part(status); -``` - ---- - -### Creation of certain views in the rule.sql file - -**GitHub**: [Issue #770](https://github.com/yugabyte/yb-voyager/issues/770) - -**Description**: There may be few cases where certain exported views come under the `rule.sql` file and the `view.sql` file might contain a dummy view definition. This `pg_dump` behaviour may be due to how PostgreSQL handles views internally (via rules). 
- -{{< note title ="Note" >}} -This does not affect the migration as YugabyteDB Voyager takes care of the DDL creation sequence internally. -{{< /note >}} - -**Workaround**: Not required - -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE TABLE foo(n1 int PRIMARY KEY, n2 int); -CREATE VIEW v1 AS - SELECT n1,n2 - FROM foo - GROUP BY n1; -``` - -The exported schema for `view.sql` is as follows: - -```sql -CREATE VIEW public.v1 AS - SELECT - NULL::integer AS n1, - NULL::integer AS n2; -``` - -The exported schema for `rule.sql` is as follows: - -```sql -CREATE OR REPLACE VIEW public.v1 AS - SELECT foo.n1,foo.n2 - FROM public.foo - GROUP BY foo.n1; -``` - -### Create or alter conversion is not supported - -**GitHub**: [Issue #10866](https://github.com/yugabyte/yugabyte-db/issues/10866) - -**Description**: If you have conversions in your PostgreSQL database, they will error out as follows as conversions are currently not supported in the target YugabyteDB: - -```output -ERROR: CREATE CONVERSION not supported yet -``` - -**Workaround**: Remove the conversions from the exported schema and modify the applications to not use these conversions before pointing them to YugabyteDB. 
- -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE CONVERSION public.my_latin1_to_utf8 FOR 'LATIN1' TO 'UTF8' FROM public.latin1_to_utf8; - -CREATE FUNCTION public.latin1_to_utf8(src_encoding integer, dest_encoding integer, src bytea, dest bytea, len integer) RETURNS integer - LANGUAGE c - AS '/usr/lib/postgresql/12/lib/latin1_to_utf8.so', 'my_latin1_to_utf8'; -``` - ---- - -### GENERATED ALWAYS AS STORED type column is not supported - -**GitHub**: [Issue #10695](https://github.com/yugabyte/yugabyte-db/issues/10695) - -**Description**: If you have tables in the source database with columns of GENERATED ALWAYS AS STORED type (which means the data of this column is derived from some other columns of the table), it will throw a syntax error in YugabyteDB as follows: - -```output -ERROR: syntax error at or near "(" (SQLSTATE 42601) -``` - -**Workaround**: Create a trigger on this table that updates its value on any INSERT/UPDATE operation, and set a default value for this column. This provides functionality similar to PostgreSQL's GENERATED ALWAYS AS STORED columns using a trigger. - -**Fixed In**: {{}}. 
- -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE TABLE people ( - name text, - height_cm numeric, - height_in numeric GENERATED ALWAYS AS (height_cm / 2.54) STORED -); -``` - -Suggested change to the schema is as follows: - -```sql -ALTER TABLE people - ALTER COLUMN height_in SET DEFAULT -1; - -CREATE OR REPLACE FUNCTION compute_height_in() RETURNS TRIGGER AS $$ -BEGIN - IF NEW.height_in IS DISTINCT FROM -1 THEN - RAISE EXCEPTION 'cannot insert in column "height_in"'; - ELSE - NEW.height_in := NEW.height_cm / 2.54; - END IF; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER compute_height_in_trigger - BEFORE INSERT OR UPDATE ON people - FOR EACH ROW - EXECUTE FUNCTION compute_height_in(); -``` - ---- - -### Unsupported ALTER TABLE DDL variants in source schema +#### Unsupported ALTER TABLE DDL variants in source schema **GitHub**: [Issue #1124](https://github.com/yugabyte/yugabyte-db/issues/1124) @@ -307,7 +144,43 @@ ALTER TABLE public.example --- -### Storage parameters on indexes or constraints in the source PostgreSQL +#### UNLOGGED table is not supported + +**GitHub**: [Issue #1129](https://github.com/yugabyte/yugabyte-db/issues/1129) + +**Description**: If there are UNLOGGED tables in the source schema, they will error out during the import schema with the following error as it is not supported in target YugabyteDB. + +```output +ERROR: UNLOGGED database object not supported yet +``` + +**Workaround**: Convert it to a LOGGED table. 
+ +**Fixed In**: {{}} + +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE UNLOGGED TABLE tbl_unlogged ( + id int, + val text +); +``` + +Suggested change to the schema is as follows: + +```sql +CREATE TABLE tbl_unlogged ( + id int, + val text +); +``` + +--- + +#### Storage parameters on indexes or constraints in the source PostgreSQL **GitHub**: [Issue #23467](https://github.com/yugabyte/yugabyte-db/issues/23467) @@ -359,93 +232,37 @@ CREATE INDEX abc --- -### Foreign table in the source database requires SERVER and USER MAPPING +### Constraints -**GitHub**: [Issue #1627](https://github.com/yugabyte/yb-voyager/issues/1627) +#### Exclusion constraints is not supported -**Description**: If you have foreign tables in the schema, during the export schema phase the exported schema does not include the SERVER and USER MAPPING objects. You must manually create these objects before importing schema, otherwise FOREIGN TABLE creation fails with the following error: +**GitHub**: [Issue #3944](https://github.com/yugabyte/yugabyte-db/issues/3944) + +**Description**: If you have exclusion constraints on the tables in the source database, those will error out during import schema to the target with the following error: ```output -ERROR: server "remote_server" does not exist (SQLSTATE 42704) +ERROR: EXCLUDE constraint not supported yet (SQLSTATE 0A000) ``` -**Workaround**: Create the SERVER and its USER MAPPING manually on the target YugabyteDB database. +**Workaround**: To implement exclusion constraints, follow this workaround: + +1. Create a trigger: Set up a TRIGGER for INSERT or UPDATE operations on the table. This trigger will use the specified expression to search the relevant columns for any potential violations. + +1. Add indexes: Create an INDEX on the columns involved in the expression. This helps ensure that the search operation performed by the trigger does not negatively impact performance. 
+ +Note that creating an index on the relevant columns _is essential_ for maintaining performance. Without an index, the trigger's search operation can degrade performance. + +**Caveats**: Note that there are specific issues related to creating indexes on certain data types using certain index methods in YugabyteDB. Depending on the data types or methods involved, additional workarounds may be required to ensure optimal performance for these constraints. **Example** An example schema on the source database is as follows: ```sql -CREATE EXTENSION postgres_fdw; - -CREATE SERVER remote_server - FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (host '127.0.0.1', port '5432', dbname 'postgres'); - -CREATE FOREIGN TABLE foreign_table ( - id INT, - name TEXT, - data JSONB -) -SERVER remote_server -OPTIONS ( - schema_name 'public', - table_name 'remote_table' -); - -CREATE USER MAPPING FOR postgres -SERVER remote_server -OPTIONS (user 'postgres', password 'XXX'); -``` - -Exported schema only has the following: - -```sql -CREATE FOREIGN TABLE foreign_table ( - id INT, - name TEXT, - data JSONB -) -SERVER remote_server -OPTIONS ( - schema_name 'public', - table_name 'remote_table' -); -``` - -Suggested change is to manually create the SERVER and USER MAPPING on the target YugabyteDB. - ---- - -### Exclusion constraints is not supported - -**GitHub**: [Issue #3944](https://github.com/yugabyte/yugabyte-db/issues/3944) - -**Description**: If you have exclusion constraints on the tables in the source database, those will error out during import schema to the target with the following error: - -```output -ERROR: EXCLUDE constraint not supported yet (SQLSTATE 0A000) -``` - -**Workaround**: To implement exclusion constraints, follow this workaround: - -1. Create a trigger: Set up a TRIGGER for INSERT or UPDATE operations on the table. This trigger will use the specified expression to search the relevant columns for any potential violations. - -1. 
Add indexes: Create an INDEX on the columns involved in the expression. This helps ensure that the search operation performed by the trigger does not negatively impact performance. - -Note that creating an index on the relevant columns _is essential_ for maintaining performance. Without an index, the trigger's search operation can degrade performance. - -**Caveats**: Note that there are specific issues related to creating indexes on certain data types using certain index methods in YugabyteDB. Depending on the data types or methods involved, additional workarounds may be required to ensure optimal performance for these constraints. - -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE TABLE public.meeting ( - id integer NOT NULL, - room_id integer NOT NULL, - time_range tsrange NOT NULL +CREATE TABLE public.meeting ( + id integer NOT NULL, + room_id integer NOT NULL, + time_range tsrange NOT NULL ); ALTER TABLE ONLY public.meeting @@ -480,29 +297,7 @@ CREATE INDEX idx_no_time_overlap on public.meeting USING gist(room_id,time_range --- -### PostgreSQL extensions are not supported by target YugabyteDB - -**Documentation**: [PostgreSQL extensions](../../../explore/ysql-language-features/pg-extensions/) - -**Description**: If you have any PostgreSQL extension that is not supported by the target YugabyteDB, they result in the following errors during import schema: - -```output -ERROR: could not open extension control file "/home/centos/yb/postgres/share/extension/.control": No such file or directory -``` - -**Workaround**: Remove the extension from the exported schema. 
- -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public; -``` - ---- - -### Deferrable constraint on constraints other than foreign keys is not supported +#### Deferrable constraint on constraints other than foreign keys is not supported **GitHub**: [Issue #1709](https://github.com/yugabyte/yugabyte-db/issues/1709) @@ -530,235 +325,503 @@ ALTER TABLE ONLY public.users --- -### Data ingestion on XML data type is not supported +### Columns -**GitHub**: [Issue #1043](https://github.com/yugabyte/yugabyte-db/issues/1043) +#### GENERATED ALWAYS AS STORED type column is not supported -**Description**: If you have XML datatype in the source database, it errors out in the import data to target YugabyteDB phase as data ingestion is not allowed on this data type: +**GitHub**: [Issue #10695](https://github.com/yugabyte/yugabyte-db/issues/10695) + +**Description**: If you have tables in the source database with columns of GENERATED ALWAYS AS STORED type (which means the data of this column is derived from some other columns of the table), it will throw a syntax error in YugabyteDB as follows: ```output - ERROR: unsupported XML feature (SQLSTATE 0A000) +ERROR: syntax error at or near "(" (SQLSTATE 42601) ``` -**Workaround**: To migrate the data, a workaround is to convert the type to text and import the data to target; to read the data on the target YugabyteDB, you need to create some user defined functions similar to XML functions. +**Workaround**: Create a trigger on this table that updates its value on any INSERT/UPDATE operation, and set a default value for this column. This provides functionality similar to PostgreSQL's GENERATED ALWAYS AS STORED columns using a trigger. + +**Fixed In**: {{}}. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE xml_example ( - id integer, - data xml +CREATE TABLE people ( + name text, + height_cm numeric, + height_in numeric GENERATED ALWAYS AS (height_cm / 2.54) STORED ); ``` ---- - -### GiST, BRIN, and SPGIST index types are not supported +Suggested change to the schema is as follows: -**GitHub**: [Issue #1337](https://github.com/yugabyte/yugabyte-db/issues/1337) +```sql +ALTER TABLE people + ALTER COLUMN height_in SET DEFAULT -1; -**Description**: If you have GiST, BRIN, and SPGIST indexes on the source database, it errors out in the import schema phase with the following error: +CREATE OR REPLACE FUNCTION compute_height_in() RETURNS TRIGGER AS $$ +BEGIN + IF NEW.height_in IS DISTINCT FROM -1 THEN + RAISE EXCEPTION 'cannot insert in column "height_in"'; + ELSE + NEW.height_in := NEW.height_cm / 2.54; + END IF; -```output - ERROR: index method "gist" not supported yet (SQLSTATE XX000) + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER compute_height_in_trigger + BEFORE INSERT OR UPDATE ON people + FOR EACH ROW + EXECUTE FUNCTION compute_height_in(); ``` -**Workaround**: Currently, there is no workaround; remove the index from the exported schema. +--- -**Example** +#### System columns is not yet supported -An example schema on the source database is as follows: +**GitHub**: [Issue #24843](https://github.com/yugabyte/yugabyte-db/issues/24843) + +**Description**: System columns, including `xmin`, `xmax`, `cmin`, `cmax`, and `ctid`, are not available in YugabyteDB. 
Queries or applications referencing these columns will fail as per the following example: ```sql -CREATE INDEX gist_idx ON public.ts_query_table USING gist (query); +yugabyte=# SELECT xmin, xmax FROM employees where id = 100; ``` ---- - -### Indexes on some complex data types are not supported - -**GitHub**: [Issue #9698](https://github.com/yugabyte/yugabyte-db/issues/9698), [Issue #23829](https://github.com/yugabyte/yugabyte-db/issues/23829), [Issue #17017](https://github.com/yugabyte/yugabyte-db/issues/17017) - -**Description**: If you have indexes on some complex types such as TSQUERY, TSVECTOR, JSONB, ARRAYs, INET, UDTs, citext, and so on, those will error out in import schema phase with the following error: - ```output - ERROR: INDEX on column of type '' not yet supported +ERROR: System column "xmin" is not supported yet ``` -**Workaround**: Currently, there is no workaround, but you can cast these data types in the index definition to supported types, which may require adjustments on the application side when querying the column using the index. Ensure you address these changes before modifying the schema. - -**Example** - -An example schema on the source database is as follows: - -```sql -CREATE TABLE public.citext_type ( - id integer, - data public.citext -); - -CREATE TABLE public.documents ( - id integer NOT NULL, - title_tsvector tsvector, - content_tsvector tsvector -); +**Workaround**: Use the application layer to manage tracking instead of relying on system columns. 
-CREATE TABLE public.ts_query_table ( - id integer, - query tsquery -); +--- -CREATE TABLE public.test_json ( - id integer, - data jsonb -); +### Other objects -CREATE INDEX tsvector_idx ON public.documents (title_tsvector); +#### Large Objects and its functions are currently not supported -CREATE INDEX tsquery_idx ON public.ts_query_table (query); +**GitHub**: Issue [#25318](https://github.com/yugabyte/yugabyte-db/issues/25318) -CREATE INDEX idx_citext ON public.citext_type USING btree (data); +**Description**: If you have large objects (datatype `lo`) in the source schema and are using large object functions in queries, the migration will fail during import-schema, as large object is not supported in YugabyteDB. -CREATE INDEX idx_json ON public.test_json (data); +```sql +SELECT lo_create(''); ``` ---- - -### Constraint trigger is not supported - -**GitHub**: [Issue #4700](https://github.com/yugabyte/yugabyte-db/issues/4700) - -**Description**: If you have constraint triggers in your source database, as they are currently unsupported in YugabyteDB, and they will error out as follows: - ```output - ERROR: CREATE CONSTRAINT TRIGGER not supported yet +ERROR: Transaction for catalog table write operation 'pg_largeobject_metadata' not found ``` -**Workaround**: Currently, there is no workaround; remove the constraint trigger from the exported schema and modify the applications if they are using these triggers before pointing it to YugabyteDB. +**Workaround**: No workaround is available. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.users ( - id int, - email character varying(255) -); - -CREATE FUNCTION public.check_unique_username() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - IF EXISTS ( - SELECT 1 - FROM users - WHERE email = NEW.email AND id <> NEW.id - ) THEN - RAISE EXCEPTION 'Email % already exists.', NEW.email; - END IF; - RETURN NEW; -END; -$$; +CREATE TABLE image (id int, raster lo); -CREATE CONSTRAINT TRIGGER check_unique_username_trigger - AFTER INSERT OR UPDATE ON public.users - DEFERRABLE INITIALLY DEFERRED - FOR EACH ROW - EXECUTE FUNCTION public.check_unique_username(); +CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON public.image + FOR EACH ROW EXECUTE FUNCTION lo_manage(raster); ``` --- -### Table inheritance is not supported +#### VIEW WITH CHECK OPTION is not supported -**GitHub**: [Issue #5956](https://github.com/yugabyte/yugabyte-db/issues/5956) +**GitHub**: [Issue #22716](https://github.com/yugabyte/yugabyte-db/issues/22716) -**Description**: If you have table inheritance in the source database, it will error out in the target as it is not currently supported in YugabyteDB: +**Description**: If there are VIEWs with check option in the source database, they error out during the import schema phase as follows: ```output -ERROR: INHERITS not supported yet +ERROR: VIEW WITH CHECK OPTION not supported yet ``` -**Workaround**: Currently, there is no workaround. +**Workaround**: You can use a TRIGGER with INSTEAD OF clause on INSERT/UPDATE on view to achieve this functionality, but it may require application-side adjustments to handle different errors instead of constraint violations. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.cities ( - name text, - population real, - elevation integer +CREATE TABLE public.employees ( + employee_id integer NOT NULL, + employee_name text, + salary numeric ); -CREATE TABLE public.capitals ( - state character(2) NOT NULL -) -INHERITS (public.cities); +CREATE VIEW public.employees_less_than_12000 AS + SELECT + employees.employee_id, + employees.employee_name, + employees.salary + FROM + public.employees + WHERE + employees.employee_id < 12000 + WITH CASCADED CHECK OPTION; ``` ---- - -### %Type syntax is not supported +Suggested change to the schema is as follows: -**GitHub**: [Issue #23619](https://github.com/yugabyte/yugabyte-db/issues/23619) +```sql +SELECT + employees.employee_id, + employees.employee_name, + employees.salary +FROM + public.employees +WHERE + employees.employee_id < 12000; -**Description**: If you have any function, procedure, or trigger using the `%TYPE` syntax for referencing a type of a column from a table, then it errors out in YugabyteDB with the following error: +CREATE OR REPLACE FUNCTION modify_employees_less_than_12000() +RETURNS TRIGGER AS $$ +BEGIN + -- Handle INSERT operations + IF TG_OP = 'INSERT' THEN + IF NEW.employee_id < 12000 THEN + INSERT INTO employees(employee_id, employee_name, salary) + VALUES (NEW.employee_id, NEW.employee_name, NEW.salary); + RETURN NEW; + ELSE + RAISE EXCEPTION 'new row violates check option for view "employees_less_than_12000"; employee_id must be less than 12000'; + END IF; + + -- Handle UPDATE operations + ELSIF TG_OP = 'UPDATE' THEN + IF NEW.employee_id < 12000 THEN + UPDATE employees + SET employee_name = NEW.employee_name, + salary = NEW.salary + WHERE employee_id = OLD.employee_id; + RETURN NEW; + ELSE + RAISE EXCEPTION 'new row violates check option for view "employees_less_than_12000"; employee_id must be less than 12000'; + END IF; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER 
trigger_modify_employee_12000 + INSTEAD OF INSERT OR UPDATE ON employees_less_than_12000 + FOR EACH ROW + EXECUTE FUNCTION modify_employees_less_than_12000(); +``` + +--- + +#### Create or alter conversion is not supported + +**GitHub**: [Issue #10866](https://github.com/yugabyte/yugabyte-db/issues/10866) + +**Description**: If you have conversions in your PostgreSQL database, they will error out as follows as conversions are currently not supported in the target YugabyteDB: ```output -ERROR: invalid type name "employees.salary%TYPE" (SQLSTATE 42601) +ERROR: CREATE CONVERSION not supported yet ``` -**Workaround**: Fix the syntax to include the actual type name instead of referencing the type of a column. +**Workaround**: Remove the conversions from the exported schema and modify the applications to not use these conversions before pointing them to YugabyteDB. **Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.employees ( - employee_id integer NOT NULL, - employee_name text, - salary numeric +CREATE CONVERSION public.my_latin1_to_utf8 FOR 'LATIN1' TO 'UTF8' FROM public.latin1_to_utf8; + +CREATE FUNCTION public.latin1_to_utf8(src_encoding integer, dest_encoding integer, src bytea, dest bytea, len integer) RETURNS integer + LANGUAGE c + AS '/usr/lib/postgresql/12/lib/latin1_to_utf8.so', 'my_latin1_to_utf8'; +``` + +--- + +### Data types + +#### Unsupported datatypes by YugabyteDB + +**GitHub**: [Issue 11323](https://github.com/yugabyte/yugabyte-db/issues/11323), [Issue 1731](https://github.com/yugabyte/yb-voyager/issues/1731) + +**Description**: The migration skips databases that have the following data types on any column: `GEOMETRY`, `GEOGRAPHY`, `BOX2D`, `BOX3D`, `TOPOGEOMETRY`, `RASTER`, `PG_LSN`, or `TXID_SNAPSHOT`. + +**Workaround**: None. 
+ +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE public.locations ( + id integer NOT NULL, + name character varying(100), + geom geometry(Point,4326) + ); +``` + +--- + +## Data manipulation + +### MERGE command + +**GitHub**: Issue [#25574](https://github.com/yugabyte/yugabyte-db/issues/25574) + +**Description**: If you are using a Merge query to conditionally insert, update, or delete rows on a table on your source database, then this query will fail once you migrate your apps to YugabyteDB as it is a PostgreSQL 15 feature, and not supported yet. + +```output +ERROR: syntax error at or near "MERGE" +``` + +**Workaround**: Use the PL/pgSQL function to implement similar functionality on the database. + +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE customer_account ( + customer_id INT PRIMARY KEY, + balance NUMERIC(10, 2) NOT NULL ); +INSERT INTO customer_account (customer_id, balance) +VALUES + (1, 100.00), + (2, 200.00), + (3, 300.00); -CREATE FUNCTION public.get_employee_salary(emp_id integer) RETURNS numeric - LANGUAGE plpgsql - AS $$ -DECLARE - emp_salary employees.salary%TYPE; -- Declare a variable with the same type as employees.salary -BEGIN - SELECT salary INTO emp_salary - FROM employees - WHERE employee_id = emp_id; +CREATE TABLE recent_transactions ( + transaction_id SERIAL PRIMARY KEY, + customer_id INT NOT NULL, + transaction_value NUMERIC(10, 2) NOT NULL +); +INSERT INTO recent_transactions (customer_id, transaction_value) +VALUES + (1, 50.00), + (3, -25.00), + (4, 150.00); - RETURN emp_salary; -END; -$$; +MERGE INTO customer_account ca +USING recent_transactions t +ON t.customer_id = ca.customer_id +WHEN MATCHED THEN + UPDATE SET balance = balance + transaction_value +WHEN NOT MATCHED THEN + INSERT (customer_id, balance) + VALUES (t.customer_id, t.transaction_value); ``` -Suggested change to CREATE FUNCTION is as follows: +Suggested schema change is to 
replace the MERGE command with a PL/pgSQL function similar to the following: ```sql -CREATE FUNCTION public.get_employee_salary(emp_id integer) RETURNS numeric - LANGUAGE plpgsql - AS $$ -DECLARE - Emp_salary NUMERIC; -- Declare a variable with the same type as employees.salary +CREATE OR REPLACE FUNCTION merge_customer_account() +RETURNS void AS $$ BEGIN - SELECT salary INTO emp_salary - FROM employees - WHERE employee_id = emp_id; - - RETURN emp_salary; + -- Insert new rows or update existing rows in customer_account + INSERT INTO customer_account (customer_id, balance) + SELECT customer_id, transaction_value + FROM recent_transactions + ON CONFLICT (customer_id) + DO UPDATE + SET balance = customer_account.balance + EXCLUDED.balance; END; -$$; +$$ LANGUAGE plpgsql; +``` + +--- + +## Functions and operators + +### XID functions is not supported + +**GitHub**: [Issue #15638](https://github.com/yugabyte/yugabyte-db/issues/15638) + +**Description**: If you have XID datatypes in the source database, its functions, such as, `txid_current()` are not yet supported in YugabyteDB and will result in an error in the target as follows: + +```output + ERROR: Yugabyte does not support xid +``` + +**Workaround**: None. + +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE xid_example ( + id integer, + tx_id xid +); +``` + +--- + +### XML functions is not yet supported + +**GitHub**: [Issue #1043](https://github.com/yugabyte/yugabyte-db/issues/1043) + +**Description**: XML functions and the XML data type are unsupported in YugabyteDB. If you use functions like `xpath`, `xmlconcat`, and `xmlparse`, it will fail with an error as per the following example: + +```sql +yugabyte=# SELECT xml_is_well_formed_content('Alpha') AS is_well_formed_content; +``` + +```output +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +HINT: You need to rebuild PostgreSQL using --with-libxml. 
+``` + +**Workaround**: Convert XML data to JSON format for compatibility with YugabyteDB, or handle XML processing at the application layer before inserting data. + +--- + +### JSONB subscripting + +**GitHub**: Issue [#25575](https://github.com/yugabyte/yugabyte-db/issues/25575) + +**Description**: If you are using the JSONB subscripting in app queries and in the schema (constraints or default expression) on your source database, then the app query will fail once you migrate your apps to YugabyteDB, and import-schema will fail if any DDL has this feature, as it's a PostgreSQL 15 feature. + +```output +ERROR: cannot subscript type jsonb because it is not an array +``` + +**Workaround**: You can use the Arrow ( `-> / ->>` ) operators to access JSONB fields. + +**Fixed In**: {{}}. + +**Example** + +An example query / DDL on the source database is as follows: + +```sql +SELECT ('{"a": {"b": {"c": "some text"}}}'::jsonb)['a']['b']['c']; + +CREATE TABLE test_jsonb_chk ( + id int, + data1 jsonb, + CHECK (data1['key']<>'{}') +); +``` + +Suggested change in query to get it working- + +```sql +SELECT ((('{"a": {"b": {"c": "some text"}}}'::jsonb)->'a')->'b')->>'c'; + +CREATE TABLE test_jsonb_chk ( + id int, + data1 jsonb, + CHECK (data1->'key'<>'{}') +); +``` + +--- + +## Indexes + +### Index creation on partitions fail for some YugabyteDB builds + +**GitHub**: [Issue #14529](https://github.com/yugabyte/yugabyte-db/issues/14529) + +**Description**: If you have a partitioned table with indexes on it, the migration will fail with an error for YugabyteDB `2.15` or `2.16` due to a regression. + +Note that this is fixed in release [2.17.1.0](../../../releases/ybdb-releases/end-of-life/v2.17/#v2.17.1.0). 
+ +**Workaround**: N/A + +**Example** + +An example schema on the source database is as follows: + +```sql +DROP TABLE IF EXISTS list_part; + +CREATE TABLE list_part (id INTEGER, status TEXT, arr NUMERIC) PARTITION BY LIST(status); + +CREATE TABLE list_active PARTITION OF list_part FOR VALUES IN ('ACTIVE'); + +CREATE TABLE list_archived PARTITION OF list_part FOR VALUES IN ('EXPIRED'); + +CREATE TABLE list_others PARTITION OF list_part DEFAULT; + +INSERT INTO list_part VALUES (1,'ACTIVE',100), (2,'RECURRING',20), (3,'EXPIRED',38), (4,'REACTIVATED',144), (5,'ACTIVE',50); + +CREATE INDEX list_ind ON list_part(status); +``` + +--- + +### GiST, BRIN, and SPGIST index types are not supported + +**GitHub**: [Issue #1337](https://github.com/yugabyte/yugabyte-db/issues/1337) + +**Description**: If you have GiST, BRIN, and SPGIST indexes on the source database, it errors out in the import schema phase with the following error: + +```output + ERROR: index method "gist" not supported yet (SQLSTATE XX000) + +``` + +**Workaround**: Currently, there is no workaround; remove the index from the exported schema. 
+ +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE INDEX gist_idx ON public.ts_query_table USING gist (query); +``` + +--- + +### Indexes on some complex data types are not supported + +**GitHub**: [Issue #9698](https://github.com/yugabyte/yugabyte-db/issues/9698), [Issue #23829](https://github.com/yugabyte/yugabyte-db/issues/23829), [Issue #17017](https://github.com/yugabyte/yugabyte-db/issues/17017) + +**Description**: If you have indexes on some complex types such as TSQUERY, TSVECTOR, JSONB, ARRAYs, INET, UDTs, citext, and so on, those will error out in import schema phase with the following error: + +```output + ERROR: INDEX on column of type '' not yet supported +``` + +**Workaround**: Currently, there is no workaround, but you can cast these data types in the index definition to supported types, which may require adjustments on the application side when querying the column using the index. Ensure you address these changes before modifying the schema. + +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE public.citext_type ( + id integer, + data public.citext +); + +CREATE TABLE public.documents ( + id integer NOT NULL, + title_tsvector tsvector, + content_tsvector tsvector +); + +CREATE TABLE public.ts_query_table ( + id integer, + query tsquery +); + +CREATE TABLE public.test_json ( + id integer, + data jsonb +); + +CREATE INDEX tsvector_idx ON public.documents (title_tsvector); + +CREATE INDEX tsquery_idx ON public.ts_query_table (query); + +CREATE INDEX idx_citext ON public.citext_type USING btree (data); + +CREATE INDEX idx_json ON public.test_json (data); ``` --- @@ -773,202 +836,419 @@ $$; ERROR: access method "ybgin" does not support multicolumn indexes (SQLSTATE 0A000) ``` -**Workaround**: Currently, as there is no workaround, modify the schema to not include such indexes. 
+**Workaround**: Currently, as there is no workaround, modify the schema to not include such indexes. + +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE TABLE public.test_gin_json ( + id integer, + text jsonb, + text1 jsonb +); + +CREATE INDEX gin_multi_on_json + ON public.test_gin_json USING gin (text, text1); +``` + +--- + +## Concurrency control + +### Advisory locks is not yet implemented + +**GitHub**: [Issue #3642](https://github.com/yugabyte/yugabyte-db/issues/3642) + +**Description**: YugabyteDB does not support PostgreSQL advisory locks (for example, pg_advisory_lock, pg_try_advisory_lock). Any attempt to use advisory locks will result in a "function-not-implemented" error as per the following example: + +```sql +yugabyte=# SELECT pg_advisory_lock(100), COUNT(*) FROM cars; +``` + +```output +ERROR: advisory locks feature is currently in preview +HINT: To enable this preview feature, set the GFlag ysql_yb_enable_advisory_locks to true and add it to the list of allowed preview flags i.e. GFlag allowed_preview_flags_csv. If the app doesn't need strict functionality, this error can be silenced by using the GFlag yb_silence_advisory_locks_not_supported_error. See https://github.com/yugabyte/yugabyte-db/issues/3642 for details +``` + +**Workaround**: Implement a custom locking mechanism in the application to coordinate actions without relying on database-level advisory locks. + +--- + +### Two-Phase Commit + +**GitHub**: Issue [#11084](https://github.com/yugabyte/yugabyte-db/issues/11084) + +**Description**: If your application queries or PL/pgSQL objects rely on [Two-Phase Commit protocol](https://www.postgresql.org/docs/11/two-phase.html) that allows multiple distributed systems to work together in a transactional manner in the source PostgreSQL database, these functionalities will not work after migrating to YugabyteDB. 
Currently, Two-Phase Commit is not implemented in YugabyteDB and will throw the following error when you attempt to execute the commands: + +```sql +ERROR: PREPARE TRANSACTION not supported yet +``` + +**Workaround**: Currently, there is no workaround. + +--- + +### DDL operations within the Transaction + +**GitHub**: Issue [#1404](https://github.com/yugabyte/yugabyte-db/issues/1404) + +**Description**: If your application queries or PL/pgSQL objects runs DDL operations inside transactions in the source PostgreSQL database, this functionality will not work after migrating to YugabyteDB. Currently, DDL operations in a transaction in YugabyteDB is not supported and will not work as expected. + +**Workaround**: Currently, there is no workaround. + +**Example:** + +```sql +yugabyte=# \d test +Did not find any relation named "test". +yugabyte=# BEGIN; +BEGIN +yugabyte=*# CREATE TABLE test(id int, val text); +CREATE TABLE +yugabyte=*# \d test + Table "public.test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + val | text | | | +yugabyte=*# ROLLBACK; +ROLLBACK +yugabyte=# \d test + Table "public.test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + val | text | | | +``` + +--- + +## Extensions + +### PostgreSQL extensions are not supported by target YugabyteDB + +**Documentation**: [PostgreSQL extensions](../../../explore/ysql-language-features/pg-extensions/) + +**Description**: If you have any PostgreSQL extension that is not supported by the target YugabyteDB, they result in the following errors during import schema: + +```output +ERROR: could not open extension control file "/home/centos/yb/postgres/share/extension/.control": No such file or directory +``` + +**Workaround**: Remove the extension from the exported schema. 
+ +**Example** + +An example schema on the source database is as follows: + +```sql +CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public; +``` + +--- + +## Server programming + +### Events Listen / Notify + +**GitHub**: Issue [#1872](https://github.com/yugabyte/yugabyte-db/issues/1872) + +**Description**: If your application queries or PL/pgSQL objects rely on **LISTEN/NOTIFY events** in the source PostgreSQL database, these functionalities will not work after migrating to YugabyteDB. Currently, LISTEN/NOTIFY events are a no-op in YugabyteDB, and any attempt to use them will trigger a warning instead of performing the expected event-driven operations: + +```sql +WARNING: LISTEN not supported yet and will be ignored +``` + +**Workaround**: Currently, there is no workaround. + +**Example:** + +```sql +LISTEN my_table_changes; +INSERT INTO my_table (name) VALUES ('Charlie'); +NOTIFY my_table_changes, 'New row added with name: Charlie'; +``` + +--- + +### Constraint trigger is not supported + +**GitHub**: [Issue #4700](https://github.com/yugabyte/yugabyte-db/issues/4700) + +**Description**: If you have constraint triggers in your source database, as they are currently unsupported in YugabyteDB, and they will error out as follows: + +```output + ERROR: CREATE CONSTRAINT TRIGGER not supported yet +``` + +**Workaround**: Currently, there is no workaround; remove the constraint trigger from the exported schema and modify the applications if they are using these triggers before pointing it to YugabyteDB. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.test_gin_json ( - id integer, - text jsonb, - text1 jsonb +CREATE TABLE public.users ( + id int, + email character varying(255) ); -CREATE INDEX gin_multi_on_json - ON public.test_gin_json USING gin (text, text1); +CREATE FUNCTION public.check_unique_username() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM users + WHERE email = NEW.email AND id <> NEW.id + ) THEN + RAISE EXCEPTION 'Email % already exists.', NEW.email; + END IF; + RETURN NEW; +END; +$$; + +CREATE CONSTRAINT TRIGGER check_unique_username_trigger + AFTER INSERT OR UPDATE ON public.users + DEFERRABLE INITIALLY DEFERRED + FOR EACH ROW + EXECUTE FUNCTION public.check_unique_username(); ``` --- -### Policies on users in source require manual user creation +### REFERENCING clause for triggers -**GitHub**: [Issue #1655](https://github.com/yugabyte/yb-voyager/issues/1655) +**GitHub**: [Issue #1668](https://github.com/yugabyte/yugabyte-db/issues/1668) -**Description**: If there are policies in the source schema for USERs in the database, the USERs have to be created manually on the target YugabyteDB, as currently the migration of USER/GRANT is not supported. Skipping the manual user creation will return an error during import schema as follows: +**Description**: If you have the REFERENCING clause (transition tables) in triggers in source schema, the trigger creation will fail in import schema as it is not currently supported in YugabyteDB. ```output -ERROR: role "" does not exist (SQLSTATE 42704) +ERROR: REFERENCING clause (transition tables) not supported yet ``` -**Workaround**: Create the USERs manually on target before import schema to create policies. +**Workaround**: Currently, there is no workaround. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.z1 ( - a integer, - b text +CREATE TABLE projects ( + id SERIAL PRIMARY KEY, + name TEXT, + region TEXT ); -CREATE ROLE regress_rls_group; -CREATE POLICY p2 ON public.z1 TO regress_rls_group USING (((a % 2) = 1)); + +CREATE OR REPLACE FUNCTION log_deleted_projects() +RETURNS TRIGGER AS $$ +BEGIN + --logic to use the old_table for deleted rows + SELECT id, name, region FROM old_table; + +END; +$$ LANGUAGE plpgsql + + +CREATE TRIGGER projects_loose_fk_trigger +AFTER DELETE ON projects +REFERENCING OLD TABLE AS old_table +FOR EACH STATEMENT +EXECUTE FUNCTION log_deleted_projects(); ``` --- -### VIEW WITH CHECK OPTION is not supported +### BEFORE ROW triggers on partitioned tables -**GitHub**: [Issue #22716](https://github.com/yugabyte/yugabyte-db/issues/22716) +**GitHub**: [Issue #24830](https://github.com/yugabyte/yugabyte-db/issues/24830) -**Description**: If there are VIEWs with check option in the source database, they error out during the import schema phase as follows: +**Description**: If you have the BEFORE ROW triggers on partitioned tables in source schema, the trigger creation will fail in import schema as it is not currently supported in YugabyteDB. ```output -ERROR: VIEW WITH CHECK OPTION not supported yet +ERROR: Partitioned tables cannot have BEFORE / FOR EACH ROW triggers. ``` -**Workaround**: You can use a TRIGGER with INSTEAD OF clause on INSERT/UPDATE on view to achieve this functionality, but it may require application-side adjustments to handle different errors instead of constraint violations. +**Workaround**: Create this trigger on the individual partitions. + +**Fixed In**: {{}}. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.employees ( - employee_id integer NOT NULL, - employee_name text, - salary numeric -); +CREATE TABLE test_partition_trigger ( + id INT, + val TEXT, + PRIMARY KEY (id) +) PARTITION BY RANGE (id); -CREATE VIEW public.employees_less_than_12000 AS - SELECT - employees.employee_id, - employees.employee_name, - employees.salary - FROM - public.employees - WHERE - employees.employee_id < 12000 - WITH CASCADED CHECK OPTION; +CREATE TABLE test_partition_trigger_part1 PARTITION OF test_partition_trigger + FOR VALUES FROM (1) TO (100); + +CREATE TABLE test_partition_trigger_part2 PARTITION OF test_partition_trigger + FOR VALUES FROM (100) TO (200); + +CREATE OR REPLACE FUNCTION check_and_modify_val() +RETURNS TRIGGER AS $$ +BEGIN + -- Check if id is even; if not, modify `val` to indicate an odd ID + IF (NEW.id % 2) <> 0 THEN + NEW.val := 'Odd ID'; + END IF; + + -- Return the row with modifications (if any) + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER before_insert_check +BEFORE INSERT ON test_partition_trigger +FOR EACH ROW +EXECUTE FUNCTION check_and_modify_val(); ``` Suggested change to the schema is as follows: ```sql -SELECT - employees.employee_id, - employees.employee_name, - employees.salary -FROM - public.employees -WHERE - employees.employee_id < 12000; -CREATE OR REPLACE FUNCTION modify_employees_less_than_12000() +CREATE TABLE test_partition_trigger ( + id INT, + val TEXT, + PRIMARY KEY (id) +) PARTITION BY RANGE (id); + +CREATE TABLE test_partition_trigger_part1 PARTITION OF test_partition_trigger + FOR VALUES FROM (1) TO (100); + +CREATE TABLE test_partition_trigger_part2 PARTITION OF test_partition_trigger + FOR VALUES FROM (100) TO (200); + +CREATE OR REPLACE FUNCTION check_and_modify_val() RETURNS TRIGGER AS $$ BEGIN - -- Handle INSERT operations - IF TG_OP = 'INSERT' THEN - IF NEW.employee_id < 12000 THEN - INSERT INTO employees(employee_id, 
employee_name, salary) - VALUES (NEW.employee_id, NEW.employee_name, NEW.salary); - RETURN NEW; - ELSE - RAISE EXCEPTION 'new row violates check option for view "employees_less_than_12000"; employee_id must be less than 12000'; - END IF; - - -- Handle UPDATE operations - ELSIF TG_OP = 'UPDATE' THEN - IF NEW.employee_id < 12000 THEN - UPDATE employees - SET employee_name = NEW.employee_name, - salary = NEW.salary - WHERE employee_id = OLD.employee_id; - RETURN NEW; - ELSE - RAISE EXCEPTION 'new row violates check option for view "employees_less_than_12000"; employee_id must be less than 12000'; - END IF; + -- Check if id is even; if not, modify `val` to indicate an odd ID + IF (NEW.id % 2) <> 0 THEN + NEW.val := 'Odd ID'; END IF; + + -- Return the row with modifications (if any) + RETURN NEW; END; $$ LANGUAGE plpgsql; -CREATE TRIGGER trigger_modify_employee_12000 - INSTEAD OF INSERT OR UPDATE ON employees_less_than_12000 - FOR EACH ROW - EXECUTE FUNCTION modify_employees_less_than_12000(); +CREATE TRIGGER before_insert_check +BEFORE INSERT ON test_partition_trigger_part1 +FOR EACH ROW +EXECUTE FUNCTION check_and_modify_val(); + +CREATE TRIGGER before_insert_check +BEFORE INSERT ON test_partition_trigger_part2 +FOR EACH ROW +EXECUTE FUNCTION check_and_modify_val(); + ``` --- -### UNLOGGED table is not supported +### %Type syntax is not supported -**GitHub**: [Issue #1129](https://github.com/yugabyte/yugabyte-db/issues/1129) +**GitHub**: [Issue #23619](https://github.com/yugabyte/yugabyte-db/issues/23619) -**Description**: If there are UNLOGGED tables in the source schema, they will error out during the import schema with the following error as it is not supported in target YugabyteDB. 
+**Description**: If you have any function, procedure, or trigger using the `%TYPE` syntax for referencing a type of a column from a table, then it errors out in YugabyteDB with the following error: ```output -ERROR: UNLOGGED database object not supported yet +ERROR: invalid type name "employees.salary%TYPE" (SQLSTATE 42601) ``` -**Workaround**: Convert it to a LOGGED table. - -**Fixed In**: {{}} +**Workaround**: Fix the syntax to include the actual type name instead of referencing the type of a column. **Example** An example schema on the source database is as follows: ```sql -CREATE UNLOGGED TABLE tbl_unlogged ( - id int, - val text +CREATE TABLE public.employees ( + employee_id integer NOT NULL, + employee_name text, + salary numeric ); -``` -Suggested change to the schema is as follows: -```sql -CREATE TABLE tbl_unlogged ( - id int, - val text -); +CREATE FUNCTION public.get_employee_salary(emp_id integer) RETURNS numeric + LANGUAGE plpgsql + AS $$ +DECLARE + emp_salary employees.salary%TYPE; -- Declare a variable with the same type as employees.salary +BEGIN + SELECT salary INTO emp_salary + FROM employees + WHERE employee_id = emp_id; + + RETURN emp_salary; +END; +$$; ``` ---- +Suggested change to CREATE FUNCTION is as follows: -### Hash-sharding with indexes on the timestamp/date columns +```sql +CREATE FUNCTION public.get_employee_salary(emp_id integer) RETURNS numeric + LANGUAGE plpgsql + AS $$ +DECLARE + Emp_salary NUMERIC; -- Declare a variable with the same type as employees.salary +BEGIN + SELECT salary INTO emp_salary + FROM employees + WHERE employee_id = emp_id; -**GitHub**: [Issue #49](https://github.com/yugabyte/yb-voyager/issues/49) -**Description**: Indexes on timestamp or date columns are commonly used in range-based queries. However, by default, indexes in YugabyteDB are hash-sharded, which is not optimal for range predicates and can impact query performance. 
+ RETURN emp_salary; +END; +$$; +``` -Note that range sharding is currently enabled by default only in [PostgreSQL compatibility mode](../../../develop/postgresql-compatibility/) in YugabyteDB. +--- -**Workaround**: Explicitly configure the index to use range sharding. This ensures efficient data access with range-based queries. +## PostgreSQL 12 and later features -**Example** +### PostgreSQL 12 and later features -An example schema on the source database is as follows: +**GitHub**: Issue [#25575](https://github.com/yugabyte/yugabyte-db/issues/25575) -```sql -CREATE TABLE orders ( - order_id int PRIMARY, - ... - created_at timestamp -); +**Description**: If any of the following PostgreSQL 12 and later features are present in the source schema, the import schema step on the target YugabyteDB will fail. -CREATE INDEX idx_orders_created ON orders(created_at); -``` +- [JSON Constructor functions](https://www.postgresql.org/about/featurematrix/detail/395/) - `JSON_ARRAY_AGG`, `JSON_ARRAY`, `JSON_OBJECT`, `JSON_OBJECT_AGG`. +- [JSON query functions](https://www.postgresql.org/docs/17/functions-json.html#FUNCTIONS-SQLJSON-TABLE) - `JSON_QUERY`, `JSON_VALUE`, `JSON_EXISTS`, `JSON_TABLE`. +- [IS JSON predicate clause](https://www.postgresql.org/about/featurematrix/detail/396/). +- Any Value [Aggregate function](https://www.postgresql.org/docs/16/functions-aggregate.html#id-1.5.8.27.5.2.4.1.1.1.1) - `any_value`. +- [COPY FROM command with ON_ERROR](https://www.postgresql.org/about/featurematrix/detail/433/) option. +- [Non-decimal integer literals](https://www.postgresql.org/about/featurematrix/detail/407/). +- [Non-deterministic collations](https://www.postgresql.org/docs/12/collation.html#COLLATION-NONDETERMINISTIC). +- [COMPRESSION clause](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-COMPRESSION) in TABLE Column for TOASTing method. 
+- [CREATE DATABASE options](https://www.postgresql.org/docs/15/sql-createdatabase.html) (locale, collation, strategy, and OID related). -Suggested change to the schema is to add the ASC/DESC clause as follows: +In addition, if any of the following PostgreSQL features are present in the source schema, the import schema step on the target YugabyteDB will fail, unless you are importing to YugabyteDB [v2.25](/preview/releases/ybdb-releases/v2.25) (which supports PG15). -```sql -CREATE INDEX idx_orders_created ON orders(created_at DESC); -``` +- [Multirange datatypes](https://www.postgresql.org/docs/current/rangetypes.html#RANGETYPES-BUILTIN). +- [UNIQUE NULLS NOT DISTINCT clause](https://www.postgresql.org/about/featurematrix/detail/392/) in constraint and index. +- [Range Aggregate functions](https://www.postgresql.org/docs/16/functions-aggregate.html#id-1.5.8.27.5.2.4.1.1.1.1) - `range_agg`, `range_intersect_agg`. +- [FETCH FIRST … WITH TIES in select](https://www.postgresql.org/docs/13/sql-select.html#SQL-LIMIT) statement. +- [Regex functions](https://www.postgresql.org/about/featurematrix/detail/367/) - `regexp_count`, `regexp_instr`, `regexp_like`. +- [Foreign key references](https://www.postgresql.org/about/featurematrix/detail/319/) to partitioned table. +- [Security invoker views](https://www.postgresql.org/about/featurematrix/detail/389/). +- COPY FROM command with WHERE [clause](https://www.postgresql.org/about/featurematrix/detail/330/). +- [Deterministic attribute](https://www.postgresql.org/docs/12/collation.html#COLLATION-NONDETERMINISTIC) in COLLATION objects. +- [SQL Body in Create function](https://www.postgresql.org/docs/15/sql-createfunction.html#:~:text=a%20new%20session.-,sql_body,-The%20body%20of). +- [Common Table Expressions (With queries) with MATERIALIZED clause](https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-CTE-MATERIALIZATION). 
--- +## Migration process and tooling issues + ### Exporting data with names for tables/functions/procedures using special characters/whitespaces fails **GitHub**: [Issue #636](https://github.com/yugabyte/yb-voyager/issues/636), [Issue #688](https://github.com/yugabyte/yb-voyager/issues/688), [Issue #702](https://github.com/yugabyte/yb-voyager/issues/702) @@ -1046,27 +1326,62 @@ Suggested changes to the schema can be done using the following steps: --- -### Unsupported datatypes by YugabyteDB +### Foreign table in the source database requires SERVER and USER MAPPING -**GitHub**: [Issue 11323](https://github.com/yugabyte/yugabyte-db/issues/11323), [Issue 1731](https://github.com/yugabyte/yb-voyager/issues/1731) +**GitHub**: [Issue #1627](https://github.com/yugabyte/yb-voyager/issues/1627) -**Description**: The migration skips databases that have the following data types on any column: `GEOMETRY`, `GEOGRAPHY`, `BOX2D`, `BOX3D`, `TOPOGEOMETRY`, `RASTER`, `PG_LSN`, or `TXID_SNAPSHOT`. +**Description**: If you have foreign tables in the schema, during the export schema phase the exported schema does not include the SERVER and USER MAPPING objects. You must manually create these objects before importing schema, otherwise FOREIGN TABLE creation fails with the following error: -**Workaround**: None. +```output +ERROR: server "remote_server" does not exist (SQLSTATE 42704) +``` + +**Workaround**: Create the SERVER and its USER MAPPING manually on the target YugabyteDB database. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE public.locations ( - id integer NOT NULL, - name character varying(100), - geom geometry(Point,4326) - ); +CREATE EXTENSION postgres_fdw; + +CREATE SERVER remote_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host '127.0.0.1', port '5432', dbname 'postgres'); + +CREATE FOREIGN TABLE foreign_table ( + id INT, + name TEXT, + data JSONB +) +SERVER remote_server +OPTIONS ( + schema_name 'public', + table_name 'remote_table' +); + +CREATE USER MAPPING FOR postgres +SERVER remote_server +OPTIONS (user 'postgres', password 'XXX'); +``` +Exported schema only has the following: + +```sql +CREATE FOREIGN TABLE foreign_table ( + id INT, + name TEXT, + data JSONB +) +SERVER remote_server +OPTIONS ( + schema_name 'public', + table_name 'remote_table' +); ``` +Suggested change is to manually create the SERVER and USER MAPPING on the target YugabyteDB. + --- ### Unsupported datatypes by Voyager during live migration @@ -1096,536 +1411,381 @@ CREATE TABLE combined_tbl ( --- -### XID functions is not supported +### Data ingestion on XML data type is not supported -**GitHub**: [Issue #15638](https://github.com/yugabyte/yugabyte-db/issues/15638) +**GitHub**: [Issue #1043](https://github.com/yugabyte/yugabyte-db/issues/1043) -**Description**: If you have XID datatypes in the source database, its functions, such as, `txid_current()` are not yet supported in YugabyteDB and will result in an error in the target as follows: +**Description**: If you have XML datatype in the source database, it errors out in the import data to target YugabyteDB phase as data ingestion is not allowed on this data type: ```output - ERROR: Yugabyte does not support xid + ERROR: unsupported XML feature (SQLSTATE 0A000) ``` -**Workaround**: None. 
+**Workaround**: To migrate the data, a workaround is to convert the type to text and import the data to target; to read the data on the target YugabyteDB, you need to create some user defined functions similar to XML functions. **Example** An example schema on the source database is as follows: ```sql -CREATE TABLE xid_example ( +CREATE TABLE xml_example ( id integer, - tx_id xid + data xml ); ``` --- -### REFERENCING clause for triggers +### Policies on users in source require manual user creation -**GitHub**: [Issue #1668](https://github.com/yugabyte/yugabyte-db/issues/1668) +**GitHub**: [Issue #1655](https://github.com/yugabyte/yb-voyager/issues/1655) -**Description**: If you have the REFERENCING clause (transition tables) in triggers in source schema, the trigger creation will fail in import schema as it is not currently supported in YugabyteDB. +**Description**: If there are policies in the source schema for USERs in the database, the USERs have to be created manually on the target YugabyteDB, as currently the migration of USER/GRANT is not supported. Skipping the manual user creation will return an error during import schema as follows: ```output -ERROR: REFERENCING clause (transition tables) not supported yet +ERROR: role "" does not exist (SQLSTATE 42704) ``` -**Workaround**: Currently, there is no workaround. +**Workaround**: Create the USERs manually on target before import schema to create policies. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE projects ( - id SERIAL PRIMARY KEY, - name TEXT, - region TEXT +CREATE TABLE public.z1 ( + a integer, + b text ); - -CREATE OR REPLACE FUNCTION log_deleted_projects() -RETURNS TRIGGER AS $$ -BEGIN - --logic to use the old_table for deleted rows - SELECT id, name, region FROM old_table; - -END; -$$ LANGUAGE plpgsql - - -CREATE TRIGGER projects_loose_fk_trigger -AFTER DELETE ON projects -REFERENCING OLD TABLE AS old_table -FOR EACH STATEMENT -EXECUTE FUNCTION log_deleted_projects(); +CREATE ROLE regress_rls_group; +CREATE POLICY p2 ON public.z1 TO regress_rls_group USING (((a % 2) = 1)); ``` --- -### BEFORE ROW triggers on partitioned tables - -**GitHub**: [Issue #24830](https://github.com/yugabyte/yugabyte-db/issues/24830) +### Creation of certain views in the rule.sql file -**Description**: If you have the BEFORE ROW triggers on partitioned tables in source schema, the trigger creation will fail in import schema as it is not currently supported in YugabyteDB. +**GitHub**: [Issue #770](https://github.com/yugabyte/yb-voyager/issues/770) -```output -ERROR: Partitioned tables cannot have BEFORE / FOR EACH ROW triggers. -``` +**Description**: There may be few cases where certain exported views come under the `rule.sql` file and the `view.sql` file might contain a dummy view definition. This `pg_dump` behaviour may be due to how PostgreSQL handles views internally (via rules). -**Workaround**: Create this trigger on the individual partitions. +{{< note title ="Note" >}} +This does not affect the migration as YugabyteDB Voyager takes care of the DDL creation sequence internally. +{{< /note >}} -**Fixed In**: {{}}. 
+**Workaround**: Not required **Example** An example schema on the source database is as follows: ```sql -CREATE TABLE test_partition_trigger ( - id INT, - val TEXT, - PRIMARY KEY (id) -) PARTITION BY RANGE (id); - -CREATE TABLE test_partition_trigger_part1 PARTITION OF test_partition_trigger - FOR VALUES FROM (1) TO (100); - -CREATE TABLE test_partition_trigger_part2 PARTITION OF test_partition_trigger - FOR VALUES FROM (100) TO (200); - -CREATE OR REPLACE FUNCTION check_and_modify_val() -RETURNS TRIGGER AS $$ -BEGIN - -- Check if id is even; if not, modify `val` to indicate an odd ID - IF (NEW.id % 2) <> 0 THEN - NEW.val := 'Odd ID'; - END IF; - - -- Return the row with modifications (if any) - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER before_insert_check -BEFORE INSERT ON test_partition_trigger -FOR EACH ROW -EXECUTE FUNCTION check_and_modify_val(); +CREATE TABLE foo(n1 int PRIMARY KEY, n2 int); +CREATE VIEW v1 AS + SELECT n1,n2 + FROM foo + GROUP BY n1; ``` -Suggested change to the schema is as follows: +The exported schema for `view.sql` is as follows: ```sql +CREATE VIEW public.v1 AS + SELECT + NULL::integer AS n1, + NULL::integer AS n2; +``` -CREATE TABLE test_partition_trigger ( - id INT, - val TEXT, - PRIMARY KEY (id) -) PARTITION BY RANGE (id); - -CREATE TABLE test_partition_trigger_part1 PARTITION OF test_partition_trigger - FOR VALUES FROM (1) TO (100); - -CREATE TABLE test_partition_trigger_part2 PARTITION OF test_partition_trigger - FOR VALUES FROM (100) TO (200); - -CREATE OR REPLACE FUNCTION check_and_modify_val() -RETURNS TRIGGER AS $$ -BEGIN - -- Check if id is even; if not, modify `val` to indicate an odd ID - IF (NEW.id % 2) <> 0 THEN - NEW.val := 'Odd ID'; - END IF; - - -- Return the row with modifications (if any) - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER before_insert_check -BEFORE INSERT ON test_partition_trigger_part1 -FOR EACH ROW -EXECUTE FUNCTION check_and_modify_val(); - -CREATE TRIGGER 
before_insert_check -BEFORE INSERT ON test_partition_trigger_part2 -FOR EACH ROW -EXECUTE FUNCTION check_and_modify_val(); +The exported schema for `rule.sql` is as follows: +```sql +CREATE OR REPLACE VIEW public.v1 AS + SELECT foo.n1,foo.n2 + FROM public.foo + GROUP BY foo.n1; ``` --- -### Advisory locks is not yet implemented +## Performance optimizations -**GitHub**: [Issue #3642](https://github.com/yugabyte/yugabyte-db/issues/3642) +### Hash-sharding with indexes on the timestamp/date columns -**Description**: YugabyteDB does not support PostgreSQL advisory locks (for example, pg_advisory_lock, pg_try_advisory_lock). Any attempt to use advisory locks will result in a "function-not-implemented" error as per the following example: +**GitHub**: [Issue #49](https://github.com/yugabyte/yb-voyager/issues/49) +**Description**: Indexes on timestamp or date columns are commonly used in range-based queries. However, indexes in YugabyteDB are hash-sharded by default, which is not optimal for range predicates, and can impact query performance. -```sql -yugabyte=# SELECT pg_advisory_lock(100), COUNT(*) FROM cars; -``` +Note that range sharding is currently enabled by default only in [PostgreSQL compatibility mode](../../../develop/postgresql-compatibility/) in YugabyteDB. -```output -ERROR: advisory locks feature is currently in preview -HINT: To enable this preview feature, set the GFlag ysql_yb_enable_advisory_locks to true and add it to the list of allowed preview flags i.e. GFlag allowed_preview_flags_csv. If the app doesn't need strict functionality, this error can be silenced by using the GFlag yb_silence_advisory_locks_not_supported_error. See https://github.com/yugabyte/yugabyte-db/issues/3642 for details -``` +**Workaround**: Explicitly configure the index to use range sharding. This ensures efficient data access with range-based queries. 
-**Workaround**: Implement a custom locking mechanism in the application to coordinate actions without relying on database-level advisory locks. +**Example** ---- +An example schema on the source database is as follows: -### System columns is not yet supported +```sql +CREATE TABLE orders ( + order_id int PRIMARY, + ... + created_at timestamp +); -**GitHub**: [Issue #24843](https://github.com/yugabyte/yugabyte-db/issues/24843) +CREATE INDEX idx_orders_created ON orders(created_at); +``` -**Description**: System columns, including `xmin`, `xmax`, `cmin`, `cmax`, and `ctid`, are not available in YugabyteDB. Queries or applications referencing these columns will fail as per the following example: +Suggested change to the schema is to add the ASC/DESC clause as follows: ```sql -yugabyte=# SELECT xmin, xmax FROM employees where id = 100; -``` - -```output -ERROR: System column "xmin" is not supported yet +CREATE INDEX idx_orders_created ON orders(created_at DESC); ``` -**Workaround**: Use the application layer to manage tracking instead of relying on system columns. - --- -### XML functions is not yet supported +### Hotspots with range-sharded timestamp/date indexes -**GitHub**: [Issue #1043](https://github.com/yugabyte/yugabyte-db/issues/1043) +**Description**: Range-sharded indexes on timestamp or date columns can lead to read/write hotspots in distributed databases like YugabyteDB, due to the way these values increment. For example, take a column of values `created_at timestamp`. As new values are inserted, all the writes will go to the same tablet. This tablet remains a hotspot until it is manually split or meets the auto-splitting criteria. Then, after a split, the newly created tablet becomes the next hotspot as inserts continue to follow the same increasing pattern. This leads to uneven data and query distribution, resulting in performance bottlenecks. -**Description**: XML functions and the XML data type are unsupported in YugabyteDB. 
If you use functions like `xpath`, `xmlconcat`, and `xmlparse`, it will fail with an error as per the following example: +Note that if the table is colocated, this hotspot concern can safely be ignored, as all the data resides on a single tablet, and the distribution is no longer relevant. -```sql -yugabyte=# SELECT xml_is_well_formed_content('Alpha') AS is_well_formed_content; -``` +**Workaround**: -```output -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -HINT: You need to rebuild PostgreSQL using --with-libxml. -``` +To address this issue and improve query performance, the recommendation is to change the sharding key to a value that is well distributed among all nodes while keeping the timestamp column as the clustering key. The new sharding key will be a modulo of the hash of the timestamp column value, which is then used to distribute data using a hash-based strategy, effectively spreading the load across multiple nodes. -**Workaround**: Convert XML data to JSON format for compatibility with YugabyteDB, or handle XML processing at the application layer before inserting data. +Ensure that the index on the column is configured to be range-sharded. ---- +**Example** -### Large Objects and its functions are currently not supported +An example schema on the source database is as follows: +```sql +CREATE TABLE orders ( + order_id int PRIMARY, + ... + created_at timestamp +); +CREATE INDEX idx_orders_created ON orders(created_at DESC); -**GitHub**: Issue [#25318](https://github.com/yugabyte/yugabyte-db/issues/25318) +``` -**Description**: If you have large objects (datatype `lo`) in the source schema and are using large object functions in queries, the migration will fail during import-schema, as large object is not supported in YugabyteDB. 
+Suggested change to the schema is to add the sharding key as the modulo of the hash of the timestamp column value, which gives a key in a range (for example, 0-15). This can change depending on the use case. This key will be used to distribute the data among various tablets and hence help in distributing the data evenly. ```sql -SELECT lo_create(''); +CREATE TABLE orders ( + order_id int PRIMARY, + ... + created_at timestamp +); +CREATE INDEX idx_orders_created ON orders( (yb_hash_code(created_at) % 16) ASC, created_at DESC); ``` -```output -ERROR: Transaction for catalog table write operation 'pg_largeobject_metadata' not found -``` +--- -**Workaround**: No workaround is available. +### Redundant indexes + +**Description**: A redundant index is an index that duplicates the functionality of another index or is unnecessary because the database can use an existing index to achieve the same result. This happens when multiple indexes cover the same columns or when a subset of columns in one index is already covered by another. + +**Workaround**: Remove the redundant index from the schema. **Example** An example schema on the source database is as follows: ```sql -CREATE TABLE image (id int, raster lo); +CREATE TABLE orders ( + order_id int PRIMARY, + product_id int, + ... 
+); -CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON public.image - FOR EACH ROW EXECUTE FUNCTION lo_manage(raster); +CREATE INDEX idx_orders_order_id on orders(order_id); +CREATE INDEX idx_orders_order_id_product_id on orders(order_id, product_id); +``` + +Suggested change to the schema is to remove this redundant index `idx_orders_order_id` as another stronger index is present `idx_orders_order_id_product_id`: + +```sql +CREATE INDEX idx_orders_order_id_product_id on orders(order_id, product_id); ``` --- -### PostgreSQL 12 and later features -**GitHub**: Issue [#25575](https://github.com/yugabyte/yugabyte-db/issues/25575) +### Index on low-cardinality column -**Description**: -**GitHub**: Issue [#25575](https://github.com/yugabyte/yugabyte-db/issues/25575) +**Description**: -**Description**: If any of these PostgreSQL features for version 12 and later are present in the source schema, the import schema step on the target YugabyteDB will fail as YugabyteDB is currently PG11 compatible. +In YugabyteDB, you can specify three kinds of columns when using [CREATE INDEX](../../../api/ysql/the-sql-language/statements/ddl_create_index): sharding, clustering, and covering. (For more details, refer to [Secondary indexes](../../../explore/ysql-language-features/indexes-constraints/secondary-indexes-ysql/).) The default sharding strategy is HASH unless [Enhanced PostgreSQL Compatibility mode](../../../develop/postgresql-compatibility/) is enabled, in which case, RANGE is the default sharding strategy. -- [JSON Constructor functions](https://www.postgresql.org/about/featurematrix/detail/395/) - `JSON_ARRAY_AGG`, `JSON_ARRAY`, `JSON_OBJECT`, `JSON_OBJECT_AGG`. -- [JSON query functions](https://www.postgresql.org/docs/17/functions-json.html#FUNCTIONS-SQLJSON-TABLE) - `JSON_QUERY`, `JSON_VALUE`, `JSON_EXISTS`, `JSON_TABLE`. -- [IS JSON predicate clause](https://www.postgresql.org/about/featurematrix/detail/396/). -- Any Value [Aggregate function](https://www.postgresql.org/docs/16/functions-aggregate.html#id-1.5.8.27.5.2.4.1.1.1.1) - `any_value`. 
-- [COPY FROM command with ON_ERROR](https://www.postgresql.org/about/featurematrix/detail/433/) option. -- [Non-decimal integer literals](https://www.postgresql.org/about/featurematrix/detail/407/). -- [Non-deterministic collations](https://www.postgresql.org/docs/12/collation.html#COLLATION-NONDETERMINISTIC). -- [COMPRESSION clause](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-COMPRESSION) in TABLE Column for TOASTing method. -- [CREATE DATABASE options](https://www.postgresql.org/docs/15/sql-createdatabase.html) (locale, collation, strategy, and oid related). +Design the index to evenly distribute data across all nodes and optimize performance based on query patterns. Avoid using low-cardinality columns, such as boolean values, ENUMs, or days of the week, as sharding keys, as they result in data being distributed across only a few tablets. -Apart from these, the following issues are supported in YugabyteDB [v2.25](/preview/releases/ybdb-releases/v2.25), which supports PostgreSQL 15. +#### Single column index -- [Multirange datatypes](https://www.postgresql.org/docs/current/rangetypes.html#RANGETYPES-BUILTIN). -- [UNIQUE NULLS NOT DISTINCT clause](https://www.postgresql.org/about/featurematrix/detail/392/) in constraint and index. -- [Range Aggregate functions](https://www.postgresql.org/docs/16/functions-aggregate.html#id-1.5.8.27.5.2.4.1.1.1.1) - `range_agg`, `range_intersect_agg`. -- [FETCH FIRST … WITH TIES in select](https://www.postgresql.org/docs/13/sql-select.html#SQL-LIMIT) statement. -- [Regex functions](https://www.postgresql.org/about/featurematrix/detail/367/) - `regexp_count`, `regexp_instr`, `regexp_like`. -- [Foreign key references](https://www.postgresql.org/about/featurematrix/detail/319/) to partitioned table. -- [Security invoker views](https://www.postgresql.org/about/featurematrix/detail/389/). -- COPY FROM command with WHERE [clause](https://www.postgresql.org/about/featurematrix/detail/330/). 
-- [Deterministic attribute](https://www.postgresql.org/docs/12/collation.html#COLLATION-NONDETERMINISTIC) in COLLATION objects. -- [SQL Body in Create function](https://www.postgresql.org/docs/15/sql-createfunction.html#:~:text=a%20new%20session.-,sql_body,-The%20body%20of). -- [Common Table Expressions (With queries) with MATERIALIZED clause](https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-CTE-MATERIALIZATION). +Using a single-column index on a low-cardinality column leads to uneven data distribution, regardless of the sharding strategy. ---- +**Workaround**: -### MERGE command +It is recommended to drop the index if it is not required. -**GitHub**: Issue [#25574](https://github.com/yugabyte/yugabyte-db/issues/25574) +If the index is used in queries, combine it with a high-cardinality column to create either a multi-column index with the sharding key on the high-cardinality column or a multi-column range-sharding index. This ensures better data distribution across all nodes. -**Description**: If you are using a Merge query to conditionally insert, update, or delete rows on a table on your source database, then this query will fail once you migrate your apps to YugabyteDB as it is a PostgreSQL 15 feature, and not supported yet. +#### Multi-column index -```output -ERROR: syntax error at or near "MERGE" -``` +In a multi-column index with a low cardinality column as the sharding key, the data will be unevenly distributed. -**Workaround**: Use the PL/pgSQL function to implement similar functionality on the database. +**Workaround**: -**Example** +Make the index range-sharded to distribute data based on the combined values of all columns, or reorder the index columns to place the high-cardinality column first. This enables sharding on the high-cardinality column and ensures even distribution across all nodes. 
+ +**Example**: An example schema on the source database is as follows: ```sql -CREATE TABLE customer_account ( - customer_id INT PRIMARY KEY, - balance NUMERIC(10, 2) NOT NULL -); - -INSERT INTO customer_account (customer_id, balance) -VALUES - (1, 100.00), - (2, 200.00), - (3, 300.00); +CREATE TYPE order_statuses AS ENUM ('CONFIRMED', 'SHIPPED', 'OUT FOR DELIVERY', 'DELIVERED', 'CANCELLED'); -CREATE TABLE recent_transactions ( - transaction_id SERIAL PRIMARY KEY, - customer_id INT NOT NULL, - transaction_value NUMERIC(10, 2) NOT NULL +CREATE TABLE orders ( + order_id int PRIMARY, + ..., + status order_statuses ); -INSERT INTO recent_transactions (customer_id, transaction_value) -VALUES - (1, 50.00), - (3, -25.00), - (4, 150.00); - -MERGE INTO customer_account ca -USING recent_transactions t -ON t.customer_id = ca.customer_id -WHEN MATCHED THEN - UPDATE SET balance = balance + transaction_value -WHEN NOT MATCHED THEN - INSERT (customer_id, balance) - VALUES (t.customer_id, t.transaction_value); -``` -Suggested schema change is to replace the MERGE command with a PL/pgSQL function similar to the following: +CREATE INDEX idx_order_status on orders (status); --single column index on column having only 5 values -```sql -CREATE OR REPLACE FUNCTION merge_customer_account() -RETURNS void AS $$ -BEGIN - -- Insert new rows or update existing rows in customer_account - INSERT INTO customer_account (customer_id, balance) - SELECT customer_id, transaction_value - FROM recent_transactions - ON CONFLICT (customer_id) - DO UPDATE - SET balance = customer_account.balance + EXCLUDED.balance; -END; -$$ LANGUAGE plpgsql; +CREATE INDEX idx_order_status_order_id on orders (status, order_id); --multi column index on first column with only 5 values ``` ---- - -### JSONB subscripting +Since the number of distinct values of the column `status` is 5, there will be a maximum of 5 tablets created, limiting the scalability. 
-**GitHub**: Issue [#25575](https://github.com/yugabyte/yugabyte-db/issues/25575) +Suggested change to both types of indexes is one of the following. -**Description**: If you are using the JSONB subscripting in app queries and in the schema (constraints or default expression) on your source database, then the app query will fail once you migrate your apps to YugabyteDB, and import-schema will fail if any DDL has this feature, as it's a PostgreSQL 15 feature. +Make it a multi-column range-index: -```output -ERROR: cannot subscript type jsonb because it is not an array -``` +```sql + --These indexes will distribute the data on the combined value of both and as order_id is high cardinality column, it will make sure that data is distributed evenly -**Workaround**: You can use the Arrow ( `-> / ->>` ) operators to access JSONB fields. +CREATE INDEX idx_order_status on orders(status ASC, order_id); --adding order_id and making it a range-sharded index explicitly -**Fixed In**: {{}}. +CREATE INDEX idx_order_status_order_id on orders (status ASC, order_id); --making it a range-sharded index explicitly +``` -**Example** -An example query / DDL on the source database is as follows: +Make it multi-column with a sharding key on a high-cardinality column: ```sql -SELECT ('{"a": {"b": {"c": "some text"}}}'::jsonb)['a']['b']['c']; - -CREATE TABLE test_jsonb_chk ( - id int, - data1 jsonb, - CHECK (data1['key']<>'{}') -); -``` -Suggested change in query to get it working- - -```sql -SELECT ((('{"a": {"b": {"c": "some text"}}}'::jsonb)->'a')->'b')->>'c'; +--these indexes will distribute the data on order_id first and then each shard is clustered on status -CREATE TABLE test_jsonb_chk ( - id int, - data1 jsonb, - CHECK (data1->'key'<>'{}') -); +CREATE INDEX idx_order_status on orders(order_id, status); --making it multi column by adding order_id as first column +CREATE INDEX idx_order_status_order_id on orders (order_id, status); --reordering the columns to place the order_id first and 
then keeping status. ``` --- -### Events Listen / Notify +### Index on column with a high percentage of NULL values -**GitHub**: Issue [#1872](https://github.com/yugabyte/yugabyte-db/issues/1872) +**Description**: -**Description**: If your application queries or PL/pgSQL objects rely on **LISTEN/NOTIFY events** in the source PostgreSQL database, these functionalities will not work after migrating to YugabyteDB. Currently, LISTEN/NOTIFY events are a no-op in YugabyteDB, and any attempt to use them will trigger a warning instead of performing the expected event-driven operations: +In YugabyteDB, you can specify three kinds of columns when using [CREATE INDEX](../../../api/ysql/the-sql-language/statements/ddl_create_index): sharding, clustering, and covering. (For more details, refer to [Secondary indexes](../../../explore/ysql-language-features/indexes-constraints/secondary-indexes-ysql/).) The default sharding strategy is HASH unless [Enhanced PostgreSQL Compatibility mode](../../../develop/postgresql-compatibility/) is enabled, in which case, RANGE is the default sharding strategy. -```sql -WARNING: LISTEN not supported yet and will be ignored -``` +Design the index to evenly distribute data across all nodes and optimize performance based on query patterns. -**Workaround**: Currently, there is no workaround. +If an index is created on a column with a high percentage of NULL values, all NULL entries will be stored in a single tablet. This concentration can create a hotspot, leading to performance degradation. -**Example:** +**Workaround**: If the NULL values are not being queried, it is recommended to create a Partial index by filtering the NULL values and optimizing it for the other data. 
-```sql -LISTEN my_table_changes; -INSERT INTO my_table (name) VALUES ('Charlie'); -NOTIFY my_table_changes, 'New row added with name: Charlie'; -``` +If NULL values are being queried and the index is a single-column index, it is recommended to add another column and make it a multi-column range-sharded index to distribute the NULL values evenly across various nodes. If the index is multi-column, it is recommended to make it a range-sharded index. ---- +**Example** -### Two-Phase Commit +An example schema on the source database is as follows: -**GitHub**: Issue [#11084](https://github.com/yugabyte/yugabyte-db/issues/11084) +```sql +CREATE TABLE users ( + user_id int PRIMARY, + first_name text, + middle_name text, + ... +); -**Description**: If your application queries or PL/pgSQL objects rely on [Two-Phase Commit protocol](https://www.postgresql.org/docs/11/two-phase.html) that allows multiple distributed systems to work together in a transactional manner in the source PostgreSQL database, these functionalities will not work after migrating to YugabyteDB. Currently, Two-Phase Commit is not implemented in YugabyteDB and will throw the following error when you attempt to execute the commands: +CREATE INDEX idx_users_middle_name on users (middle_name); -- this index is on middle name which is having 50% NULL values -```sql -ERROR: PREPARE TRANSACTION not supported yet +CREATE INDEX idx_users_middle_name_user_id on users (middle_name, user_id); -- this index is having first column as middle name which is having 50% NULL values ``` -**Workaround**: Currently, there is no workaround. +As these indexes have a sharding key on the `middle_name` column, where half of the values as NULL, half of the data resides on a single tablet and becomes a hotspot. ---- +Suggested change to the schema is one of the following. 
-### DDL operations within the Transaction +Partial indexing by removing the NULL values: -**GitHub**: Issue [#1404](https://github.com/yugabyte/yugabyte-db/issues/1404) +```sql +CREATE INDEX idx_users_middle_name on users (middle_name) where middle_name IS NOT NULL; --filtering the NULL values so those will not be indexed -**Description**: If your application queries or PL/pgSQL objects runs DDL operations inside transactions in the source PostgreSQL database, this functionality will not work after migrating to YugabyteDB. Currently, DDL operations in a transaction in YugabyteDB is not supported and will not work as expected. +CREATE INDEX idx_users_middle_name_user_id on users (middle_name, user_id) where middle_name IS NOT NULL; --filtering the NULL values so those will not be indexed +``` -**Workaround**: Currently, there is no workaround. -**Example:** +Making it a range-sharded index explicitly so that NULLs are evenly distributed across all nodes by using another column: ```sql -yugabyte=# \d test -Did not find any relation named "test". 
-yugabyte=# BEGIN; -BEGIN -yugabyte=*# CREATE TABLE test(id int, val text); -CREATE TABLE -yugabyte=*# \d test - Table "public.test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | | - val | text | | | -yugabyte=*# ROLLBACK; -ROLLBACK -yugabyte=# \d test - Table "public.test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | | - val | text | | | +CREATE INDEX idx_users_middle_name on users (middle_name ASC, user_id); --adding user_id + +CREATE INDEX idx_users_middle_name_user_id on users (middle_name ASC, user_id); + ``` --- -### Hotspots with range-sharded timestamp/date indexes +### Index on column with high percentage of a particular value -**Description**: Range-sharded indexes on timestamp or date columns can lead to read/write hotspots in distributed databases like YugabyteDB, due to the way these values increment. For example, take a column of values `created_at timestamp`. As new values are inserted, all the writes will go to the same tablet. This tablet remains a hotspot until it is manually split or meets the auto-splitting criteria. Then, after a split, the newly created tablet becomes the next hotspot as inserts continue to follow the same increasing pattern. This leads to uneven data and query distribution, resulting in performance bottlenecks. +**Description**: -Note that if the table is colocated, this hotspot concern can safely be ignored, as all the data resides on a single tablet, and the distribution is no longer relevant. +In YugabyteDB, you can specify three kinds of columns when using [CREATE INDEX](../../../api/ysql/the-sql-language/statements/ddl_create_index): sharding, clustering, and covering. (For more details, refer to [Secondary indexes](../../../explore/ysql-language-features/indexes-constraints/secondary-indexes-ysql/).) 
The default sharding strategy is HASH unless [Enhanced PostgreSQL Compatibility mode](../../../develop/postgresql-compatibility/) is enabled, in which case, RANGE is the default sharding strategy. -**Workaround**: To address this issue and improve query performance, application-level sharding is recommended. This approach involves adding an additional column to the table and creating a multi-column index including both the new column and the timestamp/date column. The additional column distributes data using a hash-based strategy, effectively spreading the load across multiple nodes. +Design the index to evenly distribute data across all nodes and optimize performance based on query patterns. -Implementing this solution requires minor adjustments to queries. In addition to range conditions on the timestamp/date column, the new sharding column should be included in the query filters to benefit from distributed execution. +If the index is designed for a column with a high percentage of a particular value in the data, all the data for that value will reside on a single tablet, which will become a hotspot, causing performance degradation. -Ensure that the index on the column is configured to be range-sharded. +**Workaround**: If the frequently occurring value is not being queried, it is recommended that a Partial index be created by filtering this value, optimizing it for other data. -References: [How to Avoid Hotspots on Range-based Indexes in Distributed Databases](https://www.yugabyte.com/blog/distributed-databases-hotspots-range-based-indexes/), [[YFTT] Avoiding Hot-Spots on Timestamp Based Index](https://www.youtube.com/watch?v=tiYZn0U1wzY) +If the value is being queried and the index is a single-column index, it is recommended to add another column and make it a multi-column range-sharded index to distribute the value evenly across various nodes. If the index is multi-column, it is recommended to make it a range-sharded index. 
**Example** An example schema on the source database is as follows: ```sql -CREATE TABLE orders ( - order_id int PRIMARY, +CREATE TABLE user_activity ( + user_id int PRIMARY, + event_type text, --type of the activity 'login', 'logout', 'profile_update +, 'email_verification', so on.. various events + event_timestamp timestampz, ... - created_at timestamp ); -CREATE INDEX idx_orders_created ON orders(created_at DESC); +CREATE INDEX idx_user_activity_event_type on user_activity (event_type); --this index is on the event_type which is having 80% data with 'login' type + +CREATE INDEX idx_user_activity_event_type_user_id on user_activity (event_type, user_id); --this index is on the event_type which is having 80% data with 'login' type -SELECT * FROM orders WHERE created_at >= NOW() - INTERVAL '1 month'; -- for fetching orders of last one month ``` -Suggested change to the schema is to add the column `shard_id` with a default value as an integer between 0 and the number of shards required for the use case. In addition, you add this column to the index columns with hash sharding. In this way the data is distributed by `shard_id` and ordered based on `created_at`. +As these indexes have a sharding key on the `event_type` column, where the value ‘login’ is 80% of the data, 80% of the data resides on a single tablet, which becomes a hotspot. -This also requires modifying the range queries to include the `shard_id` in the filter to help the optimizer. In this example, you specify the shard IDs in the IN clause. +Suggested change to the schema is one of the following. -```sql -CREATE TABLE orders ( - order_id int PRIMARY, - ..., - shard_id int DEFAULT (floor(random() * 100)::int % 16), - created_at timestamp -); +Partial indexing by removing the ‘login’ value from the index to optimize it for other values. 
-CREATE INDEX idx_orders_created ON orders(shard_id HASH, created_at DESC); +```sql +CREATE INDEX idx_user_activity_event_type on user_activity (event_type) where event_type <> 'login'; --filtering the 'login' values so those will not be indexed -SELECT * FROM orders WHERE shard_id IN (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15) AND created_at >= NOW() - INTERVAL '1 month'; -- for fetching orders of last one month +CREATE INDEX idx_user_activity_event_type_user_id on user_activity (event_type, user_id) where event_type <> 'login'; --filtering the 'login' values so those will not be indexed ``` -### Redundant indexes - -**Description**: A redundant index is an index that duplicates the functionality of another index or is unnecessary because the database can use an existing index to achieve the same result. This happens when multiple indexes cover the same columns or when a subset of columns in one index is already covered by another. - -**Workaround**: Remove the redundant index from the schema. - -**Example** +OR -An example schema on the source database is as follows: +Explicitly making it a range-sharded index so that the 'login' value is evenly distributed across all nodes by adding another column. ```sql -CREATE TABLE orders ( - order_id int PRIMARY, - product_id int, - ... 
-); - -CREATE INDEX idx_orders_order_id on orders(order_id); -CREATE INDEX idx_orders_order_id_product_id on orders(order_id, product_id); -``` +CREATE INDEX idx_user_activity_event_type on user_activity (event_type ASC, user_id); --adding column user_id -Suggested change to the schema is to remove this redundant index `idx_orders_order_id` as another stronger index is present `idx_orders_order_id_product_id`: +CREATE INDEX idx_user_activity_event_type_user_id on user_activity (event_type ASC, user_id) -```sql -CREATE INDEX idx_orders_order_id on orders(order_id); ``` diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md b/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md index b5cca4ab0108..73771eb76eb4 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-fall-back.md @@ -678,7 +678,7 @@ The `yb-voyager export schema` command extracts the schema from the source datab The `source_db_schema` argument specifies the schema of the source database. -- For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. +For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. {{< /note >}} @@ -696,6 +696,8 @@ yb-voyager export schema --export-dir \ ``` +Note that if the source database is PostgreSQL and you haven't already run `assess-migration`, the schema is also assessed and a migration assessment report is generated. + Refer to [export schema](../../reference/schema-migration/export-schema/) for details about the arguments. 
#### Analyze schema diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md b/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md index 8e5ceb15606e..cccd44a8be59 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-fall-forward.md @@ -689,7 +689,7 @@ The `yb-voyager export schema` command extracts the schema from the source datab The `source_db_schema` argument specifies the schema of the source database. -- For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. +For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. {{< /note >}} @@ -707,6 +707,8 @@ yb-voyager export schema --export-dir \ ``` +Note that if the source database is PostgreSQL and you haven't already run `assess-migration`, the schema is also assessed and a migration assessment report is generated. + Refer to [export schema](../../reference/schema-migration/export-schema/) for details about the arguments. #### Analyze schema diff --git a/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md b/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md index bedcb12983ee..1d46654a3c4d 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md +++ b/docs/content/preview/yugabyte-voyager/migrate/live-migrate.md @@ -539,7 +539,7 @@ The `yb-voyager export schema` command extracts the schema from the source datab The `source_db_schema` argument specifies the schema of the source database. -- For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. +For Oracle, `source-db-schema` can take only one schema name and you can migrate _only one_ schema at a time. 
{{< /note >}} @@ -557,6 +557,8 @@ yb-voyager export schema --export-dir \ ``` +Note that if the source database is PostgreSQL and you haven't already run `assess-migration`, the schema is also assessed and a migration assessment report is generated. + Refer to [export schema](../../reference/schema-migration/export-schema/) for details about the arguments. #### Analyze schema diff --git a/docs/content/preview/yugabyte-voyager/migrate/migrate-steps.md b/docs/content/preview/yugabyte-voyager/migrate/migrate-steps.md index e3e49de0bdf9..079f0c4a8667 100644 --- a/docs/content/preview/yugabyte-voyager/migrate/migrate-steps.md +++ b/docs/content/preview/yugabyte-voyager/migrate/migrate-steps.md @@ -180,6 +180,8 @@ yb-voyager export schema --export-dir \ ``` +Note that if the source database is PostgreSQL and you haven't already run `assess-migration`, the schema is also assessed and a migration assessment report is generated. + Refer to [export schema](../../reference/schema-migration/export-schema/) for details about the arguments. #### Analyze schema diff --git a/docs/content/preview/yugabyte-voyager/release-notes.md b/docs/content/preview/yugabyte-voyager/release-notes.md index 840b05e55a47..4e539190e120 100644 --- a/docs/content/preview/yugabyte-voyager/release-notes.md +++ b/docs/content/preview/yugabyte-voyager/release-notes.md @@ -13,9 +13,35 @@ type: docs What follows are the release notes for the YugabyteDB Voyager v1 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v1 series. +## Versioning + +Voyager releases (starting with v2025.5.2) use the numbering format `YYYY.M.N`, where `YYYY` is the release year, `M` is the month, and `N` is the number of the release in that month. + +## v2025.5.2 - May 20, 2025 + +### New features + +- Added support for using a config file to manage parameters in offline migration using `yb-voyager`. 
+ +### Enhancements + +- If you run `export schema` without first running `assess-migration`, Voyager will now automatically assess the migration before exporting the schema for PostgreSQL source databases. +- Performance optimizations are now reported only in assessment reports, not in schema analysis reports. +- Assessment Report + - The assessment report now includes detailed recommendations related to index design to help you identify potential uneven distribution or hotspot issues in YugabyteDB. This includes: + - Indexes on low-cardinality columns (for example, `BOOLEAN` or `ENUM`) + - Indexes on columns with a high percentage of `NULL` values + - Indexes on columns with a high frequency of a particular value +- Import Data + - The `import-data` command now monitors replication (CDC/xCluster) only for the target database specified in the migration. This avoids false positives caused by replication streams on other databases. + +### Bug fixes + +- Fixed an issue where left-padded zeros in PostgreSQL `BIT VARYING` columns were incorrectly omitted during live migration. + ## v1.8.17 - May 6, 2025 -### New Feature +### New feature - New Command: `finalize-schema-post-data-import` This command is used to re-add NOT VALID constraints and refresh materialized views after import, and replaces the use of `import schema` with the `--post-snapshot-import true` and `--refresh-mviews` flags; both of these flags are now deprecated in import schema. @@ -32,7 +58,7 @@ What follows are the release notes for the YugabyteDB Voyager v1 release series. ## v1.8.16 - April 22, 2025 -### New Features +### New features - Regularly monitor the YugabyteDB cluster during data import to ensure good health and prevent suboptimal configurations. - If a YugabyteDB node goes down, the terminal UI notifies the user, and Voyager automatically shifts the load to the remaining nodes. @@ -85,7 +111,7 @@ What follows are the release notes for the YugabyteDB Voyager v1 release series. 
- Merged the ALTER TABLE ADD constraints DDL (Primary Key, Unique Key, and Check Constraints) with the CREATE TABLE statement, reducing the number of DDLs to analyze/review and improving overall import schema performance. - Introduced a guardrails check to ensure live migration uses a single, fixed table list throughout the migration, preventing any changes to the table list after the migration has started. -### Bug Fixes +### Bug fixes - Fixed an issue where the `iops-capture-interval` flag in the assess-migration command did not honor the user-defined value and always defaulted to its preset. - Fixed an issue in the IOPs calculation logic, ensuring it counts the number of scans (both sequential and index) instead of using `seq_tup_read` for read statistics. @@ -202,14 +228,14 @@ What follows are the release notes for the YugabyteDB Voyager v1 release series. - Miscellaneous - Enhanced guardrail checks in import-schema for YugabyteDB Aeon. -### Bug Fixes +### Bug fixes - Skip Unsupported Query Constructs detection if `pg_stat_statements` is not loaded via `shared_preloaded_libraries`. - Prevent Voyager from panicking/erroring out in case of `analyze-schema` and `import data` when `export-dir` is empty. ## v1.8.7 - December 10, 2024 -### New Features +### New features - Introduced a framework in the `assess-migration` and `analyze-schema` commands to accept the target database version (`--target-db-version` flag) as input and use it for reporting issues not supported in that target version for the source schema. @@ -231,7 +257,7 @@ What follows are the release notes for the YugabyteDB Voyager v1 release series. ## v1.8.6 - November 26, 2024 -### New Features +### New features - Unsupported PL/pgSQL objects detection. Migration assessment and schema analysis commands can now detect and report SQL features and constructs in PL/pgSQL objects in the source schema that are not supported by YugabyteDB. 
This includes detecting advisory locks, system columns, and XML functions. Voyager reports individual queries in these objects that contain unsupported constructs, such as queries in PL/pgSQL blocks for functions and procedures, or select statements in views and materialized views. @@ -352,7 +378,7 @@ To bypass this issue, set the environment variable `REPORT_UNSUPPORTED_QUERY_CON ## v1.8 - September 3, 2024 -### New Features +### New features - Introduced the notion of Migration complexity in assessment and analyze-schema reports, which range from LOW to MEDIUM to HIGH. For PostgreSQL source, this depends on the number and complexity of the PostgreSQL features present in the schema that are unsupported in YugabyteDB. - Introduced a bulk assessment command (`assess-migration-bulk`) for Oracle which allows you to assess multiple schemas in one or more database instances simultaneously. @@ -408,7 +434,7 @@ To bypass this issue, set the environment variable `REPORT_UNSUPPORTED_QUERY_CON ## v1.7.1 - May 28, 2024 -### Bug Fixes +### Bug fixes - Fixed a bug where [export data](../reference/data-migration/export-data/) command ([live migration](../migrate/live-migrate/)) from Oracle source fails with a "table already exists" error, when stopped and re-run (resuming CDC phase of export-data). - Fixed a known issue in the dockerized version of yb-voyager where commands [get data-migration-report](../reference/data-migration/import-data/#get-data-migration-report) and [end migration](../reference/end-migration/) did not work if you had previously passed ssl-cert/ssl-key/ssl-root-cert in [export data](../reference/data-migration/export-data/) or [import data](../reference/data-migration/import-data/) or [import data to source replica](../reference/data-migration/import-data/#import-data-to-source-replica) commands. 
@@ -489,7 +515,7 @@ To bypass this issue, set the environment variable `REPORT_UNSUPPORTED_QUERY_CON ## v1.6 - November 30, 2023 -### New Features +### New features - Live migration diff --git a/docs/content/stable/api/ysql/the-sql-language/statements/ddl_alter_table.md b/docs/content/stable/api/ysql/the-sql-language/statements/ddl_alter_table.md index fb1aa204cc21..1150c9dd2c95 100644 --- a/docs/content/stable/api/ysql/the-sql-language/statements/ddl_alter_table.md +++ b/docs/content/stable/api/ysql/the-sql-language/statements/ddl_alter_table.md @@ -27,7 +27,7 @@ Use the `ALTER TABLE` statement to change the definition of a table.

{{< note title="Table inheritance is not yet supported" >}} -YSQL in the present "latest" YugabyteDB does not yet support the "table inheritance" feature that is described in the [PostgreSQL documentation](https://www.postgresql.org/docs/11/ddl-inherit.html). The attempt to create a table that inherits another table causes the _0A000 (feature_not_supported)_ error with the message _"INHERITS not supported yet"_. This means that the syntax that the `table_expr` rule allows doesn't not yet bring any useful meaning. +YSQL in the present "latest" YugabyteDB does not yet support the "table inheritance" feature that is described in the [PostgreSQL documentation](https://www.postgresql.org/docs/11/ddl-inherit.html). The attempt to create a table that inherits another table causes the _0A000 (feature_not_supported)_ error with the message _"INHERITS not supported yet"_. This means that the syntax that the `table_expr` rule allows doesn't yet bring any useful meaning. It says that you can write, for example, this: @@ -50,9 +50,23 @@ These variants are useful only when at least one other table inherits `t`. But a Specify one of the following actions. -#### ADD [ COLUMN ] [ IF NOT EXISTS ] *column_name* *data_type* [*constraint*](#constraints) +#### ADD [ COLUMN ] [ IF NOT EXISTS ] *column_name* *data_type* *constraint* -Add the specified column with the specified data type and constraint. +Add the specified column with the specified data type and [constraint](#constraints). + +##### Table rewrites + +ADD COLUMN … DEFAULT statements require a [table rewrite](#alter-table-operations-that-involve-a-table-rewrite) when the default value is a _volatile_ expression. [Volatile expressions](https://www.postgresql.org/docs/current/xfunc-volatility.html#XFUNC-VOLATILITY) can return different results for different rows, so a table rewrite is required to fill in values for existing rows. For non-volatile expressions, no table rewrite is required. 
+ +Examples of volatile expressions: + +- ALTER TABLE … ADD COLUMN v1 INT DEFAULT random() +- ALTER TABLE .. ADD COLUMN v2 UUID DEFAULT gen_random_uuid() + +Examples of non-volatile expressions (no table rewrite): + +- ALTER TABLE … ADD COLUMN nv1 INT DEFAULT 5 +- ALTER TABLE … ADD COLUMN nv2 timestamp DEFAULT now() -- uses the same timestamp now() for all existing rows #### RENAME TO *table_name* @@ -64,11 +78,12 @@ Renaming a table is a non blocking metadata change operation. {{< /note >}} - #### SET TABLESPACE *tablespace_name* Asynchronously change the tablespace of an existing table. + The tablespace change will immediately reflect in the config of the table, however the tablet move by the load balancer happens in the background. + While the load balancer is performing the move it is perfectly safe from a correctness perspective to do reads and writes, however some query optimization that happens based on the data location may be off while data is being moved. ##### Example @@ -83,8 +98,8 @@ DETAIL: Data movement is a long running asynchronous process and can be monitor ALTER TABLE ``` - Tables can be moved to the default tablespace using: + ```sql ALTER TABLE table_name SET TABLESPACE pg_default; ``` @@ -218,24 +233,20 @@ alter table parents drop column b cascade; It quietly succeeds. Now `\d children` shows that the foreign key constraint `children_fk` has been transitively dropped. -#### ADD [*alter_table_constraint*](#constraints) +#### ADD *alter_table_constraint* -Add the specified constraint to the table. +Add the specified [constraint](#constraints) to the table. +##### Table rewrites -{{< warning >}} -Adding a `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table. -This happens because of the clustered storage by primary key that YugabyteDB uses to store rows and indexes. -Tables without a `PRIMARY KEY` have a hidden one underneath and rows are stored clustered on it. 
The secondary indexes of the table -link to this hidden `PRIMARY KEY`. -While the tables and indexes are being rewritten, you may lose any modifications made to the table. -For reference, the same semantics as [Alter type with table rewrite](#alter-type-with-table-rewrite) apply. -{{< /warning >}} +Adding a `PRIMARY KEY` constraint results in a full table rewrite of the main table and all associated indexes, which can be a potentially expensive operation. For more details about table rewrites, see [Alter table operations that involve a table rewrite](#alter-table-operations-that-involve-a-table-rewrite). + +The table rewrite is needed because of how YugabyteDB stores rows and indexes. In YugabyteDB, data is distributed based on the primary key; when a table does not have an explicit primary key assigned, YugabyteDB automatically creates an internal row ID to use as the table's primary key. As a result, these rows need to be rewritten to use the newly added primary key column. For more information, refer to [Primary keys](../../../../../develop/data-modeling/primary-keys-ysql). #### ALTER [ COLUMN ] *column_name* [ SET DATA ] TYPE *data_type* [ COLLATE *collation* ] [ USING *expression* ] Change the type of an existing column. The following semantics apply: -- If data on disk is required to change, a full table rewrite is needed. + - If the optional `COLLATE` clause is not specified, the default collation for the new column type will be used. - If the optional `USING` clause is not provided, the default conversion for the new column value will be the same as an assignment cast from the old type to the new type. - A `USING` clause must be included when there is no implicit assignment cast available from the old type to the new type. @@ -243,48 +254,51 @@ Change the type of an existing column. The following semantics apply: - Alter type is not supported for tables with rules (limitation inherited from PostgreSQL). 
- Alter type is not supported for tables with CDC streams, or xCluster replication when it requires data on disk to change. See [#16625](https://github.com/yugabyte/yugabyte-db/issues/16625). -##### Alter type without table-rewrite +##### Table rewrites -If the change doesn't require data on disk to change, concurrent DMLs to the table can be safely performed as shown in the following example: +Altering a column's type requires a [full table rewrite](#alter-table-operations-that-involve-a-table-rewrite), and any indexes that contain this column when the underlying storage format changes or if the data changes. +The following type changes commonly require a table rewrite: -```sql -CREATE TABLE test (id BIGSERIAL PRIMARY KEY, a VARCHAR(50)); -ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(51); -``` +| From | To | Reason for table rewrite | +| ------------ | -------------- | --------------------------------------------------------------------- | +| INTEGER | TEXT | Different storage formats. | +| TEXT | INTEGER | Needs parsing and validation. | +| JSON | JSONB | Different internal representation. | +| UUID | TEXT | Different binary format. | +| BYTEA | TEXT | Different encoding. | +| TIMESTAMP | DATE | Loses time info; storage changes. | +| BOOLEAN | INTEGER | Different sizes and encoding. | +| REAL | NUMERIC | Different precision and format. | +| NUMERIC(p,s) | NUMERIC(p2,s2) | Requires data changes if scale is changed or if precision is smaller. | -##### Alter type with table rewrite +The following type changes do not require a rewrite when there is no associated index table on the column. When there is an associated index table on the column, a rewrite is performed on the index table alone but not on the main table. 
-If the change requires data on disk to change, a full table rewrite will be done and the following semantics apply: -- The action creates an entirely new table under the hood, and concurrent DMLs may not be reflected in the new table which can lead to correctness issues. -- The operation preserves split properties for hash-partitioned tables and hash-partitioned secondary indexes. For range-partitioned tables (and secondary indexes), split properties are only preserved if the altered column is not part of the table's (or secondary index's) range key. +| From | To | Notes | +| ------------ | ------------------ | ------------------------------------------------------ | +| VARCHAR(n) | VARCHAR(m) (m > n) | Length increase is compatible. | +| VARCHAR(n) | TEXT | Always compatible. | +| SERIAL | INTEGER | Underlying type is INTEGER; usually OK. | +| NUMERIC(p,s) | NUMERIC(p2,s2) | If new precision is larger and scale remains the same. | +| CHAR(n) | CHAR(m) (m > n) | PG stores it as padded TEXT, so often fine. | +| Domain types | Their base type | Compatible, unless additional constraints exist. | -Following is an example of alter type with table rewrite: +Altering a column with a (non-trivial) USING clause always requires a rewrite. -```sql -CREATE TABLE test (id BIGSERIAL PRIMARY KEY, a VARCHAR(50)); -INSERT INTO test(a) VALUES ('1234555'); -ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(40); --- try to change type to BIGINT -ALTER TABLE test ALTER COLUMN a TYPE BIGINT; -ERROR: column "a" cannot be cast automatically to type bigint -HINT: You might need to specify "USING a::bigint". --- use USING clause to cast the values -ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING a::BIGINT; -``` +The table rewrite operation preserves split properties for hash-partitioned tables and hash-partitioned secondary indexes. 
For range-partitioned tables (and secondary indexes), split properties are only preserved if the altered column is not part of the table's (or secondary index's) range key. -Another option is to use a custom function as follows: +For example, the following ALTER TYPE statements would cause a table rewrite: -```sql -CREATE OR REPLACE FUNCTION myfunc(text) RETURNS BIGINT - AS 'select $1::BIGINT;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; +- ALTER TABLE foo + ALTER COLUMN foo_timestamp TYPE timestamp with time zone + USING + timestamp with time zone 'epoch' + foo_timestamp * interval '1 second'; +- ALTER TABLE t ALTER COLUMN t_num1 TYPE NUMERIC(9,5) -- from NUMERIC(6,1); +- ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING a::BIGINT; -- from INT -ALTER TABLE test ALTER COLUMN a SET DATA TYPE BIGINT USING myfunc(a); -``` +The following ALTER TYPE statement does not cause a table rewrite: +- ALTER TABLE test ALTER COLUMN a TYPE VARCHAR(51); -- from VARCHAR(50) #### DROP CONSTRAINT *constraint_name* [ RESTRICT | CASCADE ] @@ -293,13 +307,9 @@ Drop the named constraint from the table. - `RESTRICT` — Remove only the specified constraint. - `CASCADE` — Remove the specified constraint and any dependent objects. -{{< warning >}} -Dropping the `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table. -This happens because of the clustered storage by primary key that YugabyteDB uses to store rows and indexes. -While the tables and indexes are being rewritten, you may lose any modifications made to the table. -For reference, the same semantics as [Alter type with table rewrite](#alter-type-with-table-rewrite) apply. -{{< /warning >}} +##### Table rewrites +Dropping the `PRIMARY KEY` constraint results in a full table rewrite and full rewrite of all indexes associated with the table, which is a potentially expensive operation. 
For more details and common limitations of table rewrites, refer to [Alter table operations that involve a table rewrite](#alter-table-operations-that-involve-a-table-rewrite). #### RENAME [ COLUMN ] *column_name* TO *column_name* @@ -322,15 +332,21 @@ ALTER TABLE test RENAME CONSTRAINT vague_name TO unique_a_constraint; #### ENABLE / DISABLE ROW LEVEL SECURITY This enables or disables row level security for the table. + If enabled and no policies exist for the table, then a default-deny policy is applied. + If disabled, then existing policies for the table will not be applied and will be ignored. + See [CREATE POLICY](../dcl_create_policy) for details on how to create row level security policies. #### FORCE / NO FORCE ROW LEVEL SECURITY This controls the application of row security policies for the table when the user is the table owner. + If enabled, row level security policies will be applied when the user is the table owner. + If disabled (the default) then row level security will not be applied when the user is the table owner. + See [CREATE POLICY](../dcl_create_policy) for details on how to create row level security policies. ### Constraints @@ -368,6 +384,24 @@ Constraints marked as `INITIALLY IMMEDIATE` will be checked after every row with Constraints marked as `INITIALLY DEFERRED` will be checked at the end of the transaction. +## Alter table operations that involve a table rewrite + +Most ALTER TABLE statements only involve a schema modification and complete quickly. However, certain specific ALTER TABLE statements require a new copy of the underlying table (and associated index tables, in some cases) to be made and can potentially take a long time, depending on the sizes of the tables and indexes involved. This is typically referred to as a "table rewrite". 
This behavior is [similar to PostgreSQL](https://www.crunchydata.com/blog/when-does-alter-table-require-a-rewrite), though the exact scenarios when a rewrite is triggered may differ between PostgreSQL and YugabyteDB. + +It is not safe to execute concurrent DML on the table during a table rewrite because the results of any concurrent DML are not guaranteed to be reflected in the copy of the table being made. This restriction is similar to PostgreSQL, which explicitly prevents concurrent DML during a table rewrite by acquiring an ACCESS EXCLUSIVE table lock. + +If you need to perform more than one of these expensive rewrites, it is recommended to combine them into a single ALTER TABLE statement to avoid multiple expensive rewrites. For example: + +```sql +ALTER TABLE t ADD COLUMN c6 UUID DEFAULT gen_random_uuid(), ALTER COLUMN c8 TYPE TEXT +``` + +The following ALTER TABLE operations involve making a full copy of the underlying table (and possibly associated index tables): + +1. [Adding](#add-alter) or [dropping](#drop-constraint-constraint-restrict-cascade) the primary key of a table. +1. [Adding a column with a (volatile) default value](#add-column-if-not-exists-column-data-constraint). +1. [Changing the type of a column](#alter-column-column-set-data-type-data-collate-collation-using-expression). + ## See also -- [`CREATE TABLE`](../ddl_create_table) +- [CREATE TABLE](../ddl_create_table) diff --git a/docs/content/stable/architecture/docdb-replication/async-replication.md b/docs/content/stable/architecture/docdb-replication/async-replication.md index 5ff49061f385..999d231c6f8a 100644 --- a/docs/content/stable/architecture/docdb-replication/async-replication.md +++ b/docs/content/stable/architecture/docdb-replication/async-replication.md @@ -12,164 +12,189 @@ menu: type: docs --- -## Synchronous versus asynchronous replication - -YugabyteDB's [synchronous replication](../replication/) can be used to tolerate losing entire data centers or regions. 
It replicates data in a single universe spread across multiple (three or more) data centers so that the loss of one data center does not impact availability, durability, or strong consistency courtesy of the Raft consensus algorithm. - -However, synchronous replication has two important drawbacks when used this way: - -- _High write latency_: each write must achieve consensus across at least two data centers, which means at least one round trip between data centers. This can add tens or even hundreds of milliseconds of extra latency in a multi-region deployment. - -- _Need for at least three data centers_: to tolerate the failure of `f` fault domains, you need at least `2f + 1` fault domains. So, to survive the loss of one data center, you need at least three data centers, which adds operational cost. See [fault tolerance](../replication/#fault-tolerance) for more information. +## YugabyteDB's xCluster replication -As an alternative, YugabyteDB provides asynchronous replication that replicates data between two or more separate universes. It does not suffer from the drawbacks of synchronous replication: because it is done in the background, it does not impact write latency, and because it does not use consensus it does not require a third data center. +xCluster replication is YugabyteDB's implementation of high throughput asynchronous physical replication between two YugabyteDB universes. It allows you to set up one or more unidirectional replication _flows_ between universes. For each flow, data is replicated from a _source_ (also called a producer) universe to a _target_ (also called a consumer) universe. Replication is done at the DocDB layer, by efficiently replicating WAL records asynchronously to the target universe. Both YSQL and YCQL are supported. 
-Asynchronous replication has its own drawbacks, however, including: +Multiple flows can be configured; for instance, setting up two unidirectional flows between two universes, one in each direction, enables bidirectional replication. This ensures that data written in one universe is replicated to the other without causing infinite loops. Refer to [supported deployment scenarios](#deployment-scenarios) for details on the supported flow combinations. -- __Data loss on failure__: when a universe fails, the data in it that has not yet been replicated will be lost. The amount of data lost depends on the replication lag, which is usually subsecond. +For simplicity, flows are described as being between entire universes. However, flows are actually composed of streams between pairs of YCQL tables or YSQL databases, one in each universe, allowing replication of only certain tables or databases. -- __Limitations on transactionality__: Because transactions in the universes cannot coordinate with each other, either the kinds of transactions must be restricted or some consistency and isolation must be lost. +Note that xCluster can only be used to replicate between primary clusters in two different universes; it cannot be used to replicate between clusters in the same universe. (See [universe versus cluster](../../key-concepts/#universe) for more on the distinction between universes and clusters.) +{{< tip >}} +To understand the difference between xCluster, Geo-Partitioning, and Read Replicas, refer to [Multi-Region Deployments](../../../explore/multi-region-deployments/). +{{< /tip >}} -## YugabyteDB's xCluster replication +## Synchronous versus asynchronous replication -xCluster replication is YugabyteDB's implementation of asynchronous replication for disaster recovery. It allows you to set up one or more unidirectional replication _flows_ between universes. 
Note that xCluster can only be used to replicate between primary clusters in two different universes; it cannot be used to replicate between clusters in the same universe. (See [universe versus cluster](../../key-concepts/#universe) for more on the distinction between universes and clusters.) +YugabyteDB's [synchronous replication](../replication/) can be used to tolerate losing entire data centers or regions. It replicates data in a single universe spread across multiple (three or more) data centers so that the loss of one data center does not impact availability, durability, or strong consistency enabled by the Raft consensus algorithm. -For each flow, data is replicated from a _source_ (also called a producer) universe to a _target_ (also called a consumer) universe. Replication is done at the DocDB level, with newly committed writes in the source universe asynchronously replicated to the target universe. Both YSQL and YCQL are supported. +However, asynchronous replication can be beneficial in certain scenarios: -Multiple flows can be used; for example, two unidirectional flows between two universes, one in each direction, produce bidirectional replication where anything written in one universe will be replicated to the other — data is only asynchronously replicated once to avoid infinite loops. See [supported deployment scenarios](#supported-deployment-scenarios) for which flow combinations are currently supported. +- **Low write latency**: With synchronous replication, each write must reach a consensus across a majority of data centers. This can add tens or even hundreds of milliseconds of extra latency for writes in a multi-region deployment. xCluster reduces this latency by eliminating the need for immediate consensus across regions. +- **Only two data centers needed**: With synchronous replication, to tolerate the failure of `f` fault domains, you need at least `2f + 1` fault domains. 
Therefore, to survive the loss of one data center, a minimum of three data centers is required, which can increase operational costs. For more details, see [fault tolerance](../replication/#fault-tolerance). With xCluster, you can achieve multi-region deployments with only two data centers. +- **Disaster recovery**: xCluster uses independent YugabyteDB universes in each region that can function independently of each other. This setup allows for quick failover with minimal data loss in the event of a regional outage caused by hardware or software issues. -Although for simplicity, we will describe flows between entire universes, flows are actually composed of streams between pairs of tables, one in each universe, allowing replication of only certain namespaces or tables. +Asynchronous xCluster replication has the following drawbacks: -xCluster is more flexible than a hypothetical scheme whereby read replicas are promoted to full replicas when primary replicas are lost because it does not require the two universes to be tightly coupled. With xCluster, for example, the same table can be split into tablets in different ways in the two universes. xCluster also allows for bidirectional replication, which is not possible using read replicas because read replicas cannot take writes. +- **Potential data loss**: In the event of a data center failure, any data that has not yet been replicated to the secondary data center will be lost. The extent of data loss is determined by the replication lag, which is typically subsecond but can vary depending on the network conditions between the data centers. +- **Stale reads**: When reading from the secondary data center, there may be a delay in data availability due to the asynchronous nature of the replication. This can result in stale reads, which may not reflect the most recent writes. Non-transactional modes can serve torn reads of recently written data. 
+{{< tip title="Deploy" >}} +To better understand how xCluster replication works in practice, check out [xCluster deployment](../../../deploy/multi-dc/async-replication/). +{{< /tip >}} ## Asynchronous replication modes Because there is a useful trade-off between how much consistency is lost and what transactions are allowed, YugabyteDB provides two different modes of asynchronous replication: -- __non-transactional replication__: all transactions are allowed but some consistency is lost -- __transactional replication__: consistency is preserved but target-universe transactions must be read-only +- Non-transactional replication: Writes are allowed on the target universe, but reads of recently replicated data can be inconsistent. +- Transactional replication: Consistency of reads is preserved on the target universe, but writes are not allowed. ### Non-transactional replication -Here, after each transaction commits in the source universe, its writes are independently replicated to the target universe where they are applied with the same timestamp they had on the source universe. No locks are taken or honored on the target side. +All writes to the source universe are independently replicated to the target universe, where they are applied with the same timestamp they committed on the source universe. No locks are taken or honored on the target side. -Note that the writes are usually being written in the past as far as the target universe is concerned. This violates the preconditions for YugabyteDB serving consistent reads (see the discussion on [safe timestamps](../../transactions/single-row-transactions/#safe-timestamp-assignment-for-a-read-request)). Accordingly, reads on the target universe are no longer strongly consistent but rather eventually consistent. +Due to replication lag, a read performed in the target universe immediately after a write in the source universe may not reflect the recent write. 
In other words, reads in the target universe do not wait for the latest data from the source universe to become available.

-If both target and source universes write to the same key then the last writer wins. The deciding factor is the underlying hybrid time of the updates from each universe.
+Note that the writes are usually being written in the past as far as the target universe is concerned. This violates the preconditions for YugabyteDB serving consistent reads (see the discussion on [safe timestamps](../../transactions/single-row-transactions/#safe-timestamp-assignment-for-a-read-request)). Accordingly, reads on the target universe are no longer strongly consistent but rather eventually consistent, even within a single table.

-Because of replication lag, a read done immediately in the target universe after a write done on the source universe may not see that write. Another way of putting this is that reads in the target universe do not wait for up-to-date data from the source universe to become visible.
+If both target and source universes write to the same key, then the last writer wins. The deciding factor is the underlying hybrid time of the updates from each universe.

#### Inconsistencies affecting transactions

-Because the writes are being independently replicated, a transaction from the source universe becomes visible over time. This means transactions in the target universe can see non-repeated reads and phantom reads no matter what their declared isolation level is. Effectively then all transactions on the target universe are at SQL-92 isolation level READ COMMITTED, which only guarantees that transactions never read uncommitted data. Unlike the normal YugabyteDB READ COMMITTED level, this does not guarantee a statement will see a consistent snapshot or all the data that has been committed before the statement is issued.
+Because writes are replicated independently, the writes of a transaction from the source universe become visible incrementally over time rather than atomically.
This results in transactions on the target universe experiencing non-repeatable reads and phantom reads, regardless of their declared isolation level. Consequently, all transactions on the target universe effectively operate at the SQL-92 isolation level READ COMMITTED, which only ensures that transactions do not read uncommitted data. Unlike the standard YugabyteDB READ COMMITTED level, this does not guarantee that a statement will see a consistent snapshot or all data committed before the statement is issued. -If the source universe dies, then the target universe may be left in an inconsistent state where some source universe transactions have only some of their writes applied in the target universe (these are called _torn transactions_). This inconsistency will not automatically heal over time and may need to be manually resolved. +If the source universe fails, the target universe may be left in an inconsistent state where some source universe transactions have only some of their writes applied in the target universe (these are called _torn transactions_). This inconsistency will not automatically heal over time and may need to be manually resolved. Note that these inconsistencies are limited to the tables/rows being written to and replicated from the source universe: any target transaction that does not interact with such rows is unaffected. +{{< tip >}} +For YSQL deployments, transactional mode is preferred because it provides the necessary consistency guarantees typically required for such deployments. +{{< /tip >}} + ### Transactional replication -This mode is an extension of the previous one. In order to restore consistency, we additionally disallow writes on the target universe and cause reads to read as of a time far enough in the past (typically 250 ms) that all the relevant data from the source universe has already been replicated. 
+In this mode, reads occur at a time sufficiently in the past (typically 1-2 seconds) to ensure that all relevant data from the source universe has already been replicated. Additionally, writes to the target universe are not allowed. + +Reads occur as of the _xCluster safe time_, ensuring that all writes from all source transactions that will commit at or before the _xCluster safe time_ have been replicated to the target universe. This means we read as of a time far enough in the past that there cannot be new incoming commits at or before that time. This guarantees consistent reads and ensures source universe transactions become visible atomically. Note that the _xCluster safe time_ is not blocked by any in-flight or long-running source-universe transactions. + +_xCluster safe time_ advances as replication proceeds but lags behind real-time by the current replication lag. This means, for example, if we write at 2:00:00 PM in the source universe and read at 2:00:01 PM in the target universe and replication lag is, say, five seconds, then the read may read as of 1:59:56 PM and will not see the write. We may not be able to see the write until 2:00:06 PM in the target universe, assuming the replication lag remains at five seconds. + +![Transactional xCluster](/images/deploy/xcluster/xcluster-transactional.png) + +If the source universe fails, we can discard all incomplete information in the target universe by rewinding it to the latest _xCluster safe time_ (1:59:56 PM in the example) using YugabyteDB's [Point-in-Time Recovery (PITR)](../../../manage/backup-restore/point-in-time-recovery/) feature. The result will be a consistent database that includes only the transactions from the source universe that committed at or before the _xCluster safe time_. Unlike with non-transactional replication, there is no need to handle torn transactions. 
+ +Target universe read-only transactions run at serializable isolation level on a single consistent snapshot as of the _xCluster safe time_. + +In xCluster transactional replication mode, writes to the target universe are not allowed. Consequently, this mode does not support bidirectional replication. -In particular, we pick the time to read as of, _T_, so that all the writes from all the source transactions that will commit at or before time _T_ have been replicated to the target universe. Put another way, we read as of a time far enough in the past that there cannot be new incoming source commits at or before that time. This restores consistent reads and ensures source universe transaction results become visible atomically. Note that we do _not_ wait for any current in flight source-universe transactions. +Transactional replication is currently only available for YSQL deployments. -In order to know when to read as of, we maintain an analog of safe time called _xCluster safe time_, which is the latest time it is currently safe to read as of with xCluster transactional replication in order to guarantee consistency and atomicity. xCluster safe time advances as replication proceeds but lags behind real-time by the current replication lag. This means, for example, if we write at 2 PM in the source universe and read at 2:01 PM in the target universe and replication lag is say five minutes then the read will read as of 1:56 PM and will not see the write. We won't be able to see the write until 2:06 PM in the target universe assuming the replication lag remains at five minutes. +Transactional replication comes in the following modes: -If the source universe dies, then we can discard all the incomplete information in the target universe by rewinding it to the latest xCluster safe time (1:56 PM in the example) using YugabyteDB's [Point-in-Time Recovery (PITR)](../../../manage/backup-restore/point-in-time-recovery/) feature. 
The result will be the fully consistent database that results from applying a prefix of the source universe's transactions, namely exactly those that committed at or before the xCluster safe time. Unlike with non-transactional replication, there is thus no need to handle torn transactions. +#### Semi-automatic mode -It is unclear how to best support writes in the target universe using this strategy of maintaining consistency by reading only at a safe time in the past: A target update transaction would appear to need to read from the past but write in the present; it would thus have to wait for at least the replication lag to make sure no interfering writes from the source universe occurred during that interval. Such transactions would thus be slow and prone to aborting. +Provides operationally simpler setup and management of replication, as well as fewer steps for performing DDL changes. This is the recommended mode for new deployments. -Accordingly, target writes are not currently permitted when using xCluster transactional replication. This means that the transactional replication mode cannot support bidirectional replication. +{{}} +To learn more, watch [Simplified schema management with xCluster DB Scoped](https://www.youtube.com/live/vYyn2OUSZFE?si=i3ZkBh6QqHKukB_p) +{{}} -Target-universe read-only transactions are still permitted; they run at serializable isolation level on a single consistent snapshot taken in the past. +#### Manual mode +This mode requires manual intervention for schema changes and is more complex to set up and manage. ## High-level implementation details -At a high level, xCluster replication is implemented by having _pollers_ in the target universe that poll the source universe tablet servers for recent changes. Each poller works independently and polls one source tablet, distributing the received changes among a set of target tablets. +xCluster replicates WAL records from source universe tablets to target universe tablets. 
It is implemented by having _pollers_ in the target universe that poll the source universe tablet servers for WAL records. Each poller works independently and polls one source tablet, distributing the received changes among one or more target tablets. This allows xCluster to scale horizontally as more nodes are added. -The polled tablets examine only their Raft logs to determine what changes have occurred recently rather than looking at their RocksDB instances. The incoming poll request specifies the Raft log entry ID to start gathering changes from and the response includes a batch of changes and the Raft log entry ID to continue with next time. +The polled tablets examine only the WAL to determine recent changes rather than looking at their RocksDB instances. The incoming poll request specifies the WAL OpId to start gathering changes from, and the response includes a batch of changes and the WAL OpId to continue with next time. -Pollers occasionally checkpoint the continue-with Raft ID of the last batch of changes they have processed; this ensures each change is processed at least once. +The source universe periodically saves the OpId that the target universe has confirmed as processed. This information is stored in the `cdc_state` table. -### The mapping between source and target tablets +{{}} +To learn more, watch [xCluster Replication](https://youtu.be/9TF3xPDDJ30?si=foKnj1CvDYidHqmx) +{{}} -In simple cases, we can associate a poller with each target tablet that polls the corresponding source tablet. +### The mapping between source and target tablets -However, in the general case the number of tablets for a table in the source universe and in the target universe may be different. Even if the number of tablets is the same, they may have different sharding boundaries due to tablet splits occurring at different places in the past. +In simple cases, each target tablet can have a dedicated poller that directly polls the corresponding source tablet. 
However, in more complex scenarios, the number of tablets in the source and target universes may differ. Even if the number of tablets is the same, their sharding boundaries might not align due to historical tablet splits occurring at different points in time. -This means that each target tablet may need the changes from multiple source tablets and multiple target tablets may need changes from the same source tablet. To avoid multiple redundant cross-universe reads to the same source tablet, only one poller reads from each source tablet; in cases where a source tablet's changes are needed by multiple target tablets, the poller assigned to that source tablet distributes the changes to the relevant target tablets. +This means that each target tablet may require changes from multiple source tablets, and multiple target tablets may need changes from the same source tablet. To prevent redundant cross-universe reads from the same source tablet, only one poller reads from each source tablet. When a source tablet's changes are needed by multiple target tablets, the assigned poller distributes the changes to the relevant target tablets. -The following illustration shows what this might look like for one table: +The following illustration shows an example of this setup for a single table: -![distribution of pollers and where they pull data from and send it to](/images/architecture/replication/distribution-of-pollers-new.png) +![distribution of pollers to tablets](/images/architecture/replication/distribution-of-pollers-new.png) -Here, the source universe is on the left with three TServers (the white boxes) each containing one tablet of the table (the boxes inside) with the shown ranges of the table. The target universe is on the right with one fewer TServer and tablet. 
As you can see, the top source tablet's data is split among both target tablets by the poller running in the top target TServer and the remaining source tablets' data is replicated to the second target tablet by the pollers running in the other target TServer. For simplicity, only the tablet leaders are shown here — pollers run at and poll from only leaders. +In the illustration, the source universe is depicted on the left with three TServers (white boxes), each containing one tablet of the table (boxes inside) with specified ranges. The target universe is on the right, featuring one fewer TServer and tablet. The data from the top source tablet is distributed among both target tablets by the poller in the top target TServer. Meanwhile, the data from the remaining source tablets is replicated to the second target tablet by the pollers in the other target TServer. For simplicity, only the tablet leaders are shown, as pollers operate at and poll from leaders only. -Tablet splitting generates a Raft log entry, which is replicated to the target side so that the mapping of pollers to source tablets can be updated as needed when a source tablet splits. +Tablet splitting generates WAL records, which are replicated to the target side. This ensures that the mapping of pollers to source tablets is automatically updated as needed when a source tablet splits. ### Single-shard transactions -These are straightforward: when one of these transaction commits, a single Raft log entry is produced containing all of that transaction's writes and its commit time. This entry in turn is used to generate part of a batch of changes when the poller requests changes. +When a single-shard transaction commits, a single WAL record is generated that includes all the writes and the commit time for that transaction. This WAL record is then included in a batch of changes when the poller requests updates. Single-shard transactions only modify a single tablet. 
-Upon receiving the changes, the poller examines each write to see what key it writes to in order to determine which target tablet covers that part of the table. The poller then forwards the writes to the appropriate tablets. The commit times of the writes are preserved and the writes are marked as _external_, which prevents them from being further replicated by xCluster, whether onward to an additional cluster or back to the cluster they came from in bidirectional cases. +Upon receiving the changes, the poller examines each write to determine the key it writes to and identifies the corresponding target tablet. The poller then forwards the writes to the appropriate tablets. The commit times of the writes are preserved, and the writes are marked as _external_. This marking prevents them from being further replicated by xCluster, whether onward to another cluster or back to the original cluster in bidirectional replication scenarios. ### Distributed transactions -These are more complicated because they involve multiple Raft records and the transaction status table. Simplifying somewhat, each time one of these transactions makes a provisional write, a Raft entry is made on the appropriate tablet and after the transaction commits, a Raft entry is made on all the involved tablets to _apply the transaction_. Applying a transaction here means converting its writes from provisional writes to regular writes. +Distributed transactions involve multiple WAL records and the transaction status tablet. Writes generate provisional records (intents) and corresponding WAL records linked to a transaction on the involved user tablets. The state of the transaction is tracked by one transaction status tablet. The transaction is committed by updating the transaction state in the transaction status tablet, which produces a WAL record. After the commit is made durable, all involved tablets are asynchronously informed to apply the transaction. 
This process converts provisional writes into regular writes and generates a further WAL record. The provisional records are made available for reads immediately after the commit, even if the apply has not occurred yet. -Provisional writes are handled similarly to the normal writes in the single-shard transaction case but are written as provisional records instead of normal writes. A special inert format is used that differs from the usual provisional records format. This both saves space as the original locking information, which is not needed on the target side, is omitted and prevents the provisional records from interacting with the target read or locking pathways. This ensures the transaction will not affect transactions on the target side yet. +On the target universe, xCluster generates a special inert format for provisional records. This format omits the original row locking information and an additional index on the key in the intents DB, as these are unnecessary on the target side. -The apply Raft entries also generate changes received by the pollers. When a poller receives an apply entry, it sends instructions to all the target tablets it handles to apply the given transaction. Transaction application on the target tablets is similar to that on the source universe but differs among other things due to the different provisional record format. It converts the provisional writes into regular writes, again at the same commit time as on the source universe and with them being marked as external. At this point the writes of the transaction to this tablet become visible to reads. +When a poller receives an apply WAL record, it distributes it to all the target tablets it manages. The transaction application on the target tablets mirrors that of the source universe. It converts the provisional writes into regular writes, maintaining the same commit time as on the source universe and marking them as external. 
At this stage, the transaction's writes to this tablet become visible for reads. -Because pollers operate independently and the writes/applies to multiple tablets are not done as a set atomically, writes from a single transaction — even a single-shard one — to multiple tablets can become visible at different times. +Because pollers operate independently and the writes to multiple tablets are not applied atomically, writes from a single transaction affecting multiple tablets can become visible at different times. -When a source transaction commits, it is applied to the relevant tablets lazily. This means that even though transaction _X_ commits before transaction _Y_, _X_'s application Raft entry may occur after _Y_'s application Raft entry on some tablets. If this happens, the writes from _X_ can become visible in the target universe after _Y_'s. This is why non-transactional–mode reads are only eventually consistent and not timeline consistent. +When a source transaction commits, it is applied to the relevant tablets lazily. This means that even though transaction _X_ commits before transaction _Y_, _X_'s apply WAL record may occur after _Y_'s apply WAL record on some tablets. If this happens, the writes from _X_ can become visible in the target universe after _Y_'s. This is why non-transactional mode reads are only eventually consistent and not timeline consistent. ### Transactional mode -xCluster safe time is computed for each database by the target-universe master leader as the minimum _xCluster application time_ any tablet in that database has reached. Pollers determine this time using information from the source tablet servers of the form "once you have fully applied all the changes before this one, your xCluster application time for this tablet will be _T_". +Transactional mode addresses these issues by selecting an appropriate xCluster safe time. 
-A source tablet server sends such information when it determines that no active transaction involving that tablet can commit before _T_ and that all transactions involving that tablet that committed before _T_ have application Raft entries that have been previously sent as changes. It also periodically (currently 250 ms) checks for committed transactions that are missing apply Raft entries and generates such entries for them; this helps xCluster safe time advance faster. +The xCluster safe time for each database on the target universe is calculated as the minimum _xCluster apply safe time_ reached by any tablet in that database. Pollers use information from the source tablet leaders to determine their _xCluster apply safe time_. This time ensures that all transactions committed before it have been applied on the target tablets. -## Schema differences +A source tablet leader determines the _xCluster apply safe time_ that the target poller can advance to based on the state of the apply operations of committed transactions. It periodically (every 250 ms) checks the state of in-flight transactions and generates apply WAL records for committed transactions. This ensures that the _xCluster apply safe time_ can keep advancing even when there are long-running transactions in the system. -xCluster replication does not support replicating between two copies of a table with different schemas. For example, you cannot replicate a table to a version of that table missing a column or with a column having a different type. +{{}} +To learn more, watch [Transactional xCluster](https://youtu.be/lI6gw7ncBs8?si=gAioZ_NgOyl2dsM5) +{{}} -More subtly, this restriction extends to hidden schema metadata like the assignment of column IDs to columns. Just because two tables show the same schema in YSQL does not mean their schemas are actually identical. 
Because of this, in practice the target table schema needs to be copied from that of the source table; see [replication bootstrapping](#replication-bootstrapping) for how this is done. - -Because of this restriction, xCluster does not need to do a deep translation of row contents (for example, dropping columns or translating column IDs inside of keys and values) as rows are replicated between universes. Avoiding deep translation simplifies the code and reduces the cost of replication. +## Schema differences -### Supporting schema changes +xCluster replication requires that the source and target tables have identical schemas. This means that you cannot replicate data between tables if there are differences in their schemas, such as missing columns or columns with different data types. Ensuring schema consistency is crucial for the replication process to function correctly. -Currently, this is a manual process where the exact same schema change must be manually made on first one side then the other. Replication of the given table automatically pauses while schema differences are detected and resumes after the schemas are the same again. +Additionally, this restriction includes hidden schema metadata, such as the assignment of column IDs. Even if two tables appear to have the same schema in YSQL, their schemas might not be identical. Therefore, in practice, the target table schema should be copied from the source table schema. For more details, refer to [replication bootstrapping](#replication-bootstrapping). -Ongoing work, [#11537](https://github.com/yugabyte/yugabyte-db/issues/11537), will make this automatic: schema changes made on the source universe will automatically be replicated to the target universe and made, allowing replication to continue running without operator intervention. 
+Because of this restriction, xCluster does not need to perform deep translations of row contents (such as dropping columns or translating column IDs within keys and values) when replicating rows between universes. This avoidance of deep translation reduces the replication cost and improves throughput. +Schema changes must be manually applied first to the source universe and then to the target universe. During this process, replication for the affected table is automatically paused when schema differences are detected and resumes once the schemas are identical. ## Replication bootstrapping -xCluster replication copies changes made on the source universe to the target universe. This is fine if the source universe starts empty but what if we want to start replicating a universe that already contains data? +xCluster replicates the source WAL records to the target universe. WAL is garbage collected over time to conserve disk space. When setting up a new replication flow, the source universe may have already deleted some of the WAL records needed for an empty target universe to catch up. This is especially likely if the source universe has been running for a while and has accumulated a lot of WAL. -In that case, we need to bootstrap the replication process by first copying the source universe to the target universe. +In this case, you need to bootstrap the target universe. -Today, this is done by backing up the source universe and restoring it to the target universe. In addition to copying all the data, this copies the table schemas so they are identical on both sides. Before the backup is done, the current Raft log IDs are saved so the replication can be started after the restore from a time before the backup was done. This ensures any data written to the source universe during the backup is replicated. +This process involves checkpointing the source universe to ensure that any new WAL records are preserved for xCluster. 
Following this, a [distributed backup](../../../manage/backup-restore/snapshot-ysql/#move-a-snapshot-to-external-storage) is performed and restored to the target universe. This not only copies all the data but also ensures that the table schemas are identical on both sides. -Ongoing work, [#17862](https://github.com/yugabyte/yugabyte-db/issues/17862), will replace using backup and restore here with directly copying RocksDB files between the source and target universes. This will be more performant and flexible and remove the need for external storage like S3 to set up replication. - -## Supported deployment scenarios +## Deployment scenarios xCluster currently supports active-active single-master and active-active multi-master deployments. ### Active-active single-master -In this setup the replication is unidirectional from a source universe to a target universe. The target universe is typically located in data centers or regions that are different from the source universe. The source universe can serve both reads and writes. The target universe can only serve reads. Since only the nodes in one universe can take writes this mode is referred to as single master. Note that within the source universe all nodes can serve writes. +In this setup, replication is unidirectional from a source universe to a target universe, typically located in different data centers or regions. The source universe can handle both reads and writes, while the target universe is read-only. Because only the source universe can accept writes, this mode is referred to as single-master. Note that in the source universe, all nodes can serve writes. + +These deployments are typically used for serving low-latency reads from the target universes and for disaster recovery purposes. When the primary purpose is disaster recovery, these deployments are referred to as active-standby, as the target universe is on standby to take over if the source universe fails. 
-Usually, such deployments are used for serving low-latency reads from the target universes, as well as for disaster recovery purposes. When used primarily for disaster recovery purposes, these deployments are also called active-standby because the target universe stands by to take over if the source universe is lost. +Transactional mode is generally preferred for this deployment because it ensures consistency even if the source universe is lost. However, non-transactional mode can also be used depending on the specific requirements and trade-offs. -Either transactional or non-transactional mode can be used here, but transactional mode is usually preferred because it provides consistency if the source universe is lost. +{{}} +To learn more, watch [Disaster Recovery in YugabyteDB](https://youtu.be/6rmrcVQqb0o?si=4CuiByQGLaNzhdn_) +{{}} The following diagram shows an example of this deployment: @@ -177,113 +202,98 @@ The following diagram shows an example of this deployment: ### Active-active multi-master -The replication of data can be bidirectional between two universes, in which case both universes can perform reads and writes. Writes to any universe are asynchronously replicated to the other universe with a timestamp for the update. If the same key is updated in both universes at similar times, this results in the write with the larger timestamp becoming the latest write. In this case, both the universes serve writes, hence this deployment mode is called multi-master. +In a multi-master deployment, data replication is bidirectional between two universes, allowing both universes to perform reads and writes. Writes to any universe are asynchronously replicated to the other universe with a timestamp for the update. This mode implements last-writer-wins, where if the same key is updated in both universes around the same time, the write with the larger timestamp overrides the other one. This deployment mode is called multi-master because both universes serve writes. 
-The multi-master deployment is built using bidirectional replication which has two unidirectional replication streams using non-transactional mode. Special care is taken to ensure that the timestamps are assigned to guarantee last-writer-wins semantics and the data arriving from the replication stream is not re-replicated. +The multi-master deployment uses bidirectional replication, which involves two unidirectional replication streams operating in non-transactional mode. Special measures are taken to assign timestamps that ensure last-writer-wins semantics, and data received from the replication stream is not re-replicated. -The following diagram shows an example of this deployment: +The following diagram illustrates this deployment: ![example of active-active deployment](/images/architecture/replication/active-active-deployment-new.png) -## Not supported deployment scenarios - -A number of deployment scenarios are not yet supported in YugabyteDB. +### Unsupported deployment scenarios -### Broadcast +The following deployment scenarios are not yet supported: -This topology involves one source universe sending data to many target universes. See [#11535](https://github.com/yugabyte/yugabyte-db/issues/11535) for details. +- _Broadcast_: This topology involves one source universe sending data to many target universes, for example: `A -> B, A -> C`. See [#11535](https://github.com/yugabyte/yugabyte-db/issues/11535) for details. -### Consolidation +- _Consolidation_: This topology involves many source universes sending data to one central target universe, for example: `B -> A, C -> A`. See [#11535](https://github.com/yugabyte/yugabyte-db/issues/11535) for details. -This topology involves many source universes sending data to one central target universe. See [#11535](https://github.com/yugabyte/yugabyte-db/issues/11535) for details. 
+- _Daisy chaining_: This involves connecting a series of universes, for example: `A -> B -> C` -### More complex topologies +- _Star_: This involves connecting all universes to each other, for example: `A <-> B <-> C <-> A` -Outside of the traditional 1:1 topology and the previously described 1:N and N:1 topologies, there are many other desired configurations that are not currently supported, such as the following: - -- Daisy chaining, which involves connecting a series of universes as both source and target, for example: `A <-> B <-> C` -- Ring, which involves connecting a series of universes in a loop, for example: `A <-> B <-> C <-> A` +## Limitations -Some of these topologies might become naturally available as soon as the [Broadcast](#broadcast) and [Consolidation](#consolidation) use cases are resolved, thus allowing a universe to simultaneously be both a source and a target to several other universes. For details, see [#11535](https://github.com/yugabyte/yugabyte-db/issues/11535). +The following limitations apply to all xCluster modes and deployment scenarios: +- Materialized views -## Limitations + [Materialized views](../../../explore/ysql-language-features/advanced-features/views/#materialized-views) are not replicated by xCluster. When setting up replication for a database, materialized views need to be excluded. You can create them on the target universe after the replication is set up. When refreshing, make sure to refresh on both sides. -There are a number of limitations in the current xCluster implementation besides what deployments are possible. +- Backups -### Database triggers do not fire for replicated data + Backups are supported on both universes. However, for backups on target clusters, if there is an active workload, the consistency of the latest data is not guaranteed. This applies even to transactional modes. Therefore, it is recommended to take backups on the source universe only. 
-Because xCluster replication bypasses the query layer, any database triggers are not fired on the target side for replicated records, which can result in unexpected behavior. +- Change Data Capture -### Constraints cannot be enforced in active-active multi-master + CDC [gRPC protocol](../change-data-capture) and [PostgreSQL protocol](../cdc-logical-replication) are not supported on the target universe. It is recommended to set up CDC on the source universe only. -Similarly, there is no way to check for violations of unique constraints in active-active multiple-master setups. It is possible, for example, to have two conflicting writes in separate universes that together would violate a unique constraint and cause the main table to contain both rows, yet the index to contain only one row, resulting in an inconsistent state. +- Modifications of Types -Because of this applications using active-active multi-master should avoid `UNIQUE` indexes and constraints as well as serial columns in primary keys: Because both universes generate the same sequence numbers, this can result in conflicting rows. It is recommended to use UUIDs instead. + When xCluster is active, composite user types, array types whose base types are row types, domains, and other non-primitive types should not be created, altered, or dropped. Create these types before xCluster is set up. If you need to modify these types, you must first drop xCluster replication, make the necessary changes, and then re-enable xCluster via bootstrap. [#24078](https://github.com/yugabyte/yugabyte-db/issues/24078), [#24079](https://github.com/yugabyte/yugabyte-db/issues/24079) -In the future, it may be possible to detect such unsafe constraints and issue a warning, potentially by default. This is tracked in [#11539](https://github.com/yugabyte/yugabyte-db/issues/11539). 
+Limitations specific to each scenario and mode are listed below: -Note that if you attempt to insert the same row on both universes at the same time to a table that does not have a primary key then you will end up with two rows with the same data. This is the expected PostgreSQL behavior — tables without primary keys can have multiple rows with the same data. +### Non-transactional -### Materialized views are not supported +- Consistency issues -Setting up xCluster replication for [materialized views](../../../explore/ysql-language-features/advanced-features/views/#materialized-views) is currently not supported. When setting up replication for a database, materialized views need to be excluded. YugabyteDB Anywhere automatically excludes materialized views from replication setup. + Refer to [Inconsistencies affecting transactions](#inconsistencies-affecting-transactions) for details on how non-transactional mode can lead to inconsistencies. -### Non-transactional–mode consistency issues +- Enum types -When interacting with data replicated from another universe using non-transactional mode: + When xCluster is active, user-defined ENUM types should not be created, altered, or dropped. Create these types before xCluster is set up. If you need to modify these types, you must first drop xCluster replication, make the necessary changes, and then re-enable xCluster via [bootstrap](#replication-bootstrapping). -- Reads are only eventually consistent -- Last writer wins for writes -- Transactions are limited to isolation level SQL-92 READ COMMITTED +#### Multi-master asynchronous replication -After losing one universe, the other universe may be left with torn transactions. +- Triggers -### Transactional-mode limitations - -Transactional mode has the following limitations: + Because xCluster replication operates at the DocDB layer, it bypasses the query layer. So, only the database triggers on the source universe are fired, and the ones on the target side are not fired. 
It is recommended to avoid using the same triggers on both universes to avoid any confusion. -- No writes are allowed in the target universe -- Active-active multi-master is not supported -- YCQL is not yet supported +- Indexes and Constraints -When the source universe is lost, an explicit decision must be made to switch over to the standby universe and point-in-time recovery must run; this is expected to increase recovery time by a minute or so. + In active-active multi-master setups, unique constraints cannot be guaranteed. When conflicting writes to the same key occur from separate universes simultaneously, they can violate unique constraints or result in inconsistent indexes. For example, two conflicting writes might result in both rows being present in the main table, but only one row in the index. -### Bootstrapping replication + Note that if you attempt to insert the same row on both universes at the same time to a table that does not have a primary key, you will end up with two rows with the same data. This is the expected PostgreSQL behavior — tables without primary keys can have multiple rows with the same data. -- Currently, it is your responsibility to ensure that a target universe has sufficiently recent updates so that replication can safely resume (for instructions, refer to [Bootstrap a target universe](../../../deploy/multi-dc/async-replication/async-deployment/#bootstrap-a-target-universe)). In the future, bootstrapping the target universe will be automated, which is tracked in [#11538](https://github.com/yugabyte/yugabyte-db/issues/11538). -- Bootstrap currently relies on the underlying backup and restore (BAR) mechanism of YugabyteDB. This means it also inherits all of the limitations of BAR. For YSQL, currently the scope of BAR is at a database level, while the scope of replication is at table level. 
This implies that when you bootstrap a target universe, you automatically bring any tables from the source database to the target database, even the ones that you might not plan to actually configure replication on. This is tracked in [#11536](https://github.com/yugabyte/yugabyte-db/issues/11536). +- Sequences and Serial columns -### DDL changes + Sequence data is not replicated by xCluster. Serial columns use sequences internally. Avoid serial columns in primary keys, as both universes would generate the same sequence numbers, resulting in conflicting rows. It is recommended to use UUIDs instead. -- Currently, DDL changes are not automatically replicated. Applying commands such as `CREATE TABLE`, `ALTER TABLE`, and `CREATE INDEX` to the target universes is your responsibility. -- `DROP TABLE` is not supported in YCQL. You must first disable replication for this table. -- `TRUNCATE TABLE` is not supported. This is an underlying limitation, due to the level at which the two features operate. That is, replication is implemented on top of the Raft WAL files, while truncate is implemented on top of the RocksDB SST files. -- In the future, it will be possible to propagate DDL changes safely to other universes. This is tracked in [#11537](https://github.com/yugabyte/yugabyte-db/issues/11537). +### Transactional -### Kubernetes +Transactional mode has the following limitations: -- Technically, xCluster replication can be set up with Kubernetes-deployed universes. However, the source and target must be able to communicate by directly referencing the pods in the other universe. In practice, this either means that the two universes must be part of the same Kubernetes cluster or that two Kubernetes clusters must have DNS and routing properly set up amongst themselves. 
-- Being able to have two YugabyteDB universes, each in their own standalone Kubernetes cluster, communicating with each other via a load balancer, is not currently supported, as per [#2422](https://github.com/yugabyte/yugabyte-db/issues/2422). +- By default, no writes are allowed in the target universe. -### Backups + In v2024.2.3 and later, you can allow writes to the target on an exception basis, overriding the default read-only behavior by setting the following YSQL configuration parameter before executing a DML operation: -Backups are supported. However for backups on target clusters, if there is an active workload, consistency of the latest data is not guaranteed. + ```sql + SET yb_non_ddl_txn_for_sys_tables_allowed = true + ``` -## Cross-feature interactions + This is intended strictly for specialized use cases, such as enabling tools like Flyway to update maintenance tables (for example, schema version trackers) on the replica. -A number of interactions across features are supported. + {{< warning title="Important" >}} +Improper use can compromise replication consistency and lead to data divergence. Use this setting only when absolutely necessary and with a clear understanding of its implications. + {{< /warning >}} -### Supported +- YCQL is not yet supported. +- Schema changes are not automatically replicated. They must be manually applied to both source and target universes. Refer to [DDLs in semi-automatic mode](../../../deploy/multi-dc/async-replication/async-transactional-setup-dblevel/#making-ddl-changes) and [DDLs in manual mode](../../../deploy/multi-dc/async-replication/async-transactional-tables/) for more information. +- All DDL changes must be manually applied to both source and target universes. 
For more information, refer to [DDLs in semi-automatic mode](../../../deploy/multi-dc/async-replication/async-transactional-setup-dblevel/#making-ddl-changes) and [DDLs in manual mode](../../../deploy/multi-dc/async-replication/async-transactional-tables/). +- When xCluster is active, user-defined ENUM types should not be created, altered, or dropped. Consider setting up these types before xCluster is set up. If you need to modify these types, you must first drop xCluster replication, make the necessary changes, and then re-enable xCluster via [bootstrap](#replication-bootstrapping). -- TLS is supported for both client and internal RPC traffic. Universes can also be configured with different certificates. -- RPC compression is supported. Note that both universes must be on a version that supports compression before a compression algorithm is enabled. -- Encryption at rest is supported. Note that the universes can technically use different Key Management Service (KMS) configurations. However, for bootstrapping a target universe, the reliance is on the backup and restore flow. As such, a limitation from that is inherited, which requires that the universe being restored has at least access to the same KMS as the one in which the backup was taken. This means both the source and the target must have access to the same KMS configurations. -- YSQL colocation is supported. -- YSQL geo-partitioning is supported. Note that you must configure replication on all new partitions manually as DDL changes are not replicated automatically. -- Source and target universes can have different numbers of tablets. -- Tablet splitting is supported on both source and target universes. 
+### Kubernetes -{{< tip title="Explore" >}} -To better understand how xCluster replication works in practice, see [xCluster deployment](../../../deploy/multi-dc/async-replication/) and [Transactional xCluster deployment](../../../deploy/multi-dc/async-replication/async-replication-transactional/) in Launch and Manage. -{{< /tip >}} +- xCluster replication can be set up with Kubernetes-deployed universes. However, the source and target must be able to communicate by directly referencing the pods in the other universe. In practice, this either means that the two universes must be part of the same Kubernetes cluster or that two Kubernetes clusters must have DNS and routing properly set up amongst themselves. +- Having two YugabyteDB universes, each in their own standalone Kubernetes cluster, communicating with each other via a load balancer, is not currently supported. See [#2422](https://github.com/yugabyte/yugabyte-db/issues/2422) for details. diff --git a/docs/content/stable/architecture/docdb-replication/cdc-logical-replication.md b/docs/content/stable/architecture/docdb-replication/cdc-logical-replication.md index c7365d65c15f..1afbc93a03b2 100644 --- a/docs/content/stable/architecture/docdb-replication/cdc-logical-replication.md +++ b/docs/content/stable/architecture/docdb-replication/cdc-logical-replication.md @@ -68,7 +68,7 @@ Each tablet sends changes in transaction commit time order. Further, in a transa ![VWAL-walsender](/images/architecture/vwal_walsender_interaction.png) -VWAL collects changes across multiple tablets, assembles the transactions, assigns LSN to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. 
+VWAL collects changes across multiple tablets, assembles the transactions, assigns a Log Sequence Number ([LSN](../../../develop/change-data-capture/using-logical-replication/key-concepts/#lsn-type)) to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. **Step 3 - walsender to client** diff --git a/docs/content/stable/deploy/kubernetes/single-zone/oss/helm-chart.md b/docs/content/stable/deploy/kubernetes/single-zone/oss/helm-chart.md index c2872544a343..a37b1dead976 100644 --- a/docs/content/stable/deploy/kubernetes/single-zone/oss/helm-chart.md +++ b/docs/content/stable/deploy/kubernetes/single-zone/oss/helm-chart.md @@ -422,6 +422,15 @@ helm repo update helm upgrade yb-demo yugabytedb/yugabyte --version {{}} --wait -n yb-demo ``` +Then finalize the upgrade as follows: + +```sh +kubectl exec -it yb-master-0 -- /home/yugabyte/bin/yb-admin --master_addresses yb-master-0.yb-masters.default.svc.cluster.local:7100 promote_auto_flags +kubectl exec -it yb-master-0 -- /home/yugabyte/bin/yb-admin --master_addresses yb-master-0.yb-masters.default.svc.cluster.local:7100 upgrade_ysql +``` + +The `upgrade_ysql` command is only needed if YSQL is enabled. + ## Update the configuration of YugabyteDB pods You can update most settings in the helm chart by running a `helm upgrade` with the new values. By default, this performs a [rolling update](https://github.com/yugabyte/charts/blob/853d7ac744cf6d637b5877f4681940825beda8f6/stable/yugabyte/values.yaml#L60) of the pods. 
diff --git a/docs/content/stable/develop/_index.md b/docs/content/stable/develop/_index.md index 5084599f7ed6..d9fa6c765f23 100644 --- a/docs/content/stable/develop/_index.md +++ b/docs/content/stable/develop/_index.md @@ -43,8 +43,8 @@ To learn how to build applications on top of YugabyteDB, see [Learn app developm Use these best practices to build distributed applications on top of YugabyteDB; this includes a list of techniques that you can adopt to make your application perform its best. -{{}} -For more details, see [Best practices](./best-practices-ysql). +{{}} +For more details, see [Best practices](./best-practices-develop). {{}} ## Drivers and ORMs diff --git a/docs/content/stable/develop/best-practices-develop/_index.md b/docs/content/stable/develop/best-practices-develop/_index.md new file mode 100644 index 000000000000..08af7a5f0147 --- /dev/null +++ b/docs/content/stable/develop/best-practices-develop/_index.md @@ -0,0 +1,49 @@ +--- +title: Best practices for applications +headerTitle: Best practices +linkTitle: Best practices +description: Tips and tricks to build applications +headcontent: Tips and tricks to build applications for high performance and availability +menu: + stable: + identifier: best-practices-develop + parent: develop + weight: 570 +type: indexpage +--- + +## YSQL + +{{}} + + {{}} + + {{}} + + {{}} + +{{}} + +## YCQL + +{{}} + + {{}} + +{{}} diff --git a/docs/content/stable/develop/best-practices-develop/administration.md b/docs/content/stable/develop/best-practices-develop/administration.md new file mode 100644 index 000000000000..249986767c98 --- /dev/null +++ b/docs/content/stable/develop/best-practices-develop/administration.md @@ -0,0 +1,57 @@ +--- +title: Best practices for YSQL database administrators +headerTitle: Best practices for YSQL database administrators +linkTitle: YSQL database administrators +description: Tips and tricks to build YSQL applications +headcontent: Tips and tricks for administering YSQL databases +menu: + 
stable: + identifier: best-practices-ysql-administration + parent: best-practices-develop + weight: 30 +type: docs +--- + +Database administrators can fine-tune YugabyteDB deployments for better reliability, performance, and operational efficiency by following targeted best practices. This guide outlines key recommendations for configuring single-AZ environments, optimizing memory use, accelerating CI/CD tests, and safely managing concurrent DML and DDL operations. These tips are designed to help DBAs maintain stable, scalable YSQL clusters in real-world and test scenarios alike. + +## Single availability zone (AZ) deployments + +In single AZ deployments, you need to set the [yb-tserver](../../../reference/configuration/yb-tserver) flag `--durable_wal_write=true` to not lose data if the whole data center goes down (for example, power failure). + +## Allow for tablet replica overheads + +Although you can manually provision the amount of memory each TServer uses using flags ([--memory_limit_hard_bytes](../../../reference/configuration/yb-tserver/#memory-limit-hard-bytes) or [--default_memory_limit_to_ram_ratio](../../../reference/configuration/yb-tserver/#default-memory-limit-to-ram-ratio)), this can be tricky as you need to take into account how much memory the kernel needs, along with the PostgreSQL processes and any Master process that is going to be colocated with the TServer. + +Accordingly, you should use the [--use_memory_defaults_optimized_for_ysql](../../../reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql) flag, which gives good memory division settings for using YSQL, optimized for your node's size. + +If this flag is true, then the [memory division flag defaults](../../../reference/configuration/yb-tserver/#memory-division-flags) change to provide much more memory for PostgreSQL; furthermore, they optimize for the node size. 
+ +Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. + +## Settings for CI and CD integration tests + +You can set certain flags to increase performance using YugabyteDB in CI and CD automated test scenarios as follows: + +- Point the flags `--fs_data_dirs` and `--fs_wal_dirs` to a RAMDisk directory to make DML, DDL, cluster creation, and cluster deletion faster, ensuring that data is not written to disk. +- Set the flag `--yb_num_shards_per_tserver=1`. Reducing the number of shards lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. +- Use colocated databases in YSQL. Colocation lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. +- Set the flag `--replication_factor=1` for test scenarios, as keeping the data three-way replicated (default) is not necessary. Reducing that to 1 reduces space usage and increases performance. +- Use `TRUNCATE table1,table2,table3..tablen;` instead of CREATE TABLE, and DROP TABLE between test cases. + +## Concurrent DML during a DDL operation + +In YugabyteDB, DML is allowed to execute while a DDL statement modifies the schema that is accessed by the DML statement. For example, an `ALTER TABLE
.. ADD COLUMN` DDL statement may add a new column while a `SELECT * from
` executes concurrently on the same relation. In PostgreSQL, this is typically not allowed because such DDL statements take a table-level exclusive lock that prevents concurrent DML from executing. (Support for similar behavior in YugabyteDB is being tracked in issue {{}}.) + +In YugabyteDB, when a DDL modifies the schema of tables that are accessed by concurrent DML statements, the DML statement may do one of the following: + +- Operate with the old schema prior to the DDL. +- Operate with the new schema after the DDL completes. +- Encounter temporary errors such as `schema mismatch errors` or `catalog version mismatch`. It is recommended for the client to [retry such operations](https://www.yugabyte.com/blog/retry-mechanism-spring-boot-app/) whenever possible. + +Most DDL statements complete quickly, so this is typically not a significant issue in practice. However, [certain kinds of ALTER TABLE DDL statements](../../../api/ysql/the-sql-language/statements/ddl_alter_table/#alter-table-operations-that-involve-a-table-rewrite) involve making a full copy of the table(s) whose schema is being modified. For these operations, it is not recommended to run any concurrent DML statements on the table being modified by the `ALTER TABLE`, as the effect of such concurrent DML may not be reflected in the table copy. + +## Concurrent DDL during a DDL operation + +DDL statements that affect entities in different databases can be run concurrently. However, for DDL statements that impact the same database, it is recommended to execute them sequentially. + +DDL statements that relate to shared objects, such as roles or tablespaces, are considered as affecting all databases in the cluster, so they should also be run sequentially. 
diff --git a/docs/content/stable/develop/best-practices-ycql.md b/docs/content/stable/develop/best-practices-develop/best-practices-ycql.md similarity index 54% rename from docs/content/stable/develop/best-practices-ycql.md rename to docs/content/stable/develop/best-practices-develop/best-practices-ycql.md index f70cb4a3d805..a0f32b3c3923 100644 --- a/docs/content/stable/develop/best-practices-ycql.md +++ b/docs/content/stable/develop/best-practices-develop/best-practices-ycql.md @@ -1,32 +1,32 @@ --- title: Best practices for YCQL applications -headerTitle: Best practices -linkTitle: Best practices +headerTitle: Best practices for YCQL applications +linkTitle: YCQL applications description: Tips and tricks to build YCQL applications headcontent: Tips and tricks to build YCQL applications for high performance and availability menu: stable: identifier: best-practices-ycql - parent: develop - weight: 571 + parent: best-practices-develop + weight: 40 type: docs --- -{{}} +To build high-performance and scalable applications using YCQL, developers should follow key schema design and operational best practices tailored for YugabyteDB's distributed architecture. This guide covers strategies for using indexes efficiently, optimizing read/write paths with batching and prepared statements, managing JSON and collection data types, and ensuring memory settings align with your query layer. These practices help ensure reliable performance, especially under real-world workloads. ## Global secondary indexes -Indexes use multi-shard transactional capability of YugabyteDB and are global and strongly consistent (ACID). To add secondary indexes, you need to create tables with [transactions enabled](../../api/ycql/ddl_create_table/#table-properties-1). They can also be used as materialized views by using the [`INCLUDE` clause](../../api/ycql/ddl_create_index#included-columns). +Indexes use multi-shard transactional capability of YugabyteDB and are global and strongly consistent (ACID). 
To add secondary indexes, you need to create tables with [transactions enabled](../../../api/ycql/ddl_create_table/#table). They can also be used as materialized views by using the [INCLUDE clause](../../../api/ycql/ddl_create_index#included-columns). ## Unique indexes -YCQL supports [unique indexes](../../api/ycql/ddl_create_index#unique-index). A unique index disallows duplicate values from being inserted into the indexed columns. +YCQL supports [unique indexes](../../../api/ycql/ddl_create_index#unique-index). A unique index disallows duplicate values from being inserted into the indexed columns. ## Covering indexes When querying by a secondary index, the original table is consulted to get the columns that aren't specified in the index. This can result in multiple random reads across the main table. -Sometimes, a better way is to include the other columns that you're querying that are not part of the index using the [`INCLUDE` clause](../../api/ycql/ddl_create_index/#included-columns). When additional columns are included in the index, they can be used to respond to queries directly from the index without querying the table. +Sometimes, a better way is to include the other columns that you're querying that are not part of the index using the [INCLUDE clause](../../../api/ycql/ddl_create_index/#included-columns). When additional columns are included in the index, they can be used to respond to queries directly from the index without querying the table. This turns a (possible) random read from the main table to just a filter on the index. @@ -36,23 +36,23 @@ For operations like `UPDATE ... IF EXISTS` and `INSERT ... IF NOT EXISTS` that r ## JSONB -YugabyteDB supports the [`jsonb`](../../api/ycql/type_jsonb/) data type to model JSON data, which does not have a set schema and might change often. You can use JSONB to group less accessed columns of a table. 
YCQL also supports JSONB expression indexes that can be used to speed up data retrieval that would otherwise require scanning the JSON entries. +YugabyteDB supports the [JSONB](../../../api/ycql/type_jsonb/) data type to model JSON data, which does not have a set schema and might change often. You can use JSONB to group less accessed columns of a table. YCQL also supports JSONB expression indexes that can be used to speed up data retrieval that would otherwise require scanning the JSON entries. {{< note title="Use JSONB columns only when necessary" >}} -`jsonb` columns are slower to read and write compared to normal columns. They also take more space because they need to store keys in strings and make keeping data consistency more difficult. A good schema design is to keep most columns as regular columns or collections, and use `jsonb` only for truly dynamic values. Don't create a `data jsonb` column where you store everything; instead, use a `dynamic_data jsonb` column with the others being primitive columns. +JSONB columns are slower to read and write compared to normal columns. They also take more space because they need to store keys in strings and make keeping data consistency more difficult. A good schema design is to keep most columns as regular columns or collections, and use JSONB only for truly dynamic values. Don't create a `data jsonb` column where you store everything; instead, use a `dynamic_data jsonb` column with the others being primitive columns. {{< /note >}} ## Increment and decrement numeric types -In YugabyteDB, YCQL extends Apache Cassandra to add increment and decrement operators for integer data types. [Integers](../../api/ycql/type_int) can be set, inserted, incremented, and decremented while `COUNTER` can only be incremented or decremented. YugabyteDB implements CAS(compare-and-set) operations in one round trip, compared to four for Apache Cassandra. 
+In YugabyteDB, YCQL extends Apache Cassandra to add increment and decrement operators for integer data types. [Integers](../../../api/ycql/type_int) can be set, inserted, incremented, and decremented while `COUNTER` can only be incremented or decremented. YugabyteDB implements CAS (compare-and-set) operations in one round trip, compared to four for Apache Cassandra. ## Expire older records automatically with TTL -YCQL supports automatic expiration of data using the [TTL feature](../../api/ycql/ddl_create_table/#use-table-property-to-define-the-default-expiration-time-for-rows). You can set a retention policy for data at table/row/column level and the older data is automatically purged from the database. +YCQL supports automatic expiration of data using the [TTL feature](../../../api/ycql/ddl_create_table/#use-table-property-to-define-the-default-expiration-time-for-rows). You can set a retention policy for data at table/row/column level and the older data is automatically purged from the database. -If configuring TTL for a time series dataset or any dataset with a table-level TTL, it is recommended for CPU and space efficiency to expire older files directly by using TTL-specific configuration options. More details can be found in [Efficient data expiration for TTL](../learn/ttl-data-expiration-ycql/#efficient-data-expiration-for-ttl). +If configuring TTL for a time series dataset or any dataset with a table-level TTL, it is recommended for CPU and space efficiency to expire older files directly by using TTL-specific configuration options. More details can be found in [Efficient data expiration for TTL](../../learn/ttl-data-expiration-ycql/#efficient-data-expiration-for-ttl). {{}} TTL does not apply to transactional tables and so, its unsupported in that context. 
@@ -60,7 +60,7 @@ TTL does not apply to transactional tables and so, its unsupported in that conte ## Use YugabyteDB drivers -Use YugabyteDB-specific [client drivers](../../drivers-orms/) because they are cluster- and partition-aware, and support `jsonb` columns. +Use YugabyteDB-specific [client drivers](../../../drivers-orms/) because they are cluster- and partition-aware, and support `jsonb` columns. ## Leverage connection pooling in the YCQL client @@ -88,22 +88,22 @@ Collections are designed for storing small sets of values that are not expected ## Collections with many elements -Each element inside a collection ends up as a [separate key value](../../architecture/docdb/data-model#examples) in DocDB adding per-element overhead. +Each element inside a collection ends up as a [separate key value](../../../architecture/docdb/data-model#examples) in DocDB adding per-element overhead. -If your collections are immutable, or you update the whole collection in full, consider using the `JSONB` data type. An alternative would also be to use ProtoBuf or FlatBuffers and store the serialized data in a `BLOB` column. +If your collections are immutable, or you update the whole collection in full, consider using the JSONB data type. An alternative would also be to use ProtoBuf or FlatBuffers and store the serialized data in a BLOB column. ## Use partition_hash for large table scans -`partition_hash` function can be used for querying a subset of the data to get approximate row counts or to break down full-table operations into smaller sub-tasks that can be run in parallel. See [example usage](../../api/ycql/expr_fcall#partition-hash-function) along with a working Python script. +`partition_hash` function can be used for querying a subset of the data to get approximate row counts or to break down full-table operations into smaller sub-tasks that can be run in parallel. See [example usage](../../../api/ycql/expr_fcall#partition-hash-function) along with a working Python script. 
## TRUNCATE tables instead of DELETE -[TRUNCATE](../../api/ycql/dml_truncate/) deletes the database files that store the table and is much faster than [DELETE](../../api/ycql/dml_delete/) which inserts a _delete marker_ for each row in transactions and they are removed from storage when a compaction runs. +[TRUNCATE](../../../api/ycql/dml_truncate/) deletes the database files that store the table and is much faster than [DELETE](../../../api/ycql/dml_delete/) which inserts a _delete marker_ for each row in transactions and they are removed from storage when a compaction runs. ## Memory and tablet limits -If you are not using YSQL, ensure the [use_memory_defaults_optimized_for_ysql](../../reference/configuration/yb-master/#use-memory-defaults-optimized-for-ysql) flag is set to false. This flag optimizes YugabyteDB's memory setup for YSQL, reserving a considerable amount of memory for PostgreSQL; if you are not using YSQL then that memory is wasted when it could be helping improve performance by allowing more data to be cached. +If you are not using YSQL, ensure the [use_memory_defaults_optimized_for_ysql](../../../reference/configuration/yb-master/#use-memory-defaults-optimized-for-ysql) flag is set to false. This flag optimizes YugabyteDB's memory setup for YSQL, reserving a considerable amount of memory for PostgreSQL; if you are not using YSQL then that memory is wasted when it could be helping improve performance by allowing more data to be cached. Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. -See [Memory division flags](../../reference/configuration/yb-tserver/#memory-division-flags) for more information. +See [Memory division flags](../../../reference/configuration/yb-tserver/#memory-division-flags) for more information. 
diff --git a/docs/content/stable/develop/best-practices-develop/clients.md b/docs/content/stable/develop/best-practices-develop/clients.md new file mode 100644 index 000000000000..46b006e03968 --- /dev/null +++ b/docs/content/stable/develop/best-practices-develop/clients.md @@ -0,0 +1,50 @@ +--- +title: Best practices for YSQL clients +headerTitle: Best practices for YSQL clients +linkTitle: YSQL clients +description: Tips and tricks for administering YSQL clients +headcontent: Tips and tricks for administering YSQL clients +menu: + stable: + identifier: best-practices-ysql-clients + parent: best-practices-develop + weight: 20 +type: docs +--- + +Client-side configuration plays a critical role in the performance, scalability, and resilience of YSQL applications. This guide highlights essential best practices for managing connections, balancing load across nodes, and handling failovers efficiently using YugabyteDB's smart drivers and connection pooling. Whether you're deploying in a single region or across multiple data centers, these tips will help ensure your applications make the most of YugabyteDB's distributed architecture. + +## Load balance and failover using smart drivers + +YugabyteDB [smart drivers](../../../drivers-orms/smart-drivers/) provide advanced cluster-aware load-balancing capabilities that enable your applications to send requests to multiple nodes in the cluster by connecting to one node. You can also set a fallback hierarchy by assigning priority to specific regions and ensuring that connections are made to the region with the highest priority, and then fall back to the region with the next priority in case the high-priority region fails. + +{{}} +For more information, see [Load balancing with smart drivers](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#load-balancing-with-smart-driver). 
+{{}} + +## Make sure the application uses new nodes + +When a cluster is expanded, newly added nodes do not automatically start to receive client traffic. Regardless of the language of the driver or whether you are using a smart driver, the application must either explicitly request new connections or, if it is using a pooling solution, it can configure the pooler to recycle connections periodically (for example, by setting maxLifetime and/or idleTimeout). + +## Scale your application with connection pools + +Set up different pools with different load balancing policies as needed for your application to scale by using popular pooling solutions such as HikariCP and Tomcat along with YugabyteDB [smart drivers](../../../drivers-orms/smart-drivers/). + +{{}} +For more information, see [Connection pooling](../../../drivers-orms/smart-drivers/#connection-pooling). +{{}} + +### Database migrations and connection pools + +In some cases, connection pools may trigger unexpected errors while running a sequence of database migrations or other DDL operations. + +Because YugabyteDB is distributed, it can take a while for the result of a DDL to fully propagate to all caches on all nodes in a cluster. As a result, after a DDL statement completes, the next DDL statement that runs right afterwards on a different PostgreSQL connection may, in rare cases, see errors such as `duplicate key value violates unique constraint "pg_attribute_relid_attnum_index"` (see issue {{}}). It is recommended to use a single connection while running a sequence of DDL operations, as is common with application migration scripts with tools such as Flyway or Active Record. + +## Use YSQL Connection Manager + +YugabyteDB includes a built-in connection pooler, YSQL Connection Manager {{}}, which provides the same connection pooling advantages as other external pooling solutions, but without many of their limitations. 
As the manager is bundled with the product, it is convenient to manage, monitor, and configure the server connections. + +For more information, refer to the following: + +- [YSQL Connection Manager](../../../explore/going-beyond-sql/connection-mgr-ysql/) +- [Built-in Connection Manager Turns Key PostgreSQL Weakness into a Strength](https://www.yugabyte.com/blog/connection-pooling-management/) diff --git a/docs/content/stable/develop/best-practices-ysql.md b/docs/content/stable/develop/best-practices-develop/data-modeling-perf.md similarity index 53% rename from docs/content/stable/develop/best-practices-ysql.md rename to docs/content/stable/develop/best-practices-develop/data-modeling-perf.md index ea389c65d8db..e726a5f124c2 100644 --- a/docs/content/stable/develop/best-practices-ysql.md +++ b/docs/content/stable/develop/best-practices-develop/data-modeling-perf.md @@ -1,25 +1,25 @@ --- -title: Best practices for YSQL applications -headerTitle: Best practices -linkTitle: Best practices -description: Tips and tricks to build YSQL applications -headcontent: Tips and tricks to build YSQL applications for high performance and availability +title: Best practices for data modeling and performance of YSQL applications +headerTitle: Best practices for data modeling and performance of YSQL applications +linkTitle: YSQL data modeling +description: Tips and tricks for building YSQL applications +headcontent: Tips and tricks for building YSQL applications menu: stable: - identifier: best-practices-ysql - parent: develop - weight: 570 + identifier: data-modeling-perf + parent: best-practices-develop + weight: 10 type: docs --- -{{}} +Designing efficient, high-performance YSQL applications requires thoughtful data modeling and an understanding of how YugabyteDB handles distributed workloads. 
This guide offers a collection of best practices, from leveraging colocation and indexing techniques to optimizing transactions and parallelizing queries, that can help you build scalable, globally distributed applications with low latency and high availability. Whether you're developing new applications or tuning existing ones, these tips will help you make the most of YSQL's capabilities. ## Use application patterns -Running applications in multiple data centers with data split across them is not a trivial task. When designing global applications, choose a suitable design pattern for your application from a suite of battle-tested design paradigms, including [Global database](../build-global-apps/global-database), [Multi-master](../build-global-apps/active-active-multi-master), [Standby cluster](../build-global-apps/active-active-single-master), [Duplicate indexes](../build-global-apps/duplicate-indexes), [Follower reads](../build-global-apps/follower-reads), and more. You can also combine these patterns as per your needs. +Running applications in multiple data centers with data split across them is not a trivial task. When designing global applications, choose a suitable design pattern for your application from a suite of battle-tested design paradigms, including [Global database](../../build-global-apps/global-database), [Multi-master](../../build-global-apps/active-active-multi-master), [Standby cluster](../../build-global-apps/active-active-single-master), [Duplicate indexes](../../build-global-apps/duplicate-indexes), [Follower reads](../../build-global-apps/follower-reads), and more. You can also combine these patterns as per your needs. {{}} -For more details, see [Build global applications](../build-global-apps). +For more details, see [Build global applications](../../build-global-apps). {{}} ## Colocation @@ -27,14 +27,14 @@ For more details, see [Build global applications](../build-global-apps). 
Colocated tables optimize latency and performance for data access by reducing the need for additional trips across the network for small tables. Additionally, it reduces the overhead of creating a tablet for every relation (tables, indexes, and so on) and their storage per node. {{}} -For more details, see [Colocation](../../explore/colocation/). +For more details, see [Colocation](../../../explore/colocation/). {{}} ## Faster reads with covering indexes When a query uses an index to look up rows faster, the columns that are not present in the index are fetched from the original table. This results in additional round trips to the main table leading to increased latency. -Use [covering indexes](../../explore/ysql-language-features/indexes-constraints/covering-index-ysql/) to store all the required columns needed for your queries in the index. Indexing converts a standard Index-Scan to an [Index-Only-Scan](https://dev.to/yugabyte/boosts-secondary-index-queries-with-index-only-scan-5e7j). +Use [covering indexes](../../../explore/ysql-language-features/indexes-constraints/covering-index-ysql/) to store all the required columns needed for your queries in the index. Indexing converts a standard Index-Scan to an [Index-Only-Scan](https://dev.to/yugabyte/boosts-secondary-index-queries-with-index-only-scan-5e7j). {{}} For more details, see [Avoid trips to the table with covering indexes](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#avoid-trips-to-the-table-with-covering-indexes). @@ -45,7 +45,7 @@ For more details, see [Avoid trips to the table with covering indexes](https://w A partial index is an index that is built on a subset of a table and includes only rows that satisfy the condition specified in the WHERE clause. This speeds up any writes to the table and reduces the size of the index, thereby improving speed for read queries that use the index. 
{{}} -For more details, see [Partial indexes](../../explore/ysql-language-features/indexes-constraints/partial-index-ysql/). +For more details, see [Partial indexes](../../../explore/ysql-language-features/indexes-constraints/partial-index-ysql/). {{}} ## Distinct keys with unique indexes @@ -55,14 +55,14 @@ If you need values in some of the columns to be unique, you can specify your ind When a unique index is applied to two or more columns, the combined values in these columns can't be duplicated in multiple rows. Note that because a NULL value is treated as a distinct value, you can have multiple NULL values in a column with a unique index. {{}} -For more details, see [Unique indexes](../../explore/ysql-language-features/indexes-constraints/unique-index-ysql/). +For more details, see [Unique indexes](../../../explore/ysql-language-features/indexes-constraints/unique-index-ysql/). {{}} ## Faster sequences with server-level caching Sequences in databases automatically generate incrementing numbers, perfect for generating unique values like order numbers, user IDs, check numbers, and so on. They prevent multiple application instances from concurrently generating duplicate values. However, generating sequences on a database that is spread across regions could have a latency impact on your applications. -Enable [server-level caching](../../api/ysql/exprs/func_nextval/#caching-values-on-the-yb-tserver) to improve the speed of sequences, and also avoid discarding many sequence values when an application disconnects. +Enable [server-level caching](../../../api/ysql/exprs/func_nextval/#caching-values-on-the-yb-tserver) to improve the speed of sequences, and also avoid discarding many sequence values when an application disconnects. {{}} For a demo, see the YugabyteDB Friday Tech Talk on [Scaling sequences with server-level caching](https://www.youtube.com/watch?v=hs-CU3vjMQY&list=PL8Z3vt4qJTkLTIqB9eTLuqOdpzghX8H40&index=76). 
@@ -85,15 +85,15 @@ UPDATE txndemo SET v = v + 3 WHERE k=1 RETURNING v; ``` {{}} -For more details, see [Fast single-row transactions](../../develop/learn/transactions/transactions-performance-ysql/#fast-single-row-transactions). +For more details, see [Fast single-row transactions](../../../develop/learn/transactions/transactions-performance-ysql/#fast-single-row-transactions). {{}} ## Delete older data quickly with partitioning -Use [table partitioning](../../explore/ysql-language-features/advanced-features/partitions/) to split your data into multiple partitions according to date so that you can quickly delete older data by dropping the partition. +Use [table partitioning](../../../explore/ysql-language-features/advanced-features/partitions/) to split your data into multiple partitions according to date so that you can quickly delete older data by dropping the partition. {{}} -For more details, see [Partition data by time](../data-modeling/common-patterns/timeseries/partitioning-by-time/). +For more details, see [Partition data by time](../../data-modeling/common-patterns/timeseries/partitioning-by-time/). {{}} ## Use the right data types for partition keys @@ -163,47 +163,12 @@ SELECT * FROM products; ``` {{}} -For more information, see [Data manipulation](../../explore/ysql-language-features/data-manipulation). +For more information, see [Data manipulation](../../../explore/ysql-language-features/data-manipulation). {{}} -## Load balance and failover using smart drivers - -YugabyteDB [smart drivers](../../drivers-orms/smart-drivers/) provide advanced cluster-aware load-balancing capabilities that enables your applications to send requests to multiple nodes in the cluster just by connecting to one node. You can also set a fallback hierarchy by assigning priority to specific regions and ensuring that connections are made to the region with the highest priority, and then fall back to the region with the next priority in case the high-priority region fails. 
- -{{}} -For more information, see [Load balancing with smart drivers](https://www.yugabyte.com/blog/multi-region-database-deployment-best-practices/#load-balancing-with-smart-driver). -{{}} - -## Make sure the application uses new nodes - -When a cluster is expanded, newly added nodes do not automatically start to receive client traffic. Regardless of the language of the driver or whether you are using a smart driver, the application must either explicitly request new connections or, if it is using a pooling solution, it can configure the pooler to recycle connections periodically (for example, by setting maxLifetime and/or idleTimeout). - -## Scale your application with connection pools - -Set up different pools with different load balancing policies as needed for your application to scale by using popular pooling solutions such as HikariCP and Tomcat along with YugabyteDB [smart drivers](../../drivers-orms/smart-drivers/). - -{{}} -For more information, see [Connection pooling](../../drivers-orms/smart-drivers/#connection-pooling). -{{}} - -### Database migrations and connection pools - -In some cases, connection pools may trigger unexpected errors while running a sequence of database migrations or other DDL operations. - -Because YugabyteDB is distributed, it can take a while for the result of a DDL to fully propagate to all caches on all nodes in a cluster. As a result, after a DDL statement completes, the next DDL statement that runs right afterwards on a different PostgreSQL connection may, in rare cases, see errors such as `duplicate key value violates unique constraint "pg_attribute_relid_attnum_index"` (see issue {{}}). It is recommended to use a single connection while running a sequence of DDL operations, as is common with application migration scripts with tools such as Flyway or Active Record. 
- -## Use YSQL Connection Manager - -YugabyteDB includes a built-in connection pooler, YSQL Connection Manager {{}}, which provides the same connection pooling advantages as other external pooling solutions, but without many of their limitations. As the manager is bundled with the product, it is convenient to manage, monitor, and configure the server connections. - -For more information, refer to the following: - -- [YSQL Connection Manager](../../explore/going-beyond-sql/connection-mgr-ysql/) -- [Built-in Connection Manager Turns Key PostgreSQL Weakness into a Strength](https://www.yugabyte.com/blog/connection-pooling-management/) - ## Re-use query plans with prepared statements -Whenever possible, use [prepared statements](../../api/ysql/the-sql-language/statements/perf_prepare/) to ensure that YugabyteDB can re-use the same query plan and eliminate the need for a server to parse the query on each operation. +Whenever possible, use [prepared statements](../../../api/ysql/the-sql-language/statements/perf_prepare/) to ensure that YugabyteDB can re-use the same query plan and eliminate the need for a server to parse the query on each operation. {{}} @@ -228,12 +193,12 @@ For more details, see [Prepared statements in PL/pgSQL](https://dev.to/aws-heroe Use BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE for batch or long-running jobs, which need a consistent snapshot of the database without interfering, or being interfered with by other transactions. {{}} -For more details, see [Large scans and batch jobs](../../develop/learn/transactions/transactions-performance-ysql/#large-scans-and-batch-jobs). +For more details, see [Large scans and batch jobs](../../../develop/learn/transactions/transactions-performance-ysql/#large-scans-and-batch-jobs). {{}} ## JSONB datatype -Use the [JSONB](../../api/ysql/datatypes/type_json) datatype to model JSON data; that is, data that doesn't have a set schema but has a truly dynamic schema. 
+Use the [JSONB](../../../api/ysql/datatypes/type_json) datatype to model JSON data; that is, data that doesn't have a set schema but has a truly dynamic schema. JSONB in YSQL is the same as the [JSONB datatype in PostgreSQL](https://www.postgresql.org/docs/11/datatype-json.html). @@ -257,13 +222,9 @@ YSQL also supports JSONB expression indexes, which can be used to speed up data For large or batch SELECT or DELETE that have to scan all tablets, you can parallelize your operation by creating queries that affect only a specific part of the tablet using the `yb_hash_code` function. {{}} -For more details, see [Distributed parallel queries](../../api/ysql/exprs/func_yb_hash_code/#distributed-parallel-queries). +For more details, see [Distributed parallel queries](../../../api/ysql/exprs/func_yb_hash_code/#distributed-parallel-queries). {{}} -## Single availability zone (AZ) deployments - -In single AZ deployments, you need to set the [yb-tserver](../../reference/configuration/yb-tserver) flag `--durable_wal_write=true` to not lose data if the whole data center goes down (For example, power failure). - ## Row size limit Big columns add up when you select full or multiple rows. For consistent latency or performance, it is recommended keeping the size under 10MB or less, and a maximum of 32MB. @@ -274,43 +235,23 @@ For consistent latency or performance, it is recommended to size columns in the ## TRUNCATE tables instead of DELETE -[TRUNCATE](../../api/ysql/the-sql-language/statements/ddl_truncate/) deletes the database files that store the table data and is much faster than [DELETE](../../api/ysql/the-sql-language/statements/dml_delete/), which inserts a _delete marker_ for each row in transactions that are later removed from storage during compaction runs. 
+[TRUNCATE](../../../api/ysql/the-sql-language/statements/ddl_truncate/) deletes the database files that store the table data and is much faster than [DELETE](../../../api/ysql/the-sql-language/statements/dml_delete/), which inserts a _delete marker_ for each row in transactions that are later removed from storage during compaction runs. {{}} -Currently, TRUNCATE is not transactional. Also, similar to PostgreSQL, TRUNCATE is not MVCC-safe. For more details, see [TRUNCATE](../../api/ysql/the-sql-language/statements/ddl_truncate/). +Currently, TRUNCATE is not transactional. Also, similar to PostgreSQL, TRUNCATE is not MVCC-safe. For more details, see [TRUNCATE](../../../api/ysql/the-sql-language/statements/ddl_truncate/). {{}} ## Minimize the number of tablets you need Each table and index is split into tablets and each tablet has overhead. The more tablets you need, the bigger your universe will need to be. See [allowing for tablet replica overheads](#allowing-for-tablet-replica-overheads) for how the number of tablets affects how big your universe needs to be. -Each table and index consists of several tablets based on the [--ysql_num_shards_per_tserver](../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. +Each table and index consists of several tablets based on the [--ysql_num_shards_per_tserver](../../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. You can try one of the following methods to reduce the number of tablets: -- Use [colocation](../../explore/colocation/) to group small tables into 1 tablet. -- Reduce number of tablets-per-table using the [--ysql_num_shards_per_tserver](../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. -- Use the [SPLIT INTO](../../api/ysql/the-sql-language/statements/ddl_create_table/#split-into) clause when creating a table. -- Start with few tablets and use [automatic tablet splitting](../../architecture/docdb-sharding/tablet-splitting/). 
+- Use [colocation](../../../explore/colocation/) to group small tables into 1 tablet. +- Reduce number of tablets-per-table using the [--ysql_num_shards_per_tserver](../../../reference/configuration/yb-tserver/#yb-num-shards-per-tserver) flag. +- Use the [SPLIT INTO](../../../api/ysql/the-sql-language/statements/ddl_create_table/#split-into) clause when creating a table. +- Start with few tablets and use [automatic tablet splitting](../../../architecture/docdb-sharding/tablet-splitting/). Note that multiple tablets can allow work to proceed in parallel so you may not want every table to have only one tablet. - -## Allow for tablet replica overheads - -Although you can manually provision the amount of memory each TServer uses using flags ([--memory_limit_hard_bytes](../../reference/configuration/yb-tserver/#memory-limit-hard-bytes) or [--default_memory_limit_to_ram_ratio](../../reference/configuration/yb-tserver/#default-memory-limit-to-ram-ratio)), this can be tricky as you need to take into account how much memory the kernel needs, along with the PostgreSQL processes and any Master process that is going to be colocated with the TServer. - -Accordingly, you should use the [--use_memory_defaults_optimized_for_ysql](../../reference/configuration/yb-tserver/#use-memory-defaults-optimized-for-ysql) flag, which gives good memory division settings for using YSQL, optimized for your node's size. - -If this flag is true, then the [memory division flag defaults](../../reference/configuration/yb-tserver/#memory-division-flags) change to provide much more memory for PostgreSQL; furthermore, they optimize for the node size. - -Note that although the default setting is false, when creating a new universe using yugabyted or YugabyteDB Anywhere, the flag is set to true, unless you explicitly set it to false. 
- -## Settings for CI and CD integration tests - -You can set certain flags to increase performance using YugabyteDB in CI and CD automated test scenarios as follows: - -- Point the flags `--fs_data_dirs`, and `--fs_wal_dirs` to a RAMDisk directory to make DML, DDL, cluster creation, and cluster deletion faster, ensuring that data is not written to disk. -- Set the flag `--yb_num_shards_per_tserver=1`. Reducing the number of shards lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. -- Use colocated databases in YSQL. Colocation lowers overhead when creating or dropping YSQL tables, and writing or reading small amounts of data. -- Set the flag `--replication_factor=1` for test scenarios, as keeping the data three way replicated (default) is not necessary. Reducing that to 1 reduces space usage and increases performance. -- Use `TRUNCATE table1,table2,table3..tablen;` instead of CREATE TABLE, and DROP TABLE between test cases. diff --git a/docs/content/stable/develop/change-data-capture/using-logical-replication/_index.md b/docs/content/stable/develop/change-data-capture/using-logical-replication/_index.md index 4a2312bcb71c..f61390e3aaf7 100644 --- a/docs/content/stable/develop/change-data-capture/using-logical-replication/_index.md +++ b/docs/content/stable/develop/change-data-capture/using-logical-replication/_index.md @@ -81,7 +81,7 @@ For reference documentation, see [YugabyteDB Connector](./yugabytedb-connector/) ## Limitations -- LSN Comparisons Across Slots. +- Log Sequence Number ([LSN](../using-logical-replication/key-concepts/#lsn-type)) Comparisons Across Slots. In the case of YugabyteDB, the LSN  does not represent the byte offset of a WAL record. Hence, arithmetic on LSN and any other usages of the LSN making this assumption will not work. Also, currently, comparison of LSN values from messages coming from different replication slots is not supported. 
diff --git a/docs/content/stable/develop/change-data-capture/using-logical-replication/key-concepts.md b/docs/content/stable/develop/change-data-capture/using-logical-replication/key-concepts.md index be355538b081..00ec994d4f7c 100644 --- a/docs/content/stable/develop/change-data-capture/using-logical-replication/key-concepts.md +++ b/docs/content/stable/develop/change-data-capture/using-logical-replication/key-concepts.md @@ -20,7 +20,7 @@ The YugabyteDB logical replication feature uses [PostgreSQL Logical Replication] A replication slot represents a stream of changes that can be replayed to a client in the order they were made on the origin server. Each slot streams a sequence of changes from a single database. -In logical replication, the fundamental unit of data transmission is a transaction. A logical slot emits each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting LSN falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. +In logical replication, the fundamental unit of data transmission is a transaction. A logical slot emits each change just once in normal operation. 
The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting Log Sequence Number ([LSN](#lsn)) falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. For more information, refer to [Replication slots](https://www.postgresql.org/docs/11/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS) in the PostgreSQL documentation. diff --git a/docs/content/stable/develop/change-data-capture/using-logical-replication/monitor.md b/docs/content/stable/develop/change-data-capture/using-logical-replication/monitor.md index 3ae7c7cd31a8..2a7d7f04d28f 100644 --- a/docs/content/stable/develop/change-data-capture/using-logical-replication/monitor.md +++ b/docs/content/stable/develop/change-data-capture/using-logical-replication/monitor.md @@ -64,7 +64,7 @@ Provides a list of all replication slots that currently exist on the database cl | active_pid | integer | The process ID of the session using this slot if the slot is currently actively being used. `NULL` if no replication process is ongoing. | | xmin | xid | The oldest transaction that this slot needs the database to retain. | | catalog_xmin | xid | Not applicable for YSQL. Always set to xmin. | -| restart_lsn | pg_lsn | The LSN of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. 
| +| restart_lsn | pg_lsn | The Log Sequence Number ([LSN](../key-concepts/#lsn-type)) of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. | | confirmed_flush_lsn | pg_lsn | The LSN up to which the logical slot's consumer has confirmed receiving data. Data older than this is not available anymore. Transactions with commit LSN lower than the `confirmed_flush_lsn` are not available anymore. | | yb_stream_id | text | UUID of the CDC stream | | yb_restart_commit_ht | int8 | A uint64 representation of the commit Hybrid Time corresponding to the `restart_lsn`. This can be used by the client (like YugabyteDB connector) to perform a consistent snapshot (as of the `consistent_point`) in the case when a replication slot already exists. | diff --git a/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md b/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md index b02e31f8b12e..dc891b946ed3 100644 --- a/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md +++ b/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector-properties.md @@ -480,7 +480,7 @@ No default. Specifies the criteria for performing a snapshot when the connector starts: * `initial` - The connector performs a snapshot only when no offsets have been recorded for the logical server name. -* `never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the YugabyteDB logical replication slot was created on the server. 
The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL. +* `never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored Log Sequence Number ([LSN](../key-concepts/#lsn-type)) in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the YugabyteDB logical replication slot was created on the server. The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL. * `initial_only` - The connector performs an initial snapshot and then stops, without processing any subsequent changes. Default: `initial` diff --git a/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md b/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md index f5737f909a5d..4fd1c9682525 100644 --- a/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md +++ b/docs/content/stable/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md @@ -35,7 +35,7 @@ The connector produces a change event for every row-level insert, update, and de YugabyteDB normally purges write-ahead log (WAL) segments after some period of time. This means that the connector does not have the complete history of all changes that have been made to the database. Therefore, when the YugabyteDB connector first connects to a particular YugabyteDB database, it starts by performing a consistent snapshot of each of the configured tables. After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made. 
This way, the connector starts with a consistent view of all of the data, and does not omit any changes that were made while the snapshot was being taken. -The connector is tolerant of failures. As the connector reads changes and produces events, it records the LSN for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. +The connector is tolerant of failures. As the connector reads changes and produces events, it records the Log Sequence Number ([LSN](../key-concepts/#lsn-type)) for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. {{< tip title="Use UTF-8 encoding" >}} diff --git a/docs/content/stable/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md b/docs/content/stable/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md index d7fedaeb13d1..bf1f60a06f1f 100644 --- a/docs/content/stable/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md +++ b/docs/content/stable/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md @@ -115,7 +115,7 @@ The YugabyteDB gRPC Connector typically spends the vast majority of its time str The connector keeps polling for changes and whenever there is a change, the connector processes them, converts them to a specific format (Protobuf or JSON in the case of the Debezium plugin) and writes them on an output stream, which can then be consumed by clients. -The connector acts as a YugabyteDB client. When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the LSN of the event. 
The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. +The connector acts as a YugabyteDB client. When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the Log Sequence Number ([LSN](../../using-logical-replication/key-concepts/#lsn-type)) of the event. The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. Periodically, Kafka Connect records the most recent offset in another Kafka topic. The offset indicates source-specific position information that Debezium includes with each event. diff --git a/docs/content/stable/develop/postgresql-compatibility.md b/docs/content/stable/develop/postgresql-compatibility.md index 958b5cf134bc..6b0117512540 100644 --- a/docs/content/stable/develop/postgresql-compatibility.md +++ b/docs/content/stable/develop/postgresql-compatibility.md @@ -14,7 +14,7 @@ rightNav: YugabyteDB is a [PostgreSQL-compatible](https://www.yugabyte.com/tech/postgres-compatibility/) distributed database that supports the majority of PostgreSQL syntax. YugabyteDB is methodically expanding its features to deliver PostgreSQL-compatible performance that can substantially improve your application's efficiency. -To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. 
EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. Here are the features that are part of the EPCM mode. +To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. The following features are part of EPCM. | Feature | Flag/Configuration Parameter | EA | GA | | :--- | :--- | :--- | :--- | @@ -25,10 +25,7 @@ To test and take advantage of features developed for enhanced PostgreSQL compati | [Ascending indexing by default](#default-ascending-indexing) | [yb_use_hash_splitting_by_default](../../reference/configuration/yb-tserver/#yb-use-hash-splitting-by-default) | {{}} | | | [YugabyteDB bitmap scan](#yugabytedb-bitmap-scan) | [yb_enable_bitmapscan](../../reference/configuration/yb-tserver/#yb-enable-bitmapscan) | {{}} | {{}} | | [Efficient communication
between PostgreSQL and DocDB](#efficient-communication-between-postgresql-and-docdb) | [pg_client_use_shared_memory](../../reference/configuration/yb-tserver/#pg-client-use-shared-memory) | {{}} | {{}} | - -| Planned Feature | Flag/Configuration Parameter | EA | -| :--- | :--- | :--- | -| [Parallel query](#parallel-query) | | Planned | +| [Parallel query](#parallel-query) | [yb_enable_parallel_append](../../reference/configuration/yb-tserver/#yb-enable-parallel-append) | {{}} | v2025.1 | ## Feature availability @@ -107,7 +104,7 @@ Default ascending indexing provides feature compatibility and is the default in Configuration parameter: `yb_enable_bitmapscan=true` -Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing AND and OR conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). +Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing `AND` and `OR` conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). ### Efficient communication between PostgreSQL and DocDB @@ -115,13 +112,21 @@ Configuration parameter: `pg_client_use_shared_memory=true` Enable more efficient communication between YB-TServer and PostgreSQL using shared memory. This feature provides improved performance parity. 
-## Planned features +### Parallel query + +{{< note title="Note" >}} -The following features are planned for EPCM in future releases. +Parallel query is {{}} in v2024.2.3 but has not yet been added to EPCM. -### Parallel query +{{< /note >}} -Enables the use of PostgreSQL [parallel queries](https://www.postgresql.org/docs/11/parallel-query.html). Using parallel queries, the query planner can devise plans that leverage multiple CPUs to answer queries faster. Parallel query provides feature compatibility and improved performance parity. +Configuration parameters: `yb_enable_parallel_append=true` `yb_parallel_range_rows` + +Enables the use of [PostgreSQL parallel queries](https://www.postgresql.org/docs/15/parallel-query.html). Using parallel queries, the query planner can devise plans that leverage multiple CPUs to answer queries faster. Currently, YugabyteDB supports parallel query for colocated tables. Support for hash- and range-sharded tables is planned. Parallel query provides feature compatibility and improved performance parity. + +{{}} +To learn about parallel queries, see [Parallel queries](../../explore/ysql-language-features/advanced-features/parallel-query/). +{{}} ## Enable EPCM diff --git a/docs/content/stable/explore/observability/active-session-history.md b/docs/content/stable/explore/observability/active-session-history.md index 0a3ea5937d43..c7bc68262bd5 100644 --- a/docs/content/stable/explore/observability/active-session-history.md +++ b/docs/content/stable/explore/observability/active-session-history.md @@ -48,7 +48,7 @@ You can also use the following flags based on your requirements. ## Limitations -Note that the following limitations are subject to change as the feature is in [Tech Preview](/preview/releases/versioning/#feature-maturity). +Note that the following limitations are subject to change. - ASH is available per node and is not aggregated across the cluster. 
- ASH is not available for [YB-Master](../../../architecture/yb-master/) processes. diff --git a/docs/content/stable/explore/ysql-language-features/advanced-features/parallel-query.md b/docs/content/stable/explore/ysql-language-features/advanced-features/parallel-query.md new file mode 100644 index 000000000000..c19a7b5d1e28 --- /dev/null +++ b/docs/content/stable/explore/ysql-language-features/advanced-features/parallel-query.md @@ -0,0 +1,39 @@ +--- +title: Parallel queries +linkTitle: Parallel queries +description: Parallel queries in YSQL +tags: + feature: early-access +menu: + stable: + identifier: advanced-features-parallel-query + parent: advanced-features + weight: 600 +type: docs +--- + +YugabyteDB supports the use of [PostgreSQL parallel queries](https://www.postgresql.org/docs/15/parallel-query.html). Using parallel queries, the [query planner](../../../../architecture/query-layer/planner-optimizer/) can devise plans that leverage multiple CPUs to answer queries faster. + +Currently, YugabyteDB supports parallel queries for [colocated tables](../../../colocation/); support for hash- and range-sharded tables is planned. + +To enable and configure parallel queries, set the following configuration parameters. + +| Parameter | Description | Default | +| :--- | :--- | :--- | +| yb_enable_parallel_append | Enables the planner's use of parallel append plans. To enable parallel query, set this to true. | false | +| yb_parallel_range_rows | The number of rows to plan per parallel worker. To enable parallel query, set this to a value other than 0. (Recommended: 10000) | 0 | +| yb_parallel_range_size | Approximate size of parallel range for DocDB relation scans. | 1MB | + +In addition, you can use the following PostgreSQL configuration parameters to configure parallel queries: + +- Optimize the number of workers used by the parallel query. 
+ - [max_parallel_workers](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-WORKERS) + - [max_parallel_workers_per_gather](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-WORKERS-PER-GATHER) + - [max_parallel_maintenance_workers](https://www.postgresql.org/docs/15/runtime-config-resource.html#GUC-MAX-PARALLEL-MAINTENANCE-WORKERS) +- Optimize cost of parallel plan to achieve the optimal plan. + - [parallel_setup_cost](https://www.postgresql.org/docs/15/runtime-config-query.html#GUC-PARALLEL-SETUP-COST) + - [parallel_tuple_cost](https://www.postgresql.org/docs/15/runtime-config-query.html#GUC-PARALLEL-TUPLE-COST) +- Enable or disable the query planner's use of hash-join plan types with parallel hash. Has no effect if hash-join plans are not also enabled. The default is on. + - [enable_parallel_hash](https://www.postgresql.org/docs/15/runtime-config-query.html#GUC-ENABLE-PARALLEL-HASH) + +For more information, refer to [How Parallel Query Works](https://www.postgresql.org/docs/15/how-parallel-query-works.html) in the PostgreSQL documentation. diff --git a/docs/content/stable/explore/ysql-language-features/pg-extensions/_index.md b/docs/content/stable/explore/ysql-language-features/pg-extensions/_index.md index 5cb39aa6a987..c978593e7236 100644 --- a/docs/content/stable/explore/ysql-language-features/pg-extensions/_index.md +++ b/docs/content/stable/explore/ysql-language-features/pg-extensions/_index.md @@ -31,6 +31,8 @@ YugabyteDB supports the following [PostgreSQL modules](https://www.postgresql.or | Module | Description | | :----- | :---------- | | [auto_explain](extension-auto-explain/) | Provides a means for logging execution plans of slow statements automatically. | +| cube| Implements a data type cube for representing multidimensional cubes.
For more information, see [cube](https://www.postgresql.org/docs/11/cube.html) in the PostgreSQL documentation. | +| earthdistance| Provides two different approaches to calculating great circle distances on the surface of the Earth.
For more information, see [earthdistance](https://www.postgresql.org/docs/11/earthdistance.html) in the PostgreSQL documentation. | | [file_fdw](extension-file-fdw/) | Provides the foreign-data wrapper file_fdw, which can be used to access data files in the server's file system. | | [fuzzystrmatch](extension-fuzzystrmatch/) | Provides several functions to determine similarities and distance between strings. | | hstore | Implements the hstore data type for storing sets of key-value pairs in a single PostgreSQL value.
For more information, see [hstore](https://www.postgresql.org/docs/11/hstore.html) in the PostgreSQL documentation. | @@ -50,6 +52,7 @@ YugabyteDB supports the following additional extensions, some of which you must | Extension | Status | Description | | :-------- | :----- | :---------- | +| [Anonymizer](extension-pganon/) {{}}| Pre-bundled | Mask or replace personally identifiable information (PII) or commercially sensitive data in a database. | | [HypoPG](extension-hypopg/) | Pre-bundled | Create hypothetical indexes to test whether an index can increase performance for problematic queries without consuming any actual resources. | | Orafce | Pre-bundled | Provides compatibility with Oracle functions and packages that are either missing or implemented differently in YugabyteDB and PostgreSQL. This compatibility layer can help you port your Oracle applications to YugabyteDB.
For more information, see the [Orafce](https://github.com/orafce/orafce) documentation. | | [PGAudit](../../../secure/audit-logging/audit-logging-ysql/) | Pre-bundled | The PostgreSQL Audit Extension (pgaudit) provides detailed session and/or object audit logging via the standard PostgreSQL logging facility. | diff --git a/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pganon.md b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pganon.md new file mode 100644 index 000000000000..71104346c948 --- /dev/null +++ b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pganon.md @@ -0,0 +1,318 @@ +--- +title: PostgreSQL Anonymizer extension +headerTitle: PostgreSQL Anonymizer extension +linkTitle: Anonymizer +description: Using the PostgreSQL Anonymizer extension in YugabyteDB +tags: + feature: early-access +menu: + stable: + identifier: extension-pganon + parent: pg-extensions + weight: 20 +type: docs +--- + +The [PostgreSQL Anonymizer](https://postgresql-anonymizer.readthedocs.io/en/stable/) extension can be used for masking or replacing personally identifiable information (PII) or commercially sensitive data in a YSQL database. + +The extension has a declarative approach to anonymization. This means you can declare the masking rules using the PostgreSQL Data Definition Language (DDL) and specify your anonymization policy inside the table definition itself. + +YugabyteDB uses v1.3.1 of PostgreSQL Anonymizer. + +## Enable Anonymizer + +While in early access, to enable the Anonymizer extension, you set the YB-TServer `--enable_pg_anonymizer` flag to true. For example, using [yugabyted](../../../../reference/configuration/yugabyted/), you would do the following: + +```sh +./bin/yugabyted start --tserver_flags="enable_pg_anonymizer=true" +``` + +Note that modifying `--enable_pg_anonymizer` requires restarting the YB-TServer. 
+ +## Customize Anonymizer + +You can customize the following anon parameters: + +| Parameter | Description | Default | +| :--- | :--- | :--- | +| anon.algorithm | The hashing method used by pseudonymizing functions. | sha256 | +| anon.maskschema | The schema (that is, the namespace) where the dynamic masking views will be stored. | mask | +| anon.restrict_to_trusted_schemas | By enabling this parameter, masking rules must be defined using functions located in a limited list of namespaces. | true | +| anon.salt | The salt used by pseudonymizing functions. | (empty) | +| anon.sourceschema | The schema (that is, the namespace) where the tables are masked by the dynamic masking engine. | public | + +For more information, refer to [Configuration](https://postgresql-anonymizer.readthedocs.io/en/stable/configure/) in the Anonymizer documentation. + +## Create the anon extension + +To enable the extension: + +```sql +CREATE EXTENSION anon; +``` + +If you want to use the `anon.fake_*` functions, you need to load the fake data (see [Declare masking rules](#declare-masking-rules)). + +```sql +BEGIN; +SET LOCAL yb_non_ddl_txn_for_sys_tables_allowed TO TRUE; +SELECT anon.init(); -- Loads fake data +COMMIT; +``` + +## Use Anonymizer + +### Declare masking rules + +To use Anonymizer, you first declare a masking policy. A masking policy is a set of masking rules stored inside the database model and applied to various database objects. + +You declare data masking rules using [security labels](https://www.postgresql.org/docs/15/sql-security-label.html).
For example: + +```sql +CREATE TABLE player( id SERIAL, name TEXT, total_points INT, highest_score INT); + +INSERT INTO player VALUES + ( 1, 'Kareem Abdul-Jabbar', 38387, 55), + ( 5, 'Michael Jordan', 32292, 69); + +SECURITY LABEL FOR anon ON COLUMN player.name + IS 'MASKED WITH FUNCTION anon.fake_last_name()'; + +SECURITY LABEL FOR anon ON COLUMN player.id + IS 'MASKED WITH VALUE NULL'; +``` + +Anonymizer provides many different functions that you can use to declare the masking rules. For a list of masking functions, refer to [Masking functions](https://postgresql-anonymizer.readthedocs.io/en/stable/masking_functions/) in the Anonymizer documentation. + +Note that YugabyteDB does not currently support the `anon.dummy_` functions. + +Refer to [Declare masking rules](https://postgresql-anonymizer.readthedocs.io/en/stable/declare_masking_rules/) in the Anonymizer documentation for more information. + +### Dynamic masking + +To enable dynamic masking: + +```sql +SELECT anon.start_dynamic_masking(false); +``` + +You must run this every time a masked security label is created for a user or role. + +The boolean parameter indicates whether fake data should be loaded or not. It is recommended to use `anon.init()` to load fake data. This creates masked views on the `anon.maskschema` for all the tables present in `anon.sourceschema`, and alters the privileges of all users with a masked security label so that a masked user will only be able to read masked data and not the original data. + +To check if a role with a masked security label will see masked values, use the following query: + +```sql +SELECT rolname, rolconfig FROM pg_roles WHERE rolname = '<role_name>' +``` + +If `rolconfig` contains `search_path=<maskschema>, <sourceschema>`, then the masked user will see masked values.
+ +The following shows output where the `anon.maskschema` and `anon.sourceschema` parameters are set to their default values: + +```output + rolname | rolconfig +---------+------------------------------ + skynet | {"search_path=mask, public"} +``` +Note that [Backup and restore](../../../../manage/backup-restore/) doesn't preserve roles, and will also not restore masked security labels for roles. After a restore, you will need to manually recreate security labels for roles, and then enable dynamic masking. + +To disable dynamic masking: + +```sql +SELECT anon.stop_dynamic_masking(); +``` + +This drops all the masked views, unmasks all the masked roles, and drops `anon.maskschema`. + +### Static masking + +After declaring masking rules for columns, you can use the following functions to define masks: + +- `anon.anonymize_column(<table_name>, <column_name>)` - Anonymize the column of the table with the declared masking rule. +- `anon.anonymize_table(<table_name>)` - Anonymize all the columns with masking rules of the table. +- `anon.anonymize_database()` - Anonymize all the columns with masking rules of all the tables in the given database. + +With static masking, the data is lost forever once it is anonymized. Refer to [Static masking](https://postgresql-anonymizer.readthedocs.io/en/stable/static_masking/) in the Anonymizer documentation for more information.
+ +## Examples + +### Dynamically mask contact information + +```sql +CREATE EXTENSION anon; + +BEGIN; +SET LOCAL yb_non_ddl_txn_for_sys_tables_allowed TO true; +SELECT anon.init(); -- Loads fake data +COMMIT; + +CREATE TABLE people ( id TEXT, firstname TEXT, lastname TEXT, phone TEXT); +INSERT INTO people VALUES ('1', 'John', 'Doe','1234567890'); +SELECT * FROM people; -- non masked user can read original values +``` + +```output + id | firstname | lastname | phone +----+-----------+----------+------------ + 1 | John | Doe | 1234567890 +``` + +```sql +SECURITY LABEL FOR anon ON COLUMN people.phone +IS 'MASKED WITH FUNCTION anon.partial(phone, 2, $$******$$, 2)'; + +CREATE ROLE skynet LOGIN; +SECURITY LABEL FOR anon ON ROLE skynet IS 'MASKED'; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO skynet; + +\c yugabyte skynet + +SELECT * FROM people; +``` + +```output + id | firstname | lastname | phone +----+-----------+----------+------------ + 1 | John | Doe | 1234567890 +``` + +Note how, as we have not yet started dynamic masking, the data is not masked for the masked user. 
+ +```sql +\c yugabyte yugabyte + +SELECT anon.start_dynamic_masking(false); + +\c yugabyte skynet + +SELECT * FROM people; +``` + +```output + id | firstname | lastname | phone +----+-----------+----------+------------ + 1 | John | Doe | 12******90 +``` + +### Prevent a user from reading values from the source table + +First set the cluster-wide value of `anon.sourceschema` to `test`: + +```sh +--ysql_pg_conf_csv=shared_preload_libraries=anon,anon.sourceschema=test +``` + +Execute the following commands: + +```sql +CREATE EXTENSION anon; + +BEGIN; +SET LOCAL yb_non_ddl_txn_for_sys_tables_allowed TO true; +SELECT anon.init(); -- Loads fake data +COMMIT; + +CREATE SCHEMA test; + +CREATE TABLE test.people ( id TEXT, firstname TEXT, lastname TEXT, phone TEXT); +INSERT INTO test.people VALUES ('1', 'John', 'Doe','1234567890'); + +CREATE ROLE skynet LOGIN; +SECURITY LABEL FOR anon ON ROLE skynet IS 'MASKED'; + +SECURITY LABEL FOR anon ON COLUMN test.people.phone +IS 'MASKED WITH FUNCTION anon.partial(phone, 2, $$******$$, 2)'; + +SELECT anon.start_dynamic_masking(false); + +\c yugabyte skynet + +SELECT * FROM
people; +``` + +```output + id | firstname | lastname | phone +----+-----------+----------+------------ + 1 | John | Bryant | 12******90 +``` + +### Mask without faking + +```sql +CREATE EXTENSION anon; + +CREATE TABLE people ( id TEXT, firstname TEXT, lastname TEXT, phone TEXT); +INSERT INTO people VALUES ('1', 'John', 'Doe', '1234567890'); + +SECURITY LABEL FOR anon ON COLUMN people.firstname +IS 'MASKED WITH VALUE $$CONFIDENTIAL$$'; + +SECURITY LABEL FOR anon ON COLUMN people.lastname +IS 'MASKED WITH VALUE $$CONFIDENTIAL$$'; + +SECURITY LABEL FOR anon ON COLUMN people.phone +IS 'MASKED WITH FUNCTION anon.partial(phone, 2, $$******$$, 2)'; + +CREATE ROLE skynet LOGIN; +SECURITY LABEL FOR anon ON ROLE skynet IS 'MASKED'; + +SELECT anon.start_dynamic_masking(false); + +\c yugabyte skynet + +SELECT * FROM people; +``` + +```output + id | firstname | lastname | phone +----+--------------+--------------+------------ + 1 | CONFIDENTIAL | CONFIDENTIAL | 12******90 +``` + +## Limitations + +- Masking views and materialized views are not supported. +- The SECURITY LABEL commands on tables and databases are not supported. +- YugabyteDB does not currently support the `anon.dummy_` functions. + +Refer to [Masking rule limitations](https://postgresql-anonymizer.readthedocs.io/en/stable/declare_masking_rules/#limitations) and [Legacy masking rule limitations](https://postgresql-anonymizer.readthedocs.io/en/stable/legacy_dynamic_masking/#limitations) in the Anonymizer documentation for information on the Anonymizer extension limitations. 
diff --git a/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pgvector.md b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pgvector.md index cd040721424f..2646c9519dff 100644 --- a/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pgvector.md +++ b/docs/content/stable/explore/ysql-language-features/pg-extensions/extension-pgvector.md @@ -155,6 +155,6 @@ SELECT category_id, AVG(embedding) FROM items GROUP BY category_id; ## Read more - Tutorial: [Build and Learn](/preview/tutorials/build-and-learn/) -- Tutorial: [Build scalable generative AI applications with Azure OpenAI and YugabyteDB](/preview/tutorials/azure/azure-openai/) +- Tutorials: [Build scalable generative AI applications with YugabyteDB](/preview/tutorials/ai/) - [PostgreSQL pgvector: Getting Started and Scaling](https://www.yugabyte.com/blog/postgresql-pgvector-getting-started/) - [Multimodal Search with PostgreSQL pgvector](https://www.yugabyte.com/blog/postgresql-pgvector-multimodal-search/) diff --git a/docs/content/stable/launch-and-manage/monitor-and-alert/_index.md b/docs/content/stable/launch-and-manage/monitor-and-alert/_index.md index b43b1882020d..1facc8fffc64 100644 --- a/docs/content/stable/launch-and-manage/monitor-and-alert/_index.md +++ b/docs/content/stable/launch-and-manage/monitor-and-alert/_index.md @@ -11,16 +11,18 @@ menu: type: indexpage --- - +{{}} + + {{}} + + {{}} + +{{}} diff --git a/docs/content/stable/launch-and-manage/monitor-and-alert/xcluster-monitor.md b/docs/content/stable/launch-and-manage/monitor-and-alert/xcluster-monitor.md new file mode 100644 index 000000000000..bc7414455c7f --- /dev/null +++ b/docs/content/stable/launch-and-manage/monitor-and-alert/xcluster-monitor.md @@ -0,0 +1,207 @@ +--- +title: Monitor xCluster +headerTitle: Monitor xCluster +linkTitle: xCluster +description: Monitoring the health of xCluster replication +headContent: Monitoring the state and health of xCluster 
replication +menu: + stable: + parent: monitor-and-alert + identifier: xcluster-monitor + weight: 110 +type: docs +--- + +## Metrics + +The list of xCluster metrics is available in the [xCluster metrics](../metrics/replication/). + +## Console + + +
+
+ + + +Use the [xcluster status](../../../reference/configuration/yugabyted/#status-1) sub command to display information about the specified xCluster replication. You can run the command on either the source or target cluster. + +```sh +./bin/yugabyted xcluster status \ + [--replication_id ] +``` + +Example output: + +```output +Outbound xCluster Replications: + ++------------------------------------------------------------------------------------------+ +| yugabyted | ++------------------------------------------------------------------------------------------+ +| Replication 1 : | +| Replication ID : rg1 | +| State : REPLICATING | +| Target Universe UUID : 2e3ff9c4-2c5a-4dc6-a6bc-20e2548b9f09 | +| Databases : Following are the databases included in this replication | +| : Database 1: | +| : Name: yugabyte | +| : State: READY | ++------------------------------------------------------------------------------------------+ + +No Inbound xCluster replications found for this cluster. +``` + +
+ +
+ + + +To list outgoing groups on the Primary universe, use the [list_xcluster_outbound_replication_groups](../../../admin/yb-admin/#list-xcluster-outbound-replication-groups) command: + +```sh +./bin/yb-admin \ + -master_addresses \ + list_xcluster_outbound_replication_groups \ + [namespace_id] +``` + +To list inbound groups on the Standby universe, use the [list_universe_replications](../../../admin/yb-admin/#list-universe-replications) command: + +```sh +./bin/yb-admin \ + -master_addresses \ + list_universe_replications \ + [namespace_id] +``` + +To get the status of the replication group, use [get_replication_status](../../../admin/yb-admin/#get-replication-status): + +```sh +yb-admin \ + -master_addresses \ + get_replication_status \ + [] +``` + +
+ +
+ +## xCluster safe time + +In transactional xCluster replication setups, the current xCluster safe time is the safe time as of which consistent reads are performed on the target universe. You can use the following commands to see the current xCluster safe time: + + +
+
+ +```sh +./bin/yugabyted xcluster status \ + [--replication_id ] +``` + +Example output: + +```output +No Outbound xCluster replications found for this cluster. +Inbound xCluster Replications: + ++------------------------------------------------------------------------------------------+ +| yugabyted | ++------------------------------------------------------------------------------------------+ +| Replication 1 : | +| Replication ID : rg1 | +| State : ACTIVE | +| Source cluster nodes : 127.0.0.1,127.0.0.2,127.0.0.3 | +| Databases : Following are the databases included in this replication | +| : Database 1: | +| : Name: yugabyte | +| : Safe Time: 2025-03-14 20:43:49.723305 | +| : Safe Time Lag(micro secs): 0.88 | +| : Safe Time Skew(micro secs): 0.81 | ++------------------------------------------------------------------------------------------+ +``` + +
+ +
+ + ```sh + yb-admin \ + -master_addresses \ + get_xcluster_safe_time \ + [include_lag_and_skew] + ``` + + Example output: + + ```output + [ + { + "namespace_id": "000034cb000030008000000000000000", + "namespace_name": "yugabyte", + "safe_time": "2025-03-14 20:51:25.915918", + "safe_time_epoch": "1742010685915918", + "safe_time_lag_sec": "0.50", + "safe_time_skew_sec": "0.02" + } + ] + ``` + +
+
+ +## YB-Master and YB-Tserver UI + +You can access the YB-Master and YB-Tserver UIs to monitor the health of xCluster replication at `/xcluster`. + +**YB-Master source** + + +![Source YB-Master outbound](/images/deploy/xcluster/automatic-outbound.jpg) + +**YB-Master target** + +: +![Target YB-Master inbound](/images/deploy/xcluster/automatic-inbound.jpg) + +**YB-Tserver source** + +: +![Source YB-Tserver inbound](/images/deploy/xcluster/tserver-outbound.jpg) + +## Advanced troubleshooting + +For advanced troubleshooting, refer to the [Troubleshooting xCluster Replication](https://support.yugabyte.com/hc/en-us/articles/29809348650381-How-to-troubleshoot-xCluster-replication-lag-and-errors) guide. diff --git a/docs/content/stable/manage/data-migration/migrate-from-postgres.md b/docs/content/stable/manage/data-migration/migrate-from-postgres.md index d8807ee24683..2e400acf4693 100644 --- a/docs/content/stable/manage/data-migration/migrate-from-postgres.md +++ b/docs/content/stable/manage/data-migration/migrate-from-postgres.md @@ -247,8 +247,8 @@ For more details, see [Live migration with fall-back](/preview/yugabyte-voyager/ When porting an existing PostgreSQL application to YugabyteDB you can follow a set of best practices to get the best out of your new deployment. -{{}} -For a full list of tips and tricks for high performance and availability, see [Best practices](../../../develop/best-practices-ysql/). +{{}} +For a full list of tips and tricks for high performance and availability, see [Best practices](../../../develop/best-practices-develop/). 
{{}} ### Retry transactions on conflicts diff --git a/docs/content/stable/reference/configuration/operating-systems.md b/docs/content/stable/reference/configuration/operating-systems.md index 7b5d455beb14..954283d8b041 100644 --- a/docs/content/stable/reference/configuration/operating-systems.md +++ b/docs/content/stable/reference/configuration/operating-systems.md @@ -21,6 +21,7 @@ Unless otherwise noted, operating systems are supported by all supported version | :--------------- | :------------- | :------------- | :---- | | AlmaLinux 8 | {{}} | {{}} | | | AlmaLinux 9 | {{}} | {{}} | Default for YugabyteDB Anywhere-deployed nodes | +| Amazon Linux 2023 | {{}} | {{}} | Supported in v2024.2.3 and later | | Oracle Linux 8 | {{}} | | | | Red Hat Enterprise Linux 8 | {{}} | | | | Red Hat Enterprise Linux 8 CIS Hardened | {{}} | | | diff --git a/docs/content/stable/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md b/docs/content/stable/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md index b756a0024616..c3c3ba9af14c 100644 --- a/docs/content/stable/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md +++ b/docs/content/stable/yugabyte-platform/back-up-restore-universes/configure-backup-storage.md @@ -24,11 +24,13 @@ You can configure AWS S3 and S3-compatible storage as your backup target. By default, the option to use S3 path style access is not available. To ensure that you can use this feature, navigate to `https:///features` and enable the **enablePathStyleAccess** option. {{< /note >}} +### Create an AWS backup configuration + To configure S3 storage, do the following: 1. Navigate to **Integrations** > **Backup** > **Amazon S3**. -1. Click **Create S3 Backup** to access the configuration form shown in the following illustration: +1. Click **Create S3 Backup**. 
![S3 Backup](/images/yp/cloud-provider-configuration-backup-aws.png) @@ -102,38 +104,12 @@ To create a GCP backup configuration, do the following: 1. Enter the URI of your GCS bucket in the **GCS Bucket** field. For example, `gs://gcp-bucket/test_backups`. -1. Select **Use GCP IAM** if you're using [GKE service account](#gke-service-account-based-iam-gcp-iam) for backup and restore. +1. Select **Use GCP IAM** to use the YugabyteDB Anywhere instance's Identity Access Management (IAM) role for the GCS backup. -1. Enter the credentials for your account in JSON format in the **GCS Credentials** field. +1. If **Use GCP IAM** is disabled, enter the credentials for your account in JSON format in the **GCS Credentials** field. 1. Click **Save**. -### GKE service account-based IAM (GCP IAM) - -Google Kubernetes Engine (GKE) uses a concept known as "Workload Identity" to provide a secure way to allow a Kubernetes service account (KSA) in your GKE cluster to act as an IAM service account so that your Kubernetes universes can access GCS for backups. - -In GKE, each pod can be associated with a KSA. The KSA is used to authenticate and authorize the pod to interact with other Google Cloud services. An IAM service account is a Google Cloud resource that allows applications to make authorized calls to Google Cloud APIs. - -Workload Identity links a KSA to an IAM account using annotations in the KSA. Pods that use the configured KSA automatically authenticate as the IAM service account when accessing Google Cloud APIs. - -By using Workload Identity, you avoid the need for manually managing service account keys or tokens in your applications running on GKE. This approach enhances security and simplifies the management of credentials. - -- To enable GCP IAM when installing YugabyteDB Anywhere, refer to [Enable GKE service account-based IAM](../../install-yugabyte-platform/install-software/kubernetes/#enable-gke-service-account-based-iam). 
- -- To enable GCP IAM during universe creation, refer to [Configure Helm overrides](../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides). - -- To upgrade an existing universe with GCP IAM, refer to [Upgrade universes for GKE service account-based IAM support](../../manage-deployments/edit-helm-overrides/#upgrade-universes-for-gke-service-account-based-iam). - -**Prerequisites** - -- The GKE cluster hosting the pods should have Workload Identity enabled. The worker nodes of this GKE cluster should have the GKE metadata server enabled. - -- The IAM service account, which is used to annotate the KSA, should have sufficient permissions to read, write, list, and delete objects in GCS. - -- The KSA, which is annotated with the IAM service account, should be present in the same namespace where the pod resources for YugabyteDB Anywhere and YugabyteDB universes are expected. If you have multiple namespaces, each namespace should include the annotated KSA. - -For instructions on setting up Workload Identity, see [Use Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) in the GKE documentation. 
- ## Network File System You can configure Network File System (NFS) as your backup target, as follows: diff --git a/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/gcp.md b/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/gcp.md index efc5118ed68f..86125f7a1621 100644 --- a/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/gcp.md +++ b/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/gcp.md @@ -42,7 +42,7 @@ Before you can deploy universes using YugabyteDB Anywhere (YBA), you must create When deploying a universe, YBA uses the provider configuration settings to do the following: - Create instances on GCP using the following: - - your cloud provider credentials + - your GCP service account credentials - specified regions and availability zones (this can be a subset of those specified in the provider configuration) - a Linux image - optionally, an [instance template](#gcp-instance-templates) @@ -51,7 +51,7 @@ When deploying a universe, YBA uses the provider configuration settings to do th ## Prerequisites -- Cloud provider credentials. YBA uses your credentials to automatically provision and de-provision instances that run YugabyteDB. An instance for YugabyteDB includes a compute instance, as well as local or remote disk storage attached to the compute instance. +- GCP service account credentials. YBA uses your credentials to automatically provision and de-provision instances that run YugabyteDB. An instance for YugabyteDB includes a compute instance, as well as local or remote disk storage attached to the compute instance. For more information on setting up a GCP service account, refer to [Cloud permissions to deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). 
@@ -75,8 +75,6 @@ To create a GCP provider: The create provider process includes generating a new VPC, a network, subnetworks in all available regions, as well as a new firewall rule, VPC peering for network connectivity, and a custom SSH key pair for YugabyteDB Anywhere-to-YugabyteDB connectivity. -Now you are ready to create a YugabyteDB universe on GCP. - ### View and edit providers To view a provider, select it in the list of GCP Configs to display the **Overview**. @@ -97,6 +95,7 @@ To edit the provider, select **Config Details**, make changes, and click **Apply If `new-project-yb` is a new GCE project, the backend request fails and you will be notified that you can't change the GCE project for an in-use provider. - Regions - You can add regions and zones to an in-use provider. Note that you cannot edit existing region details, delete a region if any of the region's zones are in use, or delete zones that are in use. +- Linux version catalog To view the universes created using the provider, select **Universes**. @@ -110,11 +109,17 @@ Enter a Provider name. The Provider name is an internal tag used for organizing ### Cloud Info -If your YBA instance is not running inside GCP, you need to supply YBA with credentials to the desired GCP project by uploading a configuration file. To do this, set **Credential Type** to **Upload Service Account config** and proceed to upload the JSON file that you obtained when you created your service account, as described in [Cloud permissions](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). +**Credential Type**. YBA requires the ability to create VMs in GCP. To do this, you can do one of the following: + +- If your YBA instance is not running inside GCP, you need to supply YBA with credentials to the desired GCP project by uploading a configuration file. 
+ + Set **Credential Type** to **Upload Service Account config** and upload the JSON file that you obtained when you created your service account, as described in [Cloud permissions](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/). + +- If your YBA instance is running inside GCP, the preferred method for authentication to the GCP APIs is to add a service account role to the GCP instance running YBA and then configure YBA to use the instance's service account. -If your YBA instance is running inside GCP, the preferred method for authentication to the GCP APIs is to add a service account role to the GCP instance running YBA and then configure YBA to use the instance's service account. To do this, set **Credential Type** to **Use service account from this YBA host's instance**. + Set **Credential Type** to **Use service account from this YBA host's instance**. -### VPC Setup +#### VPC Setup Specify the VPC to use for deploying YugabyteDB nodes. diff --git a/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/kubernetes.md b/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/kubernetes.md index 93859854750f..a12a7137b590 100644 --- a/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/kubernetes.md +++ b/docs/content/stable/yugabyte-platform/configure-yugabyte-platform/kubernetes.md @@ -48,7 +48,7 @@ Before you create a Kubernetes provider, perform the following: - Create a `yugabyte-platform-universe-management` service account. - Create a `kubeconfig` file of the service account you created to configure access to the Kubernetes cluster. -Refer to [To deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes/). +See [To deploy nodes](../../prepare/cloud-permissions/cloud-permissions-nodes/). ## Configure Kubernetes @@ -122,7 +122,7 @@ Continue configuring your Kubernetes provider by clicking **Add region** and com 1. Complete the **Overrides** field using one of the provided [options](#overrides). 
If you do not specify anything, YBA uses defaults specified inside the Helm chart. For additional information, see [Open source Kubernetes](../../../deploy/kubernetes/single-zone/oss/helm-chart/). -1. If you are using [Kubernetes cert-manager](https://cert-manager.io) to manage TLS certificates, specify the issuer kind, enter the issuer name, and optionally provide the issuer group. For more information, refer to [Enable encryption in transit](../../security/enable-encryption-in-transit/add-certificate-kubernetes/). +1. If you are using [Kubernetes cert-manager](https://cert-manager.io) to manage TLS certificates, specify the issuer kind, enter the issuer name, and optionally provide the issuer group. For more information, refer to [Add certificates](../../security/enable-encryption-in-transit/add-certificate-kubernetes/). If required, add a new zone by clicking **Add Zone**, as your configuration may have multiple zones. @@ -377,6 +377,17 @@ tserver: The Kubernetes `labels` are key-value pairs attached to objects. The `labels` are used to specify identifying attributes of objects that are meaningful and relevant to you. For more information, see [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) in the Kubernetes documentation. 
+### Use common name with cert-manager + +If your certificate issuer (for example, for `aws-privateca-issuer`) requires the certificate to include the common name, set the following override: + +```yml +tls: + certManager: + certificates: + commonNameRequired: true +``` + ### Preflight check Preflight check overrides, such as DNS address resolution, disk IO, available port, ulimit: diff --git a/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md b/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md index 148055a1a537..35accd4e3abf 100644 --- a/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md +++ b/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone-kubernetes.md @@ -159,7 +159,7 @@ If there are any errors in your overrides definitions, a detailed error message #### GKE service account -If you want to enable [GKE service account-based IAM](../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam) for backup and restore using GCS at the universe level, add the following overrides: +If you want to enable [GKE service account-based IAM](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam) for backup and restore using GCS at the universe level, add the following overrides: ```yaml tserver: diff --git a/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone.md b/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone.md index aa1bf712b838..62df0f5dc537 100644 --- a/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone.md +++ b/docs/content/stable/yugabyte-platform/create-deployments/create-universe-multi-zone.md @@ -138,6 +138,12 @@ Enable Connection Pooling Enable Systemd Services : To use cron instead of systemd for managing 
nodes, you can disable systemd services. This is not recommended. +{{< warning title="cron-based support deprecated" >}} + +cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 and later. To upgrade to v2025.2 or later, all your universes must be using systemd. Universes that use a cloud provider configuration will be upgraded to systemd automatically. To upgrade on-premises cron-based universes, navigate to the universe and choose **Actions>Upgrade to Systemd**. + +{{< /warning >}} + Override Deployment Ports : To customize the [ports used for the universe](../../prepare/networking/), select the **Override Deployment Ports** option and enter the custom port numbers for the services you want to change. Any value from `1024` to `65535` is valid, as long as it doesn't conflict with anything else running on nodes to be provisioned. diff --git a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/installer.md b/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/installer.md index 39326f8a9acd..f8bcc0f3ceb9 100644 --- a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/installer.md +++ b/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/installer.md @@ -279,7 +279,7 @@ To use the data disk with a new installation, do the following: ### Reconfigure -You can use YBA Installer to reconfigure an installed YBA instance. +You can use YBA Installer to make changes to an installed YBA instance. To reconfigure an installation, edit the configuration file with your changes, and then run the command as follows: @@ -294,8 +294,8 @@ For more information, refer to [Configuration options](#configuration-options). YBA Installer provides basic service management, with `start`, `stop`, and `restart` commands. Each of these can be performed for all the services (`platform`, `postgres`, and `prometheus`), or any individual service. 
```sh -sudo yba-ctl [start, stop, reconfigure] -sudo yba-ctl [start, stop, reconfigure] prometheus +sudo yba-ctl [start, stop, restart] +sudo yba-ctl [start, stop, restart] prometheus ``` In addition to the state changing operations, you can use the `status` command to show the status of all YugabyteDB Anywhere services, in addition to other information such as the log and configuration location, versions of each service, and the URL to access the YugabyteDB Anywhere UI. @@ -434,6 +434,8 @@ YBA Installer [automatically generates](#configure-yba-installer) the file when | sudo | opt/yba-ctl/ | | non-sudo | ~/opt/yba-ctl/ | +To make changes to an existing installation, edit the configuration file with your changes and run the [reconfigure](#reconfigure) command. Note that some settings (marked with {{}}) cannot be changed after installation. + Note that the file must include all fields. Optional fields may be left blank. ### Configure YBA Installer diff --git a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md b/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md index 55791919f6b8..55da4c975d85 100644 --- a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md +++ b/docs/content/stable/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes.md @@ -518,7 +518,7 @@ In addition, it is recommended to set a large initial storage size, because resi If you are using Google Cloud Storage (GCS) for backups, you can enable GKE service account-based IAM (GCP IAM) so that Kubernetes universes can access GCS. -Before enabling GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam). +Before enabling GCP IAM, ensure you have the prerequisites. 
Refer to [GCP IAM](../../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam). To enable GCP IAM, provide the following additional Helm values during installation to a version which supports this feature (v2.18.4 or later): diff --git a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md b/docs/content/stable/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md deleted file mode 100644 index 787aca094510..000000000000 --- a/docs/content/stable/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md +++ /dev/null @@ -1,350 +0,0 @@ - - -Choose the type of YugabyteDB Anywhere installation. - -{{< tabpane text=true >}} - - {{% tab header="Docker-based" lang="docker" %}} - -For a Docker-based installation, YugabyteDB Anywhere uses [Replicated scheduler](https://www.replicated.com/) for software distribution and container management. You need to ensure that the host can pull containers from the [Replicated Docker Registries](https://help.replicated.com/docs/native/getting-started/docker-registries/). - -Replicated installs a compatible Docker version if it is not pre-installed on the host. The currently supported Docker version is 20.10.n. - - {{% /tab %}} - - {{% tab header="Kubernetes-based" lang="kubernetes" %}} - -For a Kubernetes-based installation, you need to ensure that the host can pull container images from the [Quay.io](https://quay.io/) container registry. For details, see [Pull and push YugabyteDB Docker images to private container registry](#pull-and-push-yugabytedb-docker-images-to-private-container-registry). - -In addition, you need to ensure that core dumps are enabled and configured on the underlying Kubernetes node. For details, see [Specify ulimit and remember the location of core dumps](#specify-ulimit-and-remember-the-location-of-core-dumps). 
- - - -#### Specify ulimit and remember the location of core dumps - -The core dump collection in Kubernetes requires special care due to the fact that `core_pattern` is not isolated in cgroup drivers. - -You need to ensure that core dumps are enabled on the underlying Kubernetes node. Running the `ulimit -c` command within a Kubernetes pod or node must produce a large non-zero value or the `unlimited` value as an output. For more information, see [How to enable core dumps](https://www.ibm.com/support/pages/how-do-i-enable-core-dumps). - -To be able to locate your core dumps, you should be aware of the fact that the location to which core dumps are written depends on the sysctl `kernel.core_pattern` setting. For more information, see [Linux manual: core(5)](https://man7.org/linux/man-pages/man5/core.5.html#:~:text=Naming%20of%20core%20dump%20files). - -To inspect the value of the sysctl within a Kubernetes pod or node, execute the following: - -```sh -cat /proc/sys/kernel/core_pattern -``` - -If the value of `core_pattern` contains a `|` pipe symbol (for example, `|/usr/share/apport/apport -p%p -s%s -c%c -d%d -P%P -u%u -g%g -- %E`), the core dump is being redirected to a specific collector on the underlying Kubernetes node, with the location depending on the exact collector. To be able to retrieve core dump files in case of a crash within the Kubernetes pod, it is important that you understand where these files are written. - -If the value of `core_pattern` is a literal path of the form `/var/tmp/core.%p`, no action is required on your part, as core dumps will be copied by the YugabyteDB node to the persistent volume directory `/mnt/disk0/cores` for future analysis. - -Note the following: - -- ulimits and sysctl are inherited from Kubernetes nodes and cannot be changed for an individual pod. -- New Kubernetes nodes might be using [systemd-coredump](https://www.freedesktop.org/software/systemd/man/systemd-coredump.html) to manage core dumps on the node. 
- -#### Pull and push YugabyteDB Docker images to private container registry - -Due to security concerns, some Kubernetes environments use internal container registries such as Harbor and Nexus. In this type of setup, YugabyteDB deployment must be able to pull images from and push images to a private registry. - -{{< note title="Note" >}} - -This is not a recommended approach for enterprise environments. You should ask the container registry administrator to add proxy cache to pull the source images to the internal registry automatically. This would allow you to avoid modifying the Helm chart or providing a custom registry inside the YugabyteDB Anywhere cloud provider. - -{{< /note >}} - -Before proceeding, ensure that you have the following: - -- Pull secret consisting of the user name and password or service account to access source (pull permission) and destination (push and pull permissions) container registries. -- Docker installed on a server (desktop) that can access both container registries. For installation instructions, see [Docker Desktop](https://www.docker.com/products/docker-desktop). - -Generally, the process involves the following: - -- Fetching the correct version of the YugabyteDB Helm chart whose `values.yaml` file describes all the image locations. -- Retagging images. -- Pushing images to the private container registry. -- Modifying the Helm chart values to point to the new private location. - -![img](/images/yp/docker-pull.png) - -You need to perform the following steps: - -1. Log in to [Quay.io](https://quay.io/) to access the YugabyteDB private registry using the user name and password provided in the secret `yaml` file. To find the `auth` field, use `base64 -d` to decode the data inside the `yaml` file twice. In this field, the user name and password are separated by a colon. For example, `yugabyte+:ZQ66Z9C1K6AHD5A9VU28B06Q7N0AXZAQSR`. 
- - ```sh - docker login -u “your_yugabyte_username” -p “yugabyte_provided_password” quay.io - - docker search yugabytedb # You should see images - ``` - -1. Fetch the YugabyteDB Helm chart on your desktop (install Helm on your desktop). Since the images in the `values.yaml` file may vary depending on the version, you need to specify the version you want to pull and push, as follows: - - ```sh - helm repo add yugabytedb https://charts.yugabyte.com - helm repo update - helm fetch yugabytedb/yugaware - - version= {{ version }} - tar zxvf yugaware-{{ version }}.tgz - cd yugaware - cat values.yaml - ``` - - ```properties - image: - commonRegistry: "" - repository: **quay.io/yugabyte/yugaware** - tag: **{{ version.build }}** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: quay.io - tag: **latest** - name: **yugabyte/thirdparty-deps** - prometheus: - registry: "" - tag: **{{ version.prometheus }}** - name: **prom/prometheus** - nginx: - registry: "" - tag: **{{ version.nginx }}** - name: nginx - ``` - -1. 
Pull images to your Docker Desktop, as follows: - - ```sh - docker pull quay.io/yugabyte/yugaware:{{ version.build }} - ``` - - ```output - xxxxxxxxx: Pulling from yugabyte/yugaware - c87736221ed0: Pull complete - 4d33fcf3ee85: Pull complete - 60cbb698a409: Pull complete - daaf3bdf903e: Pull complete - eb7b573327ce: Pull complete - 94aa28231788: Pull complete - 16c067af0934: Pull complete - 8ab1e7f695af: Pull complete - 6153ecb58755: Pull complete - c0f981bfb844: Pull complete - 6485543159a8: Pull complete - 811ba76b1d72: Pull complete - e325b2ff3e2a: Pull complete - c351a0ce1ccf: Pull complete - 73765723160d: Pull complete - 588cb609ac0b: Pull complete - af3ae7e64e48: Pull complete - 17fb23853f77: Pull complete - cb799d679e2f: Pull complete - Digest: sha256:0f1cb1fdc1bd4c17699507ffa5a04d3fe5f267049e0675d5d78d77fa632b330c - Status: Downloaded newer image for quay.io/yugabyte/yugaware:xxxxxx - quay.io/yugabyte/yugaware:xxxxxxx - ``` - - ```sh - docker pull quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```output - latest: Pulling from yugabyte/thirdparty-deps - c87736221ed0: Already exists - 4d33fcf3ee85: Already exists - 60cbb698a409: Already exists - d90c5841d133: Pull complete - 8084187ca761: Pull complete - 47e3b9f5c7f5: Pull complete - 64430b56cbd6: Pull complete - 27b03c6bcdda: Pull complete - ae35ebe6caa1: Pull complete - 9a655eedc488: Pull complete - Digest: sha256:286a13eb113398e1c4e63066267db4921c7644dac783836515a783cbd25b2c2a - Status: Downloaded newer image for quay.io/yugabyte/thirdparty-deps:latest - quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```sh - docker pull postgres:11.5 - ``` - - ```output - xxxxxx: Pulling from library/postgres - 80369df48736: Pull complete - b18dd0a6efec: Pull complete - 5c20c5b8227d: Pull complete - c5a7f905c8ec: Pull complete - 5a3f55930dd8: Pull complete - ffc097878b09: Pull complete - 3106d02490d4: Pull complete - 88d1fc513b8f: Pull complete - f7d9cc27056d: Pull complete - afe180d8d5fd: Pull complete - 
b73e04acbb5f: Pull complete - 1dba81bb6cfd: Pull complete - 26bf23ba2b27: Pull complete - 09ead80f0070: Pull complete - Digest: sha256:b3770d9c4ef11eba1ff5893e28049e98e2b70083e519e0b2bce0a20e7aa832fe - Status: Downloaded newer image for postgres:11.5 - docker.io/library/postgres: - ``` - - ```sh - docker pull prom/prometheus:v2.2.1 - ``` - - ```output - Image docker.io/prom/prometheus:v2.2.1 uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/ - aab39f0bc16d: Pull complete - a3ed95caeb02: Pull complete - 2cd9e239cea6: Pull complete - 0266ca3d0dd9: Pull complete - 341681dba10c: Pull complete - 8f6074d68b9e: Pull complete - 2fa612efb95d: Pull complete - 151829c004a9: Pull complete - 75e765061965: Pull complete - b5a15632e9ab: Pull complete - Digest: sha256:129e16b08818a47259d972767fd834d84fb70ca11b423cc9976c9bce9b40c58f - Status: Downloaded newer image for prom/prometheus: - docker.io/prom/prometheus: - ``` - - ```sh - docker pull nginx:1.17.4 - ``` - - ```output - 1.17.4: Pulling from library/nginx - 8d691f585fa8: Pull complete - 047cb16c0ff6: Pull complete - b0bbed1a78ca: Pull complete - Digest: sha256:77ebc94e0cec30b20f9056bac1066b09fbdc049401b71850922c63fc0cc1762e - Status: Downloaded newer image for nginx:1.17.4 - docker.io/library/nginx:1.17.4 - ``` - - ```sh - docker pull janeczku/go-dnsmasq:release-1.0.7 - ``` - - ```output - release-1.0.7: Pulling from janeczku/go-dnsmasq - 117f30b7ae3d: Pull complete - 504f1e14d6cc: Pull complete - 98e84d0ba41a: Pull complete - Digest: sha256:3a99ad92353b55e97863812470e4f7403b47180f06845fdd06060773fe04184f - Status: Downloaded newer image for janeczku/go-dnsmasq:release-1.0.7 - docker.io/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. 
Log in to your target container registry, as per the following example that uses Google Container Registry (GCR) : - - ```sh - docker login -u _json_key --password-stdin https://gcr.io < .ssh/my-service-account-key.json - ``` - -1. Tag the local images to your target registry, as follows: - - ```sh - docker images - ``` - - ```output - REPOSITORY TAG IMAGE ID CREATED SIZE - quay.io/yugabyte/yugaware 2.5.1.0-b153 **a04fef023c7c** 6 weeks ago 2.54GB - quay.io/yugabyte/thirdparty-deps latest **721453480a0f** 2 months ago 447MB - nginx 1.17.4 **5a9061639d0a** 15 months ago 126MB - postgres 11.5 **5f1485c70c9a** 15 months ago 293MB - prom/prometheus v2.2.1 **cc866859f8df** 2 years ago 113MB - janeczku/go-dnsmasq release-1.0.7 **caef6233eac4** 4 years ago 7.38MB - ``` - - ```sh - docker tag a04fef023c7c gcr.io/dataengineeringdemos/yugabyte/yugaware:2.5.1.0-b153 - docker tag 721453480a0f gcr.io/dataengineeringdemos/yugabyte/thirdparty-deps:latest - docker tag 5a9061639d0a gcr.io/dataengineeringdemos/yugabyte/nginx:1.17.4 - docker tag 5f1485c70c9a gcr.io/dataengineeringdemos/yugabyte/postgres:11.5 - docker tag cc866859f8df gcr.io/dataengineeringdemos/prom/prometheus:v2.2.1 - docker tag caef6233eac4 gcr.io/dataengineeringdemos/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. Push images to the private container registry, as follows: - - ```sh - docker push a04fef023c7c - docker push 721453480a0f - docker push 5a9061639d0a - docker push 5f1485c70c9a - docker push cc866859f8df - docker push caef6233eac4 - ``` - - ![img](/images/yp/docker-image.png) - -1. Modify the Helm chart `values.yaml` file. 
You can map your private internal repository URI to `commonRegistry` and use the folder or `project/image_name` and tags similar to the following: - - ```properties - image: - commonRegistry: "**gcr.io/dataengineeringdemos**" - repository: **“”** - tag: **2.5.1.0-b153** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: /yugabyte/thhirdparty-deps - tag: **latest** - name: **yugabyte/thirdparty-deps** - postgres: - registry: "yugabyte/postgres" - tag: 11.5 - name: **postgres** - prometheus: - registry: "prom/prometheus" - tag: **v2.2.1** - name: **prom/prometheus** - nginx: - registry: "yugabyte/nginx" - tag: **1.17.4** - name: nginx - dnsmasq: - registry: "janeczku/go-dnsmasq/" - tag: **release-1.0.7** - name: **janeczku/go-dnsmasq - ``` - -1. Install Helm chart or specify the container registry in YugabyteDB Anywhere cloud provider, as follows: - - ```sh - helm install yugaware **.** -f values.yaml - ``` - - {{% /tab %}} - - {{% tab header="Airgapped" lang="airgapped" %}} - -Installing YugabyteDB Anywhere on Airgapped hosts, without access to any Internet traffic (inbound or outbound) requires the following: - -- Whitelisting endpoints: to install Replicated and YugabyteDB Anywhere on a host with no Internet connectivity, you have to first download the binaries on a computer that has Internet connectivity, and then copy the files over to the appropriate host. In case of restricted connectivity, the following endpoints have to be whitelisted to ensure that they are accessible from the host marked for installation: - `https://downloads.yugabyte.com` - `https://download.docker.com` - -- Ensuring that Docker Engine version 20.10.n is available. If it is not installed, you need to follow the procedure described in [Installing Docker in airgapped](https://www.replicated.com/docs/kb/supporting-your-customers/installing-docker-in-airgapped/). 
-- Ensuring that the following ports are open on the YugabyteDB Anywhere host: - - 8800 – HTTP access to the Replicated UI - - 80 or 443 – HTTP and HTTPS access to the YugabyteDB Anywhere UI, respectively - - 22 – SSH -- Ensuring that the attached disk storage (such as persistent EBS volumes on AWS) is 100 GB minimum. -- Having YugabyteDB Anywhere airgapped install package. Contact Yugabyte Support for more information. -- Signing the Yugabyte license agreement. Contact Yugabyte Support for more information. - - {{% /tab %}} - -{{< /tabpane >}} diff --git a/docs/content/stable/yugabyte-platform/manage-deployments/edit-helm-overrides.md b/docs/content/stable/yugabyte-platform/manage-deployments/edit-helm-overrides.md index e7bc3d685f7a..c79ea656ae4f 100644 --- a/docs/content/stable/yugabyte-platform/manage-deployments/edit-helm-overrides.md +++ b/docs/content/stable/yugabyte-platform/manage-deployments/edit-helm-overrides.md @@ -29,7 +29,7 @@ To edit Kubernetes overrides, do the following: If you are using Google Cloud Storage (GCS) for backups, you can enable GKE service account-based IAM (GCP IAM) so that Kubernetes universes can access GCS. -Before upgrading a universe for GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../back-up-restore-universes/configure-backup-storage/#gke-service-account-based-iam-gcp-iam). +Before upgrading a universe for GCP IAM, ensure you have the prerequisites. Refer to [GCP IAM](../../prepare/cloud-permissions/cloud-permissions-nodes-gcp/#gke-service-account-based-iam-gcp-iam). 
To upgrade an existing universe to use GCP IAM, do the following: diff --git a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-nodes.md b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-nodes.md index 6fe88e1380de..98ef42db0080 100644 --- a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-nodes.md +++ b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-nodes.md @@ -75,10 +75,10 @@ For each node in the universe, use the following general procedure: - Inline patching - You modify the Linux OS binaries in place (for example, using yum). - Boot disk replacement - You create a separate new VM with a virtual disk containing the new Linux OS patch or upgrade, disconnect the virtual disk from the new VM, and use it to replace the DB node's boot disk. This is typically used with a hypervisor or public cloud. - If the node uses assisted or fully manual legacy provisioning, after replacing the boot disk, re-provision the node by following the [manual provisioning steps](../../configure-yugabyte-platform/on-premises-script/). - Ensure that the node retains its IP addresses after the patching of the Linux OS. Also ensure that the existing data volumes on the node remain untouched by the OS patching mechanism. +1. After replacing the boot disk, re-provision the node by following the steps in [Automatically provision on-premises nodes](../../prepare/server-nodes-software/software-on-prem/). + 1. 
Re-provision the node using the following API command: ```shell diff --git a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software-prepare.md b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software-prepare.md index b0800b0121ec..cd4d6ab9617b 100644 --- a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software-prepare.md +++ b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software-prepare.md @@ -16,6 +16,20 @@ type: docs If your universe is running on a [deprecated OS](../../../reference/configuration/operating-systems/), you need to update your OS before you can upgrade to the next major release of YugabyteDB. Refer to [Patch and upgrade the Linux operating system](../upgrade-nodes/). +## cron-based universes + +cron and root-level systemd have been deprecated in favor of user-level systemd with node agent for management of universe nodes. + +In particular, cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later. Before you can upgrade to v2025.2 or later, all your universes must be using systemd. YugabyteDB Anywhere will automatically upgrade universes that use a cloud provider configuration to systemd. + +However, on-premises cron-based universes must be upgraded manually. To do this, in YugabyteDB Anywhere v2024.2.2 or later, navigate to the universe and choose **Actions>Upgrade to Systemd**. + +## Node agent + +YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later require that universes have node agent running on their nodes. Before you can upgrade to v2025.2 or later, all your universes must be using node agent. + +YugabyteDB Anywhere will attempt to automatically update universes.
If it is unable to update a universe, make sure the universe nodes satisfy the [prerequisites](../../prepare/server-nodes-software/) and re-try the install by clicking **Actions>Install Node Agent**. + ## Review major changes in previous YugabyteDB releases {{< warning title="For YugabyteDB upgrades in YugabyteDB Anywhere" >}} diff --git a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software.md b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software.md index 769c876775d1..1aaf8c369dc2 100644 --- a/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software.md +++ b/docs/content/stable/yugabyte-platform/manage-deployments/upgrade-software.md @@ -45,18 +45,18 @@ When performing a database upgrade, do the following: title="Prepare to upgrade" body="Review changes that may affect your automation." href="../upgrade-software-prepare/" - icon="/images/section_icons/quick_start/install.png">}} + icon="fa-thin fa-diamond-exclamation">}} {{}} + icon="fa-thin fa-download">}} {{}} + icon="fa-thin fa-up-from-bracket">}} {{}} diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md index 226d8724621d..2f89f64992ac 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-aws.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. 
+{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to AWS using an [on-premises provider](../cloud-permissions-nodes/). +{{}} + ## AWS The following permissions are required for AWS. @@ -112,7 +116,7 @@ The following permissions are required for AWS. To grant the required access, you do one of the following: -- Create a service account with the permissions. You'll later provide YBA with the service account Access key ID and Secret Access Key when creating the provider. +- Create a service account with the permissions. You'll later provide YBA with the service account Access key ID and Secret Access Key when creating the AWS provider configuration. - Attach an IAM role with the required permissions to the EC2 VM instance where YugabyteDB Anywhere will be running. ### Service account diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md index 4d4b33ccdf12..c18248d5f4a5 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-azure.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. +{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to Azure using an [on-premises provider](../cloud-permissions-nodes/). 
+{{}} + ## Azure ### Application and resource group diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md index de564895a06a..5b2e8858ee52 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes-gcp.md @@ -48,6 +48,10 @@ type: docs For YugabyteDB Anywhere (YBA) to be able to deploy and manage YugabyteDB clusters, you need to provide YBA with privileges on your cloud infrastructure to create, delete, and modify VMs, mount and unmount disk volumes, and so on. The more permissions that you can provide, the more YBA can automate. +{{}} +If you can't provide YBA with the necessary permissions, you can still deploy to GCP using an [on-premises provider](../cloud-permissions-nodes/). +{{}} + ## GCP The [Compute Admin role](https://cloud.google.com/compute/docs/access/iam#compute.admin) permission is required on the GCP service account where you will deploy: @@ -63,7 +67,7 @@ To grant the required access, you must do the following: Then use one of the following methods: -- Obtain a file containing a JSON that describes the service account credentials. You will need to provide this file later to YBA. +- Obtain a file containing a JSON that describes the service account credentials. You will need to provide this file later when creating the GCP provider configuration. - [Attach the service account](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#using) to the GCP VM that will run YBA. 
| Save for later | To configure | @@ -82,3 +86,31 @@ If you will be using your own custom SSH keys, then ensure that you have them wh | Save for later | To configure | | :--- | :--- | | Custom SSH keys | [GCP provider configuration](../../../configure-yugabyte-platform/gcp/) | + +## GKE service account-based IAM (GCP IAM) + +Google Kubernetes Engine (GKE) uses a concept known as "Workload Identity" to provide a secure way to allow a Kubernetes service account ([KSA](https://kubernetes.io/docs/concepts/security/service-accounts/)) in your GKE cluster to act as an IAM service account so that your Kubernetes universes can access GCS for backups. + +In GKE, each pod can be associated with a KSA. The KSA is used to authenticate and authorize the pod to interact with other Google Cloud services. An IAM service account is a Google Cloud resource that allows applications to make authorized calls to Google Cloud APIs. + +Workload Identity links a KSA to an IAM account using annotations in the KSA. Pods that use the configured KSA automatically authenticate as the IAM service account when accessing Google Cloud APIs. + +By using Workload Identity, you avoid the need for manually managing service account keys or tokens in your applications running on GKE. This approach enhances security and simplifies the management of credentials. + +- To enable GCP IAM when installing YugabyteDB Anywhere on Kubernetes, refer to [Enable GKE service account-based IAM](../../../install-yugabyte-platform/install-software/kubernetes/#enable-gke-service-account-based-iam). + +- To enable GCP IAM during universe creation on Kubernetes, refer to [Configure Helm overrides](../../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides). + +- To enable GCP IAM for Google Cloud Storage backup configuration with Kubernetes, refer to [Configure backup storage](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage). 
+ +- To upgrade an existing universe with GCP IAM, refer to [Upgrade universes for GKE service account-based IAM support](../../../manage-deployments/edit-helm-overrides/#upgrade-universes-for-gke-service-account-based-iam). + +**Prerequisites** + +- The GKE cluster hosting the pods should have Workload Identity enabled. The worker nodes of this GKE cluster should have the GKE metadata server enabled. + +- The IAM service account, which is used to annotate the KSA, should have sufficient permissions to read, write, list, and delete objects in GCS. + +- The KSA, which is annotated with the IAM service account, should be present in the same namespace where the pod resources for YugabyteDB Anywhere and YugabyteDB universes are expected. If you have multiple namespaces, each namespace should include the annotated KSA. + +For instructions on setting up Workload Identity, see [Use Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) in the GKE documentation. diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md index b53a2c9da014..ec0c4a487a9f 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-nodes.md @@ -48,7 +48,7 @@ type: docs ## On-premises -Because you are creating the VMs manually, nodes for on-premises providers don't require any cloud permissions. +Because you are creating the VMs manually (on a private cloud, bare metal, or cloud provider), nodes for on-premises providers don't require any cloud permissions. With an on-premises provider, permissions against your infrastructure are generally not needed to deploy VMs, modify VMs, and so on. 
diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md index 01c85c62ac1f..c95cfbd14ebe 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-storage.md @@ -91,7 +91,12 @@ The Access key ID and Secret Access Key for the service account are used when cr When backing up to and/or restoring from GCP GCS, YBA and database nodes must be able to write to and read from the GCS storage bucket. -To grant the required access, create a GCP service account with [IAM roles for cloud storage](https://cloud.google.com/storage/docs/access-control/iam-roles) with the following permissions: +To grant the required access, you can do one of the following: + +- Provide a GCP service account with [IAM roles for cloud storage](https://cloud.google.com/storage/docs/access-control/iam-roles) with the required permissions. +- Create the VM instances (for both the YBA VM and the DB nodes VMs) with an IAM role that has the required permissions. + +The following permissions are required: ```sh roles/storage.admin @@ -101,9 +106,7 @@ The credentials for this account (in JSON format) are used when creating a backu | Save for later | To configure | | :--- | :--- | -| Storage service account JSON credentials | [Storage configuration](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage) for GCS | - -For database clusters deployed to GKE, you can alternatively assign the appropriate IAM roles to the YugabyteDB Anywhere VM and the YugabyteDB nodes. 
+| Service account JSON credentials | [Storage configuration](../../../back-up-restore-universes/configure-backup-storage/#google-cloud-storage) for GCS | diff --git a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md index 2f6e5a2aaafd..17f258e0adbe 100644 --- a/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md +++ b/docs/content/stable/yugabyte-platform/prepare/cloud-permissions/cloud-permissions-yba.md @@ -63,6 +63,8 @@ Linux OS root permissions are required for the server, see [Servers for YBA](../ When installing YugabyteDB Anywhere on an AWS VM, no cloud permissions are required. +If you attach an IAM role with the appropriate permissions to the EC2 VM instance where YugabyteDB Anywhere will be running, you can use the YugabyteDB Anywhere instance's IAM role when setting up node servers and S3 backup. Refer to [Permissions to deploy nodes](../cloud-permissions-nodes-aws/) and [Permissions to back up and restore](../cloud-permissions-storage/). + Linux OS root permissions are required for the server, see [Servers for YBA](../../server-yba/). @@ -71,6 +73,8 @@ Linux OS root permissions are required for the server, see [Servers for YBA](../ When installing YugabyteDB Anywhere on a GCP VM, no cloud permissions are required. +If you attach a service account with appropriate permissions to the GCP VM where YugabyteDB Anywhere will be running, you can use the YugabyteDB Anywhere instance's role when setting up node servers and S3 backup. Refer to [Permissions to deploy nodes](../cloud-permissions-nodes-gcp/) and [Permissions to back up and restore](../cloud-permissions-storage/). + Linux OS root permissions are required for the server, see [Servers for YBA](../../server-yba/). 
diff --git a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md index 7df7a847ac2a..e779367010e8 100644 --- a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md +++ b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/_index.md @@ -48,7 +48,7 @@ YugabyteDB Anywhere requires the following additional software to be pre-install #### Python -Install Python 3.8 on the nodes. (If you are using [Legacy on-premises provisioning](software-on-prem-legacy/), Python 3.5-3.9 is supported, and 3.6 is recommended.) +Install Python 3.6-3.11 on the nodes. (If you are using [Legacy on-premises provisioning](software-on-prem-legacy/), Python 3.5-3.9 is supported, and 3.6 is recommended.) Install the Python SELinux package corresponding to your version of Python. You can use pip to do this. Ensure the version of pip matches the version of Python. diff --git a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md index 1e03525660b1..63238f6a5d81 100644 --- a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md +++ b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-cloud-provider.md @@ -12,7 +12,7 @@ menu: type: docs --- -When deploying database clusters using a public cloud provider (AWS, GCP, or Azure), YugabyteDB Anywhere (YBA) creates cloud VMs directly. +When deploying database clusters using a public cloud provider configuration (AWS, GCP, or Azure), YugabyteDB Anywhere (YBA) creates cloud VMs directly. 
You have two options for provisioning the operating system: diff --git a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md index 1c6ce69b93f5..045902b7e550 100644 --- a/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md +++ b/docs/content/stable/yugabyte-platform/prepare/server-nodes-software/software-kubernetes.md @@ -72,6 +72,12 @@ helm repo add prometheus-community https://prometheus-community.github.io/helm-c helm install -n kube-system --version 5.0.0 kube-state-metrics prometheus-community/kube-state-metrics ``` +## Install cert-manager + +You can use [cert-manager](https://cert-manager.io/) to manage certificates for your cluster. To use cert-manager to manage universe certificates, ensure that it is running on your Kubernetes cluster before you create your [Kubernetes provider configuration](../../../configure-yugabyte-platform/kubernetes/). + +For more information, refer to [Add certificates](../../../security/enable-encryption-in-transit/add-certificate-kubernetes/). + ## Pull and push YugabyteDB Docker images to private container registry Due to security concerns, some Kubernetes environments use internal container registries such as Harbor and Nexus. In this type of setup, YugabyteDB deployment must be able to pull images from and push images to a private registry. 
diff --git a/docs/content/stable/yugabyte-platform/prepare/server-yba.md b/docs/content/stable/yugabyte-platform/prepare/server-yba.md index 01049bfb14a4..c78b1252b9c8 100644 --- a/docs/content/stable/yugabyte-platform/prepare/server-yba.md +++ b/docs/content/stable/yugabyte-platform/prepare/server-yba.md @@ -47,7 +47,7 @@ YugabyteDB Anywhere has the following software requirements: - x86 Linux operating system - License -- Python 3.8-3.11 +- Python - Sudo root permissions for installation ### Linux OS diff --git a/docs/content/stable/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md b/docs/content/stable/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md index 6c53f046d6ac..9385c8ce6dc0 100644 --- a/docs/content/stable/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md +++ b/docs/content/stable/yugabyte-platform/security/enable-encryption-in-transit/add-certificate-kubernetes.md @@ -50,13 +50,24 @@ Add TLS certificates issued by the cert-manager as follows: ## Configure the provider -After the certificate is added to YugabyteDB Anywhere, configure the Kubernetes provider configuration by following instructions provided in [Configure region and zones](../../../configure-yugabyte-platform/kubernetes/#configure-region-and-zones). +After the certificate is added to YugabyteDB Anywhere, set up the Kubernetes provider configuration by following the instructions in [Configure region and zones](../../../configure-yugabyte-platform/kubernetes/#configure-region-and-zones). -In the **Add new region** dialog shown in the following illustration, you can specify the Issuer kind, Issuer name, and optionally the Issuer group for each zone. +When adding a region, you can specify the Issuer kind, Issuer name, and optionally the Issuer group for each zone. 
![Add new region](/images/yp/security/kubernetes-cert-manager-add-region-2024-2-2.png) -If you need the certificate to include the common name, set the **Common Name Required for Certificates** Universe Runtime Configuration option (config key `yb.tls.cert_manager.common_name_required`) to true. Refer to [Manage runtime configuration settings](../../../administer-yugabyte-platform/manage-runtime-config/). When configured, YugabyteDB Anywhere sets the common name to the name of the service created for the pod. +### Including the common name + +If your certificate issuer (for example, `aws-privateca-issuer`) requires the certificate to include the common name, set the following [override](../../../create-deployments/create-universe-multi-zone-kubernetes/#helm-overrides) for the provider region: + +```yml +tls: + certManager: + certificates: + commonNameRequired: true +``` + +When configured, YugabyteDB Anywhere sets the common name to the name of the service created for the pod, and adds the common name to the certificate request sent to cert-manager. ## Troubleshoot diff --git a/docs/content/stable/yugabyte-platform/upgrade/_index.md b/docs/content/stable/yugabyte-platform/upgrade/_index.md index 4638c0f411e1..e04e7cad4f7f 100644 --- a/docs/content/stable/yugabyte-platform/upgrade/_index.md +++ b/docs/content/stable/yugabyte-platform/upgrade/_index.md @@ -13,38 +13,26 @@ type: indexpage Keep YugabyteDB Anywhere (YBA) up to date for the latest fixes and improvements, and to be able to [upgrade your universes](../manage-deployments/upgrade-software/) to the latest version of YugabyteDB. You cannot upgrade a universe to a version of YugabyteDB that is later than the version of YugabyteDB Anywhere. -For information on which versions of YugabyteDB are compatible with your version of YBA, see [YugabyteDB Anywhere releases](/preview/releases/yba-releases/).
- You can upgrade YBA using the following methods: | Method | Using | Use If | | :--- | :--- | :--- | -| [YBA Installer](./upgrade-yp-installer/) | yba-ctl CLI | Your installation already uses YBA Installer. | -| [Replicated](./upgrade-yp-replicated/) | Replicated Admin Console | Your installation already uses Replicated.
Before you can migrate from a Replicated installation, upgrade to v2.20.1.3 or later using Replicated.
Note that you must migrate from Replicated to YBA Installer if you are upgrading YBA to v2024.1 or later. | -| [Kubernetes](./upgrade-yp-kubernetes/) | Helm chart | You're deploying in Kubernetes. | - -If you are upgrading a YBA installation with high availability enabled, follow the instructions provided in [Upgrade instances](../administer-yugabyte-platform/high-availability/#upgrade-instances). - -If you have upgraded YBA to version 2.12 or later and [xCluster replication](../../explore/going-beyond-sql/asynchronous-replication-ysql/) for your universe was set up via yb-admin instead of the UI, follow the instructions provided in [Synchronize replication after upgrade](upgrade-yp-xcluster-ybadmin/). - -{{< note title="Upgrading YBA on deprecated operating systems" >}} - -If you are running YBA on a [deprecated OS](../../reference/configuration/operating-systems/), you will need to update your OS before you can upgrade YBA to the next major release. - -{{< /note >}} +| YBA Installer | yba-ctl CLI | Your installation already uses YBA Installer. | +| Replicated | Replicated Admin Console | Your installation already uses Replicated.
Before you can migrate from a Replicated installation, upgrade to v2.20.1.3 or later using Replicated.
Note that you must migrate from Replicated to YBA Installer if you are upgrading YugabyteDB Anywhere to v2024.1 or later. | +| Kubernetes | Helm chart | You're deploying in Kubernetes. | {{}} + {{}} + {{}} - - {{}} + icon="fa-thin fa-up-from-bracket">}} {{}} diff --git a/docs/content/stable/yugabyte-platform/upgrade/prepare-to-upgrade.md b/docs/content/stable/yugabyte-platform/upgrade/prepare-to-upgrade.md new file mode 100644 index 000000000000..4ad8fffaa7ee --- /dev/null +++ b/docs/content/stable/yugabyte-platform/upgrade/prepare-to-upgrade.md @@ -0,0 +1,46 @@ +--- +title: Prepare to upgrade YugabyteDB Anywhere +headerTitle: Prepare to upgrade YugabyteDB Anywhere +linkTitle: Prepare to upgrade +description: Review changes that may affect installation +menu: + stable_yugabyte-platform: + identifier: prepare-to-upgrade + parent: upgrade + weight: 50 +type: docs +--- + +For information on which versions of YugabyteDB are compatible with your version of YugabyteDB Anywhere, see [YugabyteDB Anywhere releases](/preview/releases/yba-releases/). + +## High availability + +If you are upgrading a YugabyteDB Anywhere installation with high availability enabled, follow the instructions provided in [Upgrade instances](../../administer-yugabyte-platform/high-availability/#upgrade-instances). + +## Operating system + +If you are running YugabyteDB Anywhere on a [deprecated OS](../../../reference/configuration/operating-systems/), you need to update your OS before you can upgrade YugabyteDB Anywhere to the next major release. + +## Python + +YugabyteDB Anywhere v25.1 and later requires Python v3.10-3.12. If you are running YugabyteDB Anywhere on a system with Python earlier than 3.10, you will need to update Python on your system before you can upgrade YugabyteDB Anywhere to v25.1 or later. + +## cron-based universes + +cron and root-level systemd have been deprecated in favor of user-level systemd with node agent for management of universe nodes. 
+ +In particular, cron-based universes will no longer be supported in YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later. Before you can upgrade to v2025.2 or later, all your universes must be using systemd. YugabyteDB Anywhere will automatically upgrade universes that use a cloud provider configuration to systemd. + +However, on-premises cron-based universes must be upgraded manually. To do this, in YugabyteDB Anywhere v2024.2.2 or later, navigate to the universe and choose **Actions>Upgrade to Systemd**. + +## Node provisioning + +[Legacy provisioning](../../prepare/server-nodes-software/software-on-prem-legacy/) workflows have been deprecated. Provision nodes for on-premises universes using the `node-agent-provision.sh` script. Refer to [Automatically provision on-premises nodes](../../prepare/server-nodes-software/software-on-prem/). + +YugabyteDB Anywhere v2025.2 (LTS release planned for end of 2025) and later require that universes have node agent running on their nodes. Before you can upgrade to v2025.2 or later, all your universes must be using node agent. + +To upgrade a universe to node agent, first make sure the universe is not cron-based and, if necessary, [update the universe to systemd](#cron-based-universes). Then navigate to the universe and click **Actions>Install Node Agent**. If installation fails on a node, make sure the node satisfies the [prerequisites](../../prepare/server-nodes-software/) and retry the install. + +## xCluster + +If you have upgraded YugabyteDB Anywhere to version 2.12 or later and [xCluster replication](../../../explore/going-beyond-sql/asynchronous-replication-ysql/) for your universe was set up via yb-admin instead of the UI, follow the instructions provided in [Synchronize replication after upgrade](../upgrade-yp-xcluster-ybadmin/).
diff --git a/docs/content/v2.20/explore/ysql-language-features/pg-extensions/_index.md b/docs/content/v2.20/explore/ysql-language-features/pg-extensions/_index.md index 46a5e1e95234..668e7dff4ae8 100644 --- a/docs/content/v2.20/explore/ysql-language-features/pg-extensions/_index.md +++ b/docs/content/v2.20/explore/ysql-language-features/pg-extensions/_index.md @@ -31,6 +31,8 @@ YugabyteDB supports the following [PostgreSQL modules](https://www.postgresql.or | Module | Description | | :----- | :---------- | | [auto_explain](extension-auto-explain/) | Provides a means for logging execution plans of slow statements automatically. | +| cube| Implements a data type cube for representing multidimensional cubes.
For more information, see [cube](https://www.postgresql.org/docs/11/cube.html) in the PostgreSQL documentation. | +| earthdistance| Provides two different approaches to calculating great circle distances on the surface of the Earth.
For more information, see [earthdistance](https://www.postgresql.org/docs/11/earthdistance.html) in the PostgreSQL documentation. | | [file_fdw](extension-file-fdw/) | Provides the foreign-data wrapper file_fdw, which can be used to access data files in the server's file system. | | [fuzzystrmatch](extension-fuzzystrmatch/) | Provides several functions to determine similarities and distance between strings. | | hstore | Implements the hstore data type for storing sets of key-value pairs in a single PostgreSQL value.
For more information, see [hstore](https://www.postgresql.org/docs/11/hstore.html) in the PostgreSQL documentation. | diff --git a/docs/content/v2.20/yugabyte-platform/install-yugabyte-platform/install-software/installer.md b/docs/content/v2.20/yugabyte-platform/install-yugabyte-platform/install-software/installer.md index 31179c3005e9..76ea23f41934 100644 --- a/docs/content/v2.20/yugabyte-platform/install-yugabyte-platform/install-software/installer.md +++ b/docs/content/v2.20/yugabyte-platform/install-yugabyte-platform/install-software/installer.md @@ -276,7 +276,7 @@ To use the data disk with a new installation, do the following: ### Reconfigure -You can use YBA Installer to reconfigure an installed YBA instance. +You can use YBA Installer to make changes to an installed YBA instance. To reconfigure an installation, edit the configuration file with your changes, and then run the command as follows: @@ -291,8 +291,8 @@ For more information, refer to [Configuration options](#configuration-options). YBA Installer provides basic service management, with `start`, `stop`, and `restart` commands. Each of these can be performed for all the services (`platform`, `postgres`, and `prometheus`), or any individual service. ```sh -sudo yba-ctl [start, stop, reconfigure] -sudo yba-ctl [start, stop, reconfigure] prometheus +sudo yba-ctl [start, stop, restart] +sudo yba-ctl [start, stop, restart] prometheus ``` In addition to the state changing operations, you can use the `status` command to show the status of all YugabyteDB Anywhere services, in addition to other information such as the log and configuration location, versions of each service, and the URL to access the YugabyteDB Anywhere UI. 
@@ -433,6 +433,8 @@ YBA Installer [automatically generates](#configure-yba-installer) the file when | sudo | opt/yba-ctl/ | | non-sudo | ~/opt/yba-ctl/ | +To make changes to an existing installation, edit the configuration file with your changes and run the [reconfigure](#reconfigure) command. Note that some settings (marked with {{}}) cannot be changed after installation. + Note that the file must include all fields. Optional fields may be left blank. ### Configure YBA Installer diff --git a/docs/content/v2024.1/architecture/docdb-replication/cdc-logical-replication.md b/docs/content/v2024.1/architecture/docdb-replication/cdc-logical-replication.md index 04285ec8eebe..0a1ad4ccacb1 100644 --- a/docs/content/v2024.1/architecture/docdb-replication/cdc-logical-replication.md +++ b/docs/content/v2024.1/architecture/docdb-replication/cdc-logical-replication.md @@ -68,7 +68,7 @@ Each tablet sends changes in transaction commit time order. Further, in a transa ![VWAL-walsender](/images/architecture/vwal_walsender_interaction.png) -VWAL collects changes across multiple tablets, assembles the transactions, assigns LSN to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. +VWAL collects changes across multiple tablets, assembles the transactions, assigns a Log Sequence Number ([LSN](../../../develop/change-data-capture/using-logical-replication/key-concepts/#lsn)) to each change and transaction boundary (BEGIN, COMMIT) record, and sends the changes to the walsender in transaction commit time order. 
**Step 3 - walsender to client** diff --git a/docs/content/v2024.1/deploy/kubernetes/single-zone/oss/helm-chart.md b/docs/content/v2024.1/deploy/kubernetes/single-zone/oss/helm-chart.md index 2494e4d724b9..933d56e274f8 100644 --- a/docs/content/v2024.1/deploy/kubernetes/single-zone/oss/helm-chart.md +++ b/docs/content/v2024.1/deploy/kubernetes/single-zone/oss/helm-chart.md @@ -422,6 +422,15 @@ helm repo update helm upgrade yb-demo yugabytedb/yugabyte --version {{}} --wait -n yb-demo ``` +Then finalize the upgrade as follows: + +```sh +kubectl exec -it yb-master-0 -- /home/yugabyte/bin/yb-admin --master_addresses yb-master-0.yb-masters.default.svc.cluster.local:7100 promote_auto_flags +kubectl exec -it yb-master-0 -- /home/yugabyte/bin/yb-admin --master_addresses yb-master-0.yb-masters.default.svc.cluster.local:7100 upgrade_ysql +``` + +The `upgrade_ysql` command is only needed if YSQL is enabled. + ## Update the configuration of YugabyteDB pods You can update most settings in the helm chart by running a `helm upgrade` with the new values. By default, this performs a [rolling update](https://github.com/yugabyte/charts/blob/853d7ac744cf6d637b5877f4681940825beda8f6/stable/yugabyte/values.yaml#L60) of the pods. diff --git a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/_index.md b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/_index.md index 6d0ea30fe92e..4e2f05069dcb 100644 --- a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/_index.md +++ b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/_index.md @@ -81,7 +81,7 @@ For reference documentation, see [YugabyteDB Connector](./yugabytedb-connector/) ## Limitations -- LSN Comparisons Across Slots. +- Log Sequence Number ([LSN](../using-logical-replication/key-concepts/#lsn)) Comparisons Across Slots. In the case of YugabyteDB, the LSN  does not represent the byte offset of a WAL record. 
Hence, arithmetic on LSN and any other usages of the LSN making this assumption will not work. Also, currently, comparison of LSN values from messages coming from different replication slots is not supported. diff --git a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/key-concepts.md b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/key-concepts.md index 9057520d33ba..777b9f1174a4 100644 --- a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/key-concepts.md +++ b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/key-concepts.md @@ -20,7 +20,7 @@ The YugabyteDB logical replication feature uses [PostgreSQL Logical Replication] A replication slot represents a stream of changes that can be replayed to a client in the order they were made on the origin server. Each slot streams a sequence of changes from a single database. -In logical replication, the fundamental unit of data transmission is a transaction. A logical slot emits each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting LSN falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. +In logical replication, the fundamental unit of data transmission is a transaction. 
A logical slot emits each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so if a replication process is interrupted and restarts, even if the checkpoint or the starting Log Sequence Number ([LSN](#lsn)) falls in the middle of a transaction, **the entire transaction is retransmitted**. This behavior guarantees that clients receive complete transactions without missing any intermediate changes, maintaining data integrity across the replication stream​. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or (when using the replication protocol) request that decoding start from that LSN rather than letting the server determine the start point. For more information, refer to [Replication slots](https://www.postgresql.org/docs/11/logicaldecoding-explanation.html#LOGICALDECODING-REPLICATION-SLOTS) in the PostgreSQL documentation. diff --git a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/monitor.md b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/monitor.md index 7b0fdd339569..d8be0f5aff5f 100644 --- a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/monitor.md +++ b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/monitor.md @@ -64,7 +64,7 @@ Provides a list of all replication slots that currently exist on the database cl | active_pid | integer | The process ID of the session using this slot if the slot is currently actively being used. `NULL` if no replication process is ongoing. | | xmin | xid | The oldest transaction that this slot needs the database to retain. | | catalog_xmin | xid | Not applicable for YSQL. Always set to xmin. 
| -| restart_lsn | pg_lsn | The LSN of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. | +| restart_lsn | pg_lsn | The Log Sequence Number ([LSN](../key-concepts/#lsn)) of the oldest change record which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints. | | confirmed_flush_lsn | pg_lsn | The LSN up to which the logical slot's consumer has confirmed receiving data. Data older than this is not available anymore. Transactions with commit LSN lower than the `confirmed_flush_lsn` are not available anymore. | | yb_stream_id | text | UUID of the CDC stream | | yb_restart_commit_ht | int8 | A uint64 representation of the commit Hybrid Time corresponding to the `restart_lsn`. This can be used by the client (like YugabyteDB connector) to perform a consistent snapshot (as of the `consistent_point`) in the case when a replication slot already exists. | diff --git a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md index 60f4993f1e72..efb00f8fadd0 100644 --- a/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md +++ b/docs/content/v2024.1/develop/change-data-capture/using-logical-replication/yugabytedb-connector.md @@ -35,7 +35,7 @@ The connector produces a change event for every row-level insert, update, and de YugabyteDB normally purges write-ahead log (WAL) segments after some period of time. This means that the connector does not have the complete history of all changes that have been made to the database. Therefore, when the YugabyteDB connector first connects to a particular YugabyteDB database, it starts by performing a consistent snapshot of each of the configured tables. 
After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made. This way, the connector starts with a consistent view of all of the data, and does not omit any changes that were made while the snapshot was being taken. -The connector is tolerant of failures. As the connector reads changes and produces events, it records the LSN for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. +The connector is tolerant of failures. As the connector reads changes and produces events, it records the Log Sequence Number ([LSN](../key-concepts/#lsn)) for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. {{< tip title="Use UTF-8 encoding" >}} diff --git a/docs/content/v2024.1/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md b/docs/content/v2024.1/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md index bc8d6157865b..57dcf57a9956 100644 --- a/docs/content/v2024.1/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md +++ b/docs/content/v2024.1/develop/change-data-capture/using-yugabytedb-grpc-replication/debezium-connector-yugabytedb.md @@ -114,7 +114,7 @@ The YugabyteDB gRPC Connector typically spends the vast majority of its time str The connector keeps polling for changes and whenever there is a change, the connector processes them, converts them to a specific format (Protobuf or JSON in the case of the Debezium plugin) and writes them on an output stream, which can then be consumed by clients. -The connector acts as a YugabyteDB client. 
When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the LSN of the event. The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. +The connector acts as a YugabyteDB client. When the connector receives changes it transforms the events into Debezium create, update, or delete events that include the Log Sequence Number ([LSN](../../using-logical-replication/key-concepts/#lsn)) of the event. The connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic. Periodically, Kafka Connect records the most recent offset in another Kafka topic. The offset indicates source-specific position information that Debezium includes with each event. diff --git a/docs/content/v2024.1/develop/postgresql-compatibility.md b/docs/content/v2024.1/develop/postgresql-compatibility.md index 20a87d4f7978..024c51991513 100644 --- a/docs/content/v2024.1/develop/postgresql-compatibility.md +++ b/docs/content/v2024.1/develop/postgresql-compatibility.md @@ -14,7 +14,7 @@ rightNav: YugabyteDB is a [PostgreSQL-compatible](https://www.yugabyte.com/tech/postgres-compatibility/) distributed database that supports the majority of PostgreSQL syntax. YugabyteDB is methodically expanding its features to deliver PostgreSQL-compatible performance that can substantially improve your application's efficiency. -To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). 
When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. Here are the features that are part of the EPCM mode. +To test and take advantage of features developed for enhanced PostgreSQL compatibility in YugabyteDB that are currently in {{}}, you can enable Enhanced PostgreSQL Compatibility Mode (EPCM). When this mode is turned on, YugabyteDB is configured to use all the latest features developed for feature and performance parity. EPCM is available in [v2024.1](/preview/releases/ybdb-releases/v2024.1/) and later. The following features are part of EPCM. | Feature | Flag/Configuration Parameter | EA | GA | | :--- | :--- | :--- | :--- | @@ -107,7 +107,7 @@ Default ascending indexing provides feature compatibility and is the default in Configuration parameter: `yb_enable_bitmapscan=true` -Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing AND and OR conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). +Bitmap scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Bitmap scans can improve the performance of queries containing `AND` and `OR` conditions across several index scans. YugabyteDB bitmap scan provides feature compatibility and improved performance parity. For YugabyteDB relations to use a bitmap scan, the PostgreSQL parameter `enable_bitmapscan` must also be true (the default). 
### Efficient communication between PostgreSQL and DocDB diff --git a/docs/content/v2024.1/explore/ysql-language-features/pg-extensions/_index.md b/docs/content/v2024.1/explore/ysql-language-features/pg-extensions/_index.md index ff9a024ad25a..64c87a390732 100644 --- a/docs/content/v2024.1/explore/ysql-language-features/pg-extensions/_index.md +++ b/docs/content/v2024.1/explore/ysql-language-features/pg-extensions/_index.md @@ -31,6 +31,8 @@ YugabyteDB supports the following [PostgreSQL modules](https://www.postgresql.or | Module | Description | | :----- | :---------- | | [auto_explain](extension-auto-explain/) | Provides a means for logging execution plans of slow statements automatically. | +| cube| Implements a data type cube for representing multidimensional cubes.
For more information, see [cube](https://www.postgresql.org/docs/11/cube.html) in the PostgreSQL documentation. | +| earthdistance| Provides two different approaches to calculating great circle distances on the surface of the Earth.
For more information, see [earthdistance](https://www.postgresql.org/docs/11/earthdistance.html) in the PostgreSQL documentation. | | [file_fdw](extension-file-fdw/) | Provides the foreign-data wrapper file_fdw, which can be used to access data files in the server's file system. | | [fuzzystrmatch](extension-fuzzystrmatch/) | Provides several functions to determine similarities and distance between strings. | | hstore | Implements the hstore data type for storing sets of key-value pairs in a single PostgreSQL value.
For more information, see [hstore](https://www.postgresql.org/docs/11/hstore.html) in the PostgreSQL documentation. | diff --git a/docs/content/v2024.1/reference/configuration/yb-tserver.md b/docs/content/v2024.1/reference/configuration/yb-tserver.md index 160e34a67639..bcd11068c755 100644 --- a/docs/content/v2024.1/reference/configuration/yb-tserver.md +++ b/docs/content/v2024.1/reference/configuration/yb-tserver.md @@ -1730,7 +1730,7 @@ Default: `1GB` PostgreSQL parameter to enable or disable the query planner's use of bitmap-scan plan types. -Bitmap Scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Multiple bitmaps can be combined with AND or OR operators to create a final bitmap that is used to collect rows from the main table. +Bitmap Scans use multiple indexes to answer a query, with only one scan of the main table. Each index produces a "bitmap" indicating which rows of the main table are interesting. Multiple bitmaps can be combined with `AND` or `OR` operators to create a final bitmap that is used to collect rows from the main table. Bitmap scans follow the same `work_mem` behavior as PostgreSQL: each individual bitmap is bounded by `work_mem`. If there are n bitmaps, it means we may use `n * work_mem` memory. @@ -1759,13 +1759,13 @@ Default: 1024 ##### yb_enable_batchednl -{{}} Enable or disable the query planner's use of batched nested loop join. +Enable or disable the query planner's use of batched nested loop join. Default: true ##### yb_enable_base_scans_cost_model -{{}} Enables the YugabyteDB cost model for sequential and index scans. When enabling this parameter, you must run ANALYZE on user tables to maintain up-to-date statistics. +{{}} Enables the YugabyteDB cost model for sequential and index scans. When enabling this parameter, you must run ANALYZE on user tables to maintain up-to-date statistics. 
When enabling the cost based optimizer, ensure that [packed row](../../../architecture/docdb/packed-rows) for colocated tables is enabled by setting `ysql_enable_packed_row_for_colocated_table = true`. diff --git a/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/install-software/installer.md b/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/install-software/installer.md index 3aa51a3bd18c..0e943d88ebde 100644 --- a/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/install-software/installer.md +++ b/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/install-software/installer.md @@ -275,7 +275,7 @@ To use the data disk with a new installation, do the following: ### Reconfigure -You can use YBA Installer to reconfigure an installed YBA instance. +You can use YBA Installer to make changes to an installed YBA instance. To reconfigure an installation, edit the configuration file with your changes, and then run the command as follows: @@ -290,8 +290,8 @@ For more information, refer to [Configuration options](#configuration-options). YBA Installer provides basic service management, with `start`, `stop`, and `restart` commands. Each of these can be performed for all the services (`platform`, `postgres`, and `prometheus`), or any individual service. ```sh -sudo yba-ctl [start, stop, reconfigure] -sudo yba-ctl [start, stop, reconfigure] prometheus +sudo yba-ctl [start, stop, restart] +sudo yba-ctl [start, stop, restart] prometheus ``` In addition to the state changing operations, you can use the `status` command to show the status of all YugabyteDB Anywhere services, in addition to other information such as the log and configuration location, versions of each service, and the URL to access the YugabyteDB Anywhere UI. 
@@ -432,6 +432,8 @@ YBA Installer [automatically generates](#configure-yba-installer) the file when | sudo | opt/yba-ctl/ | | non-sudo | ~/opt/yba-ctl/ | +To make changes to an existing installation, edit the configuration file with your changes and run the [reconfigure](#reconfigure) command. Note that some settings (marked with {{}}) cannot be changed after installation. + Note that the file must include all fields. Optional fields may be left blank. ### Configure YBA Installer diff --git a/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md b/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md deleted file mode 100644 index 787aca094510..000000000000 --- a/docs/content/v2024.1/yugabyte-platform/install-yugabyte-platform/prerequisites-include.md +++ /dev/null @@ -1,350 +0,0 @@ - - -Choose the type of YugabyteDB Anywhere installation. - -{{< tabpane text=true >}} - - {{% tab header="Docker-based" lang="docker" %}} - -For a Docker-based installation, YugabyteDB Anywhere uses [Replicated scheduler](https://www.replicated.com/) for software distribution and container management. You need to ensure that the host can pull containers from the [Replicated Docker Registries](https://help.replicated.com/docs/native/getting-started/docker-registries/). - -Replicated installs a compatible Docker version if it is not pre-installed on the host. The currently supported Docker version is 20.10.n. - - {{% /tab %}} - - {{% tab header="Kubernetes-based" lang="kubernetes" %}} - -For a Kubernetes-based installation, you need to ensure that the host can pull container images from the [Quay.io](https://quay.io/) container registry. For details, see [Pull and push YugabyteDB Docker images to private container registry](#pull-and-push-yugabytedb-docker-images-to-private-container-registry). - -In addition, you need to ensure that core dumps are enabled and configured on the underlying Kubernetes node. 
For details, see [Specify ulimit and remember the location of core dumps](#specify-ulimit-and-remember-the-location-of-core-dumps). - - - -#### Specify ulimit and remember the location of core dumps - -The core dump collection in Kubernetes requires special care due to the fact that `core_pattern` is not isolated in cgroup drivers. - -You need to ensure that core dumps are enabled on the underlying Kubernetes node. Running the `ulimit -c` command within a Kubernetes pod or node must produce a large non-zero value or the `unlimited` value as an output. For more information, see [How to enable core dumps](https://www.ibm.com/support/pages/how-do-i-enable-core-dumps). - -To be able to locate your core dumps, you should be aware of the fact that the location to which core dumps are written depends on the sysctl `kernel.core_pattern` setting. For more information, see [Linux manual: core(5)](https://man7.org/linux/man-pages/man5/core.5.html#:~:text=Naming%20of%20core%20dump%20files). - -To inspect the value of the sysctl within a Kubernetes pod or node, execute the following: - -```sh -cat /proc/sys/kernel/core_pattern -``` - -If the value of `core_pattern` contains a `|` pipe symbol (for example, `|/usr/share/apport/apport -p%p -s%s -c%c -d%d -P%P -u%u -g%g -- %E`), the core dump is being redirected to a specific collector on the underlying Kubernetes node, with the location depending on the exact collector. To be able to retrieve core dump files in case of a crash within the Kubernetes pod, it is important that you understand where these files are written. - -If the value of `core_pattern` is a literal path of the form `/var/tmp/core.%p`, no action is required on your part, as core dumps will be copied by the YugabyteDB node to the persistent volume directory `/mnt/disk0/cores` for future analysis. - -Note the following: - -- ulimits and sysctl are inherited from Kubernetes nodes and cannot be changed for an individual pod. 
-- New Kubernetes nodes might be using [systemd-coredump](https://www.freedesktop.org/software/systemd/man/systemd-coredump.html) to manage core dumps on the node. - -#### Pull and push YugabyteDB Docker images to private container registry - -Due to security concerns, some Kubernetes environments use internal container registries such as Harbor and Nexus. In this type of setup, YugabyteDB deployment must be able to pull images from and push images to a private registry. - -{{< note title="Note" >}} - -This is not a recommended approach for enterprise environments. You should ask the container registry administrator to add proxy cache to pull the source images to the internal registry automatically. This would allow you to avoid modifying the Helm chart or providing a custom registry inside the YugabyteDB Anywhere cloud provider. - -{{< /note >}} - -Before proceeding, ensure that you have the following: - -- Pull secret consisting of the user name and password or service account to access source (pull permission) and destination (push and pull permissions) container registries. -- Docker installed on a server (desktop) that can access both container registries. For installation instructions, see [Docker Desktop](https://www.docker.com/products/docker-desktop). - -Generally, the process involves the following: - -- Fetching the correct version of the YugabyteDB Helm chart whose `values.yaml` file describes all the image locations. -- Retagging images. -- Pushing images to the private container registry. -- Modifying the Helm chart values to point to the new private location. - -![img](/images/yp/docker-pull.png) - -You need to perform the following steps: - -1. Log in to [Quay.io](https://quay.io/) to access the YugabyteDB private registry using the user name and password provided in the secret `yaml` file. To find the `auth` field, use `base64 -d` to decode the data inside the `yaml` file twice. In this field, the user name and password are separated by a colon. 
For example, `yugabyte+:ZQ66Z9C1K6AHD5A9VU28B06Q7N0AXZAQSR`. - - ```sh - docker login -u “your_yugabyte_username” -p “yugabyte_provided_password” quay.io - - docker search yugabytedb # You should see images - ``` - -1. Fetch the YugabyteDB Helm chart on your desktop (install Helm on your desktop). Since the images in the `values.yaml` file may vary depending on the version, you need to specify the version you want to pull and push, as follows: - - ```sh - helm repo add yugabytedb https://charts.yugabyte.com - helm repo update - helm fetch yugabytedb/yugaware - - version= {{ version }} - tar zxvf yugaware-{{ version }}.tgz - cd yugaware - cat values.yaml - ``` - - ```properties - image: - commonRegistry: "" - repository: **quay.io/yugabyte/yugaware** - tag: **{{ version.build }}** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: quay.io - tag: **latest** - name: **yugabyte/thirdparty-deps** - prometheus: - registry: "" - tag: **{{ version.prometheus }}** - name: **prom/prometheus** - nginx: - registry: "" - tag: **{{ version.nginx }}** - name: nginx - ``` - -1. 
Pull images to your Docker Desktop, as follows: - - ```sh - docker pull quay.io/yugabyte/yugaware:{{ version.build }} - ``` - - ```output - xxxxxxxxx: Pulling from yugabyte/yugaware - c87736221ed0: Pull complete - 4d33fcf3ee85: Pull complete - 60cbb698a409: Pull complete - daaf3bdf903e: Pull complete - eb7b573327ce: Pull complete - 94aa28231788: Pull complete - 16c067af0934: Pull complete - 8ab1e7f695af: Pull complete - 6153ecb58755: Pull complete - c0f981bfb844: Pull complete - 6485543159a8: Pull complete - 811ba76b1d72: Pull complete - e325b2ff3e2a: Pull complete - c351a0ce1ccf: Pull complete - 73765723160d: Pull complete - 588cb609ac0b: Pull complete - af3ae7e64e48: Pull complete - 17fb23853f77: Pull complete - cb799d679e2f: Pull complete - Digest: sha256:0f1cb1fdc1bd4c17699507ffa5a04d3fe5f267049e0675d5d78d77fa632b330c - Status: Downloaded newer image for quay.io/yugabyte/yugaware:xxxxxx - quay.io/yugabyte/yugaware:xxxxxxx - ``` - - ```sh - docker pull quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```output - latest: Pulling from yugabyte/thirdparty-deps - c87736221ed0: Already exists - 4d33fcf3ee85: Already exists - 60cbb698a409: Already exists - d90c5841d133: Pull complete - 8084187ca761: Pull complete - 47e3b9f5c7f5: Pull complete - 64430b56cbd6: Pull complete - 27b03c6bcdda: Pull complete - ae35ebe6caa1: Pull complete - 9a655eedc488: Pull complete - Digest: sha256:286a13eb113398e1c4e63066267db4921c7644dac783836515a783cbd25b2c2a - Status: Downloaded newer image for quay.io/yugabyte/thirdparty-deps:latest - quay.io/yugabyte/thirdparty-deps:latest - ``` - - ```sh - docker pull postgres:11.5 - ``` - - ```output - xxxxxx: Pulling from library/postgres - 80369df48736: Pull complete - b18dd0a6efec: Pull complete - 5c20c5b8227d: Pull complete - c5a7f905c8ec: Pull complete - 5a3f55930dd8: Pull complete - ffc097878b09: Pull complete - 3106d02490d4: Pull complete - 88d1fc513b8f: Pull complete - f7d9cc27056d: Pull complete - afe180d8d5fd: Pull complete - 
b73e04acbb5f: Pull complete - 1dba81bb6cfd: Pull complete - 26bf23ba2b27: Pull complete - 09ead80f0070: Pull complete - Digest: sha256:b3770d9c4ef11eba1ff5893e28049e98e2b70083e519e0b2bce0a20e7aa832fe - Status: Downloaded newer image for postgres:11.5 - docker.io/library/postgres: - ``` - - ```sh - docker pull prom/prometheus:v2.2.1 - ``` - - ```output - Image docker.io/prom/prometheus:v2.2.1 uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/ - aab39f0bc16d: Pull complete - a3ed95caeb02: Pull complete - 2cd9e239cea6: Pull complete - 0266ca3d0dd9: Pull complete - 341681dba10c: Pull complete - 8f6074d68b9e: Pull complete - 2fa612efb95d: Pull complete - 151829c004a9: Pull complete - 75e765061965: Pull complete - b5a15632e9ab: Pull complete - Digest: sha256:129e16b08818a47259d972767fd834d84fb70ca11b423cc9976c9bce9b40c58f - Status: Downloaded newer image for prom/prometheus: - docker.io/prom/prometheus: - ``` - - ```sh - docker pull nginx:1.17.4 - ``` - - ```output - 1.17.4: Pulling from library/nginx - 8d691f585fa8: Pull complete - 047cb16c0ff6: Pull complete - b0bbed1a78ca: Pull complete - Digest: sha256:77ebc94e0cec30b20f9056bac1066b09fbdc049401b71850922c63fc0cc1762e - Status: Downloaded newer image for nginx:1.17.4 - docker.io/library/nginx:1.17.4 - ``` - - ```sh - docker pull janeczku/go-dnsmasq:release-1.0.7 - ``` - - ```output - release-1.0.7: Pulling from janeczku/go-dnsmasq - 117f30b7ae3d: Pull complete - 504f1e14d6cc: Pull complete - 98e84d0ba41a: Pull complete - Digest: sha256:3a99ad92353b55e97863812470e4f7403b47180f06845fdd06060773fe04184f - Status: Downloaded newer image for janeczku/go-dnsmasq:release-1.0.7 - docker.io/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. 
Log in to your target container registry, as per the following example that uses Google Container Registry (GCR) : - - ```sh - docker login -u _json_key --password-stdin https://gcr.io < .ssh/my-service-account-key.json - ``` - -1. Tag the local images to your target registry, as follows: - - ```sh - docker images - ``` - - ```output - REPOSITORY TAG IMAGE ID CREATED SIZE - quay.io/yugabyte/yugaware 2.5.1.0-b153 **a04fef023c7c** 6 weeks ago 2.54GB - quay.io/yugabyte/thirdparty-deps latest **721453480a0f** 2 months ago 447MB - nginx 1.17.4 **5a9061639d0a** 15 months ago 126MB - postgres 11.5 **5f1485c70c9a** 15 months ago 293MB - prom/prometheus v2.2.1 **cc866859f8df** 2 years ago 113MB - janeczku/go-dnsmasq release-1.0.7 **caef6233eac4** 4 years ago 7.38MB - ``` - - ```sh - docker tag a04fef023c7c gcr.io/dataengineeringdemos/yugabyte/yugaware:2.5.1.0-b153 - docker tag 721453480a0f gcr.io/dataengineeringdemos/yugabyte/thirdparty-deps:latest - docker tag 5a9061639d0a gcr.io/dataengineeringdemos/yugabyte/nginx:1.17.4 - docker tag 5f1485c70c9a gcr.io/dataengineeringdemos/yugabyte/postgres:11.5 - docker tag cc866859f8df gcr.io/dataengineeringdemos/prom/prometheus:v2.2.1 - docker tag caef6233eac4 gcr.io/dataengineeringdemos/janeczku/go-dnsmasq:release-1.0.7 - ``` - -1. Push images to the private container registry, as follows: - - ```sh - docker push a04fef023c7c - docker push 721453480a0f - docker push 5a9061639d0a - docker push 5f1485c70c9a - docker push cc866859f8df - docker push caef6233eac4 - ``` - - ![img](/images/yp/docker-image.png) - -1. Modify the Helm chart `values.yaml` file. 
You can map your private internal repository URI to `commonRegistry` and use the folder or `project/image_name` and tags similar to the following: - - ```properties - image: - commonRegistry: "**gcr.io/dataengineeringdemos**" - repository: **“”** - tag: **2.5.1.0-b153** - pullPolicy: IfNotPresent - pullSecret: yugabyte-k8s-pull-secret - thirdparty-deps: - registry: /yugabyte/thhirdparty-deps - tag: **latest** - name: **yugabyte/thirdparty-deps** - postgres: - registry: "yugabyte/postgres" - tag: 11.5 - name: **postgres** - prometheus: - registry: "prom/prometheus" - tag: **v2.2.1** - name: **prom/prometheus** - nginx: - registry: "yugabyte/nginx" - tag: **1.17.4** - name: nginx - dnsmasq: - registry: "janeczku/go-dnsmasq/" - tag: **release-1.0.7** - name: **janeczku/go-dnsmasq - ``` - -1. Install Helm chart or specify the container registry in YugabyteDB Anywhere cloud provider, as follows: - - ```sh - helm install yugaware **.** -f values.yaml - ``` - - {{% /tab %}} - - {{% tab header="Airgapped" lang="airgapped" %}} - -Installing YugabyteDB Anywhere on Airgapped hosts, without access to any Internet traffic (inbound or outbound) requires the following: - -- Whitelisting endpoints: to install Replicated and YugabyteDB Anywhere on a host with no Internet connectivity, you have to first download the binaries on a computer that has Internet connectivity, and then copy the files over to the appropriate host. In case of restricted connectivity, the following endpoints have to be whitelisted to ensure that they are accessible from the host marked for installation: - `https://downloads.yugabyte.com` - `https://download.docker.com` - -- Ensuring that Docker Engine version 20.10.n is available. If it is not installed, you need to follow the procedure described in [Installing Docker in airgapped](https://www.replicated.com/docs/kb/supporting-your-customers/installing-docker-in-airgapped/). 
-- Ensuring that the following ports are open on the YugabyteDB Anywhere host: - - 8800 – HTTP access to the Replicated UI - - 80 or 443 – HTTP and HTTPS access to the YugabyteDB Anywhere UI, respectively - - 22 – SSH -- Ensuring that the attached disk storage (such as persistent EBS volumes on AWS) is 100 GB minimum. -- Having YugabyteDB Anywhere airgapped install package. Contact Yugabyte Support for more information. -- Signing the Yugabyte license agreement. Contact Yugabyte Support for more information. - - {{% /tab %}} - -{{< /tabpane >}} diff --git a/docs/data/currentVersions.json b/docs/data/currentVersions.json index 6339de3e2751..884a398bdedf 100644 --- a/docs/data/currentVersions.json +++ b/docs/data/currentVersions.json @@ -5,9 +5,9 @@ "series": "v2024.2", "alias": "stable", "display": "v2024.2 (LTS)", - "version": "2024.2.2.3", + "version": "2024.2.3.0", "versionShort": "2024.2.3", - "appVersion": "2024.2.2.3-b1", + "appVersion": "2024.2.3.0-b116", "isStable": true, "isLTS": true, "initialRelease": "2024-12-09", @@ -19,9 +19,9 @@ "series": "v2.25", "alias": "preview", "display": "v2.25 (Preview)", - "version": "2.25.1.0", - "versionShort": "2.25.1", - "appVersion": "2.25.1.0-b381", + "version": "2.25.2.0", + "versionShort": "2.25.2", + "appVersion": "2.25.2.0-b359", "isStable": false, "initialRelease": "2025-01-17" }, diff --git a/docs/eslint.config.mjs b/docs/eslint.config.mjs new file mode 100644 index 000000000000..84ec92ac48de --- /dev/null +++ b/docs/eslint.config.mjs @@ -0,0 +1,38 @@ +import { defineConfig } from "eslint/config"; +import _import from "eslint-plugin-import"; +import stylistic from "@stylistic/eslint-plugin"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +export default defineConfig([ + { + languageOptions: { + parserOptions: { + ecmaVersion: "latest", + sourceType: "module", + ecmaFeatures: { + jsx: true, + }, + }, + }, + + plugins: { + import: _import, + stylistic, + }, + + settings: { + 
"import/resolver": { + node: { + paths: ["./src"], + }, + }, + }, + + rules: { + "no-console": "warn", + "no-debugger": "warn", + "no-unused-vars": "warn", + }, + }, +]); diff --git a/docs/go.mod b/docs/go.mod index cd1fdaac7b57..e3be6e940265 100644 --- a/docs/go.mod +++ b/docs/go.mod @@ -3,7 +3,9 @@ module github.com/yugabyte/yugabyte-db/docs go 1.20 require ( + github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de // indirect github.com/google/docsy v0.11.0 // indirect github.com/google/docsy/dependencies v0.7.2 // indirect github.com/trunkcode/hugo-seo v0.2.2 // indirect + github.com/twbs/bootstrap v5.3.5+incompatible // indirect ) diff --git a/docs/go.sum b/docs/go.sum index bf0dd7236abd..b16713a2f9f9 100644 --- a/docs/go.sum +++ b/docs/go.sum @@ -1,5 +1,6 @@ github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= +github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de h1:JvHOfdSqvArF+7cffH9oWU8oLhn6YFYI60Pms8M/6tI= github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= github.com/google/docsy v0.10.0 h1:6tMDacPwAyRWNCfvsn/9qGOZDQ8b0aRzjRZvnZPY5dg= github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc= @@ -11,3 +12,5 @@ github.com/trunkcode/hugo-seo v0.2.2 h1:ywfWzmde21QktGKxs5hfdbMXhAEY0cANP/oaXKDg github.com/trunkcode/hugo-seo v0.2.2/go.mod h1:L66E4t0yxaJE8YyS97iCHYwDYkapDuIBaGwCpvC5bWM= github.com/twbs/bootstrap v5.2.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= +github.com/twbs/bootstrap v5.3.5+incompatible h1:6XrrFNMsiTTFcVTBf2886FO2XUNtwSE+QPv1os0uAA4= +github.com/twbs/bootstrap v5.3.5+incompatible/go.mod 
h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= diff --git a/docs/layouts/shortcodes/tags/feature/deprecated.html b/docs/layouts/shortcodes/tags/feature/deprecated.html new file mode 100644 index 000000000000..97c20dcfba64 --- /dev/null +++ b/docs/layouts/shortcodes/tags/feature/deprecated.html @@ -0,0 +1 @@ +Deprecated diff --git a/docs/layouts/shortcodes/tags/feature/restart-needed.html b/docs/layouts/shortcodes/tags/feature/restart-needed.html new file mode 100644 index 000000000000..79fc999ab75f --- /dev/null +++ b/docs/layouts/shortcodes/tags/feature/restart-needed.html @@ -0,0 +1 @@ +Restart Needed diff --git a/docs/layouts/shortcodes/tags/feature/t-server.html b/docs/layouts/shortcodes/tags/feature/t-server.html new file mode 100644 index 000000000000..09c7d3f7b74e --- /dev/null +++ b/docs/layouts/shortcodes/tags/feature/t-server.html @@ -0,0 +1 @@ +T-Server / Master Match diff --git a/docs/layouts/shortcodes/tags/wrap.html b/docs/layouts/shortcodes/tags/wrap.html new file mode 100644 index 000000000000..70d511aa0e63 --- /dev/null +++ b/docs/layouts/shortcodes/tags/wrap.html @@ -0,0 +1 @@ +
{{ .Inner }}
diff --git a/docs/netlify.toml b/docs/netlify.toml index ef5a60c73689..a92c4fd562fc 100644 --- a/docs/netlify.toml +++ b/docs/netlify.toml @@ -12,21 +12,21 @@ # all api_keys were generated from btoa(orig_key) [context.deploy-preview.environment] GO_VERSION = "1.20" - HUGO_VERSION = "0.143.1" + HUGO_VERSION = "0.145.0" NODE_VERSION = "22" CXXFLAGS = "-std=c++17" RUDDERSTACK_API_KEY = "Mmo5Zmp5M1lONWFLM2xYS044N3k5cGhic3R4" [context.branch-deploy.environment] GO_VERSION = "1.20" - HUGO_VERSION = "0.143.1" + HUGO_VERSION = "0.145.0" NODE_VERSION = "22" CXXFLAGS = "-std=c++17" RUDDERSTACK_API_KEY = "Mmo5Zmp5M1lONWFLM2xYS044N3k5cGhic3R4" [context.production.environment] GO_VERSION = "1.20" - HUGO_VERSION = "0.143.1" + HUGO_VERSION = "0.145.0" NODE_VERSION = "22" CXXFLAGS = "-std=c++17" RUDDERSTACK_API_KEY = "Mmo5Y252MjZ6R290ZjhPa3BROUx4cFl6VFVK" diff --git a/docs/package-lock.json b/docs/package-lock.json index ed30db353963..a3d5dd5c86a9 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -9,36 +9,31 @@ "version": "1.3.0", "license": "Apache License 2.0", "dependencies": { - "@babel/core": "7.26.8", - "@babel/eslint-parser": "7.26.8", + "@babel/core": "7.26.10", "@babel/plugin-proposal-decorators": "7.25.9", - "@babel/preset-env": "7.26.8", + "@babel/preset-env": "7.26.9", "@fortawesome/fontawesome-pro": "6.7.2", + "@stylistic/eslint-plugin": "4.2.0", "algoliasearch": "4.23.3", - "babel-loader": "9.2.1", + "babel-loader": "10.0.0", "clipboard": "2.0.11", "detect-external-link": "2.0.1", - "eslint": "8.56.0", - "eslint-config-airbnb": "19.0.4", - "eslint-config-xo-space": "0.35.0", + "eslint": "9.25.1", "eslint-plugin-import": "2.31.0", - "eslint-plugin-jsx-a11y": "6.10.2", - "eslint-webpack-plugin": "4.2.0", - "hugo-algolia": "1.2.14", - "npm-run-all": "4.1.5", - "react-dev-tools": "0.0.1", - "react-dev-utils": "12.0.1", - "run-p": "0.0.0", - "sass": "1.84.0", - "webpack": "5.97.1", + "eslint-webpack-plugin": "5.0.1", + "globals": "16.0.0", + "webpack": 
"5.99.7", "webpack-cli": "6.0.1", - "webpack-dev-server": "5.2.0", + "webpack-dev-server": "5.2.1", "yb-rrdiagram": "0.0.7" }, "devDependencies": { - "autoprefixer": "10.4.20", - "postcss": "8.5.1", - "postcss-cli": "11.0.0" + "autoprefixer": "10.4.21", + "npm-run-all": "4.1.5", + "postcss": "8.5.3", + "postcss-cli": "11.0.1", + "run-p": "0.0.0", + "sass": "1.87.0" } }, "node_modules/@algolia/cache-browser-local-storage": { @@ -226,22 +221,21 @@ } }, "node_modules/@babel/core": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.8.tgz", - "integrity": "sha512-l+lkXCHS6tQEc5oUpK28xBOZ6+HwaH7YwoYQbLFiYb4nS2/l1tKnZEtEWkD0GuiYdvArf9qBS0XlQGXzPMsNqQ==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.10.tgz", + "integrity": "sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==", "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.8", + "@babel/generator": "^7.26.10", "@babel/helper-compilation-targets": "^7.26.5", "@babel/helper-module-transforms": "^7.26.0", - "@babel/helpers": "^7.26.7", - "@babel/parser": "^7.26.8", - "@babel/template": "^7.26.8", - "@babel/traverse": "^7.26.8", - "@babel/types": "^7.26.8", - "@types/gensync": "^1.0.0", + "@babel/helpers": "^7.26.10", + "@babel/parser": "^7.26.10", + "@babel/template": "^7.26.9", + "@babel/traverse": "^7.26.10", + "@babel/types": "^7.26.10", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -256,32 +250,14 @@ "url": "https://opencollective.com/babel" } }, - "node_modules/@babel/eslint-parser": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.26.8.tgz", - "integrity": "sha512-3tBctaHRW6xSub26z7n8uyOTwwUsCdvIug/oxBH9n6yCO5hMj2vwDJAo7RbBMKrM7P+W2j61zLKviJQFGOYKMg==", - "license": "MIT", - "dependencies": { - 
"@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", - "eslint-visitor-keys": "^2.1.0", - "semver": "^6.3.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || >=14.0.0" - }, - "peerDependencies": { - "@babel/core": "^7.11.0", - "eslint": "^7.5.0 || ^8.0.0 || ^9.0.0" - } - }, "node_modules/@babel/generator": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.8.tgz", - "integrity": "sha512-ef383X5++iZHWAXX0SXQR6ZyQhw/0KtTkrTz61WXRhFM6dhpHulO/RJz79L8S6ugZHJkOOkUrUdxgdF2YiPFnA==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.0.tgz", + "integrity": "sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.8", - "@babel/types": "^7.26.8", + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^3.0.2" @@ -303,12 +279,12 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.26.5", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz", - "integrity": "sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz", + "integrity": "sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==", "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.26.5", + "@babel/compat-data": "^7.26.8", "@babel/helper-validator-option": "^7.25.9", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", @@ -319,17 +295,17 @@ } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz", - "integrity": "sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.0.tgz", + "integrity": "sha512-vSGCvMecvFCd/BdpGlhpXYNhhC4ccxyvQWpbGL4CWbvfEoLFWUZuSuf7s9Aw70flgQF+6vptvgK2IfOnKlRmBg==", "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.25.9", "@babel/helper-member-expression-to-functions": "^7.25.9", "@babel/helper-optimise-call-expression": "^7.25.9", - "@babel/helper-replace-supers": "^7.25.9", + "@babel/helper-replace-supers": "^7.26.5", "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", - "@babel/traverse": "^7.25.9", + "@babel/traverse": "^7.27.0", "semver": "^6.3.1" }, "engines": { @@ -340,9 +316,9 @@ } }, "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.26.3.tgz", - "integrity": "sha512-G7ZRb40uUgdKOQqPLjfD12ZmGA54PzqDFUv2BKImnC9QIfGhIHKvVML0oN8IUiDq4iRqpq74ABpvOaerfWdong==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.0.tgz", + "integrity": "sha512-fO8l08T76v48BhpNRW/nQ0MxfnSdoSKUJBMjubOAYffsVuGG5qOfMq7N6Es7UJvi7Y8goXXo07EfcHZXDPuELQ==", "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.25.9", @@ -357,9 +333,9 @@ } }, "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz", - "integrity": 
"sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.4.tgz", + "integrity": "sha512-jljfR1rGnXXNWnmQg2K3+bvhkxB51Rl32QRaOTuwwjviGrHzIbSc8+x9CpraDtbT7mfyjXObULP4w/adunNwAw==", "license": "MIT", "dependencies": { "@babel/helper-compilation-targets": "^7.22.6", @@ -525,25 +501,25 @@ } }, "node_modules/@babel/helpers": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.7.tgz", - "integrity": "sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz", + "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", "license": "MIT", "dependencies": { - "@babel/template": "^7.25.9", - "@babel/types": "^7.26.7" + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.8.tgz", - "integrity": "sha512-TZIQ25pkSoaKEYYaHbbxkfL36GNsQ6iFiBbeuzAkLnXayKR1yP1zFe+NxuZWWsUyvt8icPU9CCq0sgWGXR1GEw==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz", + "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", "license": "MIT", "dependencies": { - "@babel/types": "^7.26.8" + "@babel/types": "^7.27.0" }, "bin": { "parser": "bin/babel-parser.js" @@ -786,12 +762,12 @@ } }, "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.9.tgz", - "integrity": 
"sha512-1F05O7AYjymAtqbsFETboN1NvBdcnzMerO+zlMyJBEz6WkMdejvGWw9p05iTSjC85RLlBseHHQpYaM4gzJkBGg==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.27.0.tgz", + "integrity": "sha512-u1jGphZ8uDI2Pj/HJj6YQ6XQLZCNjOlprjxB5SVz6rq2T6SwAR+CdrWK0CP7F+9rDVMXdB0+r6Am5G5aobOjAQ==", "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" + "@babel/helper-plugin-utils": "^7.26.5" }, "engines": { "node": ">=6.9.0" @@ -852,6 +828,15 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-transform-classes/node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/plugin-transform-computed-properties": { "version": "7.25.9", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz", @@ -976,12 +961,12 @@ } }, "node_modules/@babel/plugin-transform-for-of": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.9.tgz", - "integrity": "sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.26.9.tgz", + "integrity": "sha512-Hry8AusVm8LW5BVFgiyUReuoGzPUpdHQQqJY5bZnbbf+ngOHWuCuYFKw/BqaaWlvEUrF91HMhDtEaI1hZzNbLg==", "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-plugin-utils": "^7.26.5", "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" }, "engines": { @@ -1323,12 +1308,12 @@ } }, "node_modules/@babel/plugin-transform-regenerator": { - 
"version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.25.9.tgz", - "integrity": "sha512-vwDcDNsgMPDGP0nMqzahDWE5/MLcX8sv96+wfX7as7LoF/kr97Bo/7fI00lXY4wUXYfVmwIIyG80fGZ1uvt2qg==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.27.0.tgz", + "integrity": "sha512-LX/vCajUJQDqE7Aum/ELUMZAY19+cDpghxrnyt5I1tV6X5PyC86AOoWXWFYFeIvauyeSA6/ktn4tQVn/3ZifsA==", "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-plugin-utils": "^7.26.5", "regenerator-transform": "^0.15.2" }, "engines": { @@ -1431,9 +1416,9 @@ } }, "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.26.7.tgz", - "integrity": "sha512-jfoTXXZTgGg36BmhqT3cAYK5qkmqvJpvNrPhaK/52Vgjhw4Rq29s9UqpWWV0D6yuRmgiFH/BUVlkl96zJWqnaw==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.0.tgz", + "integrity": "sha512-+LLkxA9rKJpNoGsbLnAgOCdESl73vwYn+V6b+5wHbrE7OGKVDPHIQvbFSzqE6rwqaCw2RE+zdJrlLkcf8YOA0w==", "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.26.5" @@ -1509,9 +1494,9 @@ } }, "node_modules/@babel/preset-env": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.26.8.tgz", - "integrity": "sha512-um7Sy+2THd697S4zJEfv/U5MHGJzkN2xhtsR3T/SWRbVSic62nbISh51VVfU9JiO/L/Z97QczHTaFVkOU8IzNg==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.26.9.tgz", + "integrity": "sha512-vX3qPGE8sEKEAZCWk05k3cpTAE3/nOYca++JA+Rd0z2NCNzabmYvEiSShKzm10zdquOIAVXsy2Ei/DTW34KlKQ==", "license": "MIT", "dependencies": { "@babel/compat-data": "^7.26.8", @@ -1543,7 +1528,7 @@ 
"@babel/plugin-transform-dynamic-import": "^7.25.9", "@babel/plugin-transform-exponentiation-operator": "^7.26.3", "@babel/plugin-transform-export-namespace-from": "^7.25.9", - "@babel/plugin-transform-for-of": "^7.25.9", + "@babel/plugin-transform-for-of": "^7.26.9", "@babel/plugin-transform-function-name": "^7.25.9", "@babel/plugin-transform-json-strings": "^7.25.9", "@babel/plugin-transform-literals": "^7.25.9", @@ -1606,9 +1591,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.7.tgz", - "integrity": "sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz", + "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -1618,30 +1603,30 @@ } }, "node_modules/@babel/template": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.8.tgz", - "integrity": "sha512-iNKaX3ZebKIsCvJ+0jd6embf+Aulaa3vNBqZ41kM7iTWjx5qzWKXGHiJUW3+nTpQ18SG11hdF8OAzKrpXkb96Q==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz", + "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", "license": "MIT", "dependencies": { "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.26.8", - "@babel/types": "^7.26.8" + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.8.tgz", - "integrity": "sha512-nic9tRkjYH0oB2dzr/JoGIm+4Q6SuYeLEiIiZDwBscRMYFJ+tMAz98fuel9ZnbXViA2I0HVSSRRK8DW5fjXStA==", + "version": "7.27.0", + "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.0.tgz", + "integrity": "sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==", "license": "MIT", "dependencies": { "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.8", - "@babel/parser": "^7.26.8", - "@babel/template": "^7.26.8", - "@babel/types": "^7.26.8", + "@babel/generator": "^7.27.0", + "@babel/parser": "^7.27.0", + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -1649,10 +1634,19 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/traverse/node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/types": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.8.tgz", - "integrity": "sha512-eUuWapzEGWFEpHFxgEaBG8e3n6S8L3MSu0oda755rOfabWPnh0Our1AozNFVUxGFIhbKgd1ksprsoDGMinTOTA==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz", + "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", "license": "MIT", "dependencies": { "@babel/helper-string-parser": "^7.25.9", @@ -1672,9 +1666,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz", - "integrity": "sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==", + "version": "4.6.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.6.1.tgz", + "integrity": "sha512-KTsJMmobmbrFLe3LDh0PC2FXpcSYJt/MLjlkh/9LEnmKYLSYmT/0EW9JWANjeoemiuZrmogti0tW5Ch+qNUYDw==", 
"license": "MIT", "dependencies": { "eslint-visitor-keys": "^3.4.3" @@ -1710,16 +1704,51 @@ "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, + "node_modules/@eslint/config-array": { + "version": "0.20.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.20.0.tgz", + "integrity": "sha512-fxlS1kkIjx8+vy2SjuCB94q3htSNrufYTXubwiBFeaQHbH6Ipi43gFJq2zCMt6PHhImH3Xmr0NksKDvchWlpQQ==", + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.6", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.1.tgz", + "integrity": "sha512-RI17tsD2frtDu/3dmI7QRrD4bedNKPM08ziRYaC5AhkGrzIAJelm9kJU1TznK+apx6V+cqRz8tfpEeG3oIyjxw==", + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.13.0.tgz", + "integrity": "sha512-yfkgDw1KR66rkT5A8ci4irzDysN7FRpq3ttJolR88OqQikAWqwA8j5VZyas+vjyBNFIJ7MfybJ9plMILI2UrCw==", + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", - "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", + "espree": "^10.0.1", + 
"globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.0", @@ -1727,34 +1756,53 @@ "strip-json-comments": "^3.1.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "license": "MIT", - "dependencies": { - "type-fest": "^0.20.2" - }, "engines": { - "node": ">=8" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/@eslint/js": { - "version": "8.56.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", - "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "version": "9.25.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.25.1.tgz", + "integrity": "sha512-dEIwmjntEx8u3Uvv+kr3PDeeArL8Hw07H9kyYxCjnM9pBjfEhk6uLXSchxxzgiwtRhhzVzqmUSDFBOi1TuZ7qg==", "license": "MIT", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.2.8", + "resolved": 
"https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.8.tgz", + "integrity": "sha512-ZAoA40rNMPwSm+AeHpCq8STiNAwzWLJuP8Xv4CHIc9wv/PSuExjMrmjfYNj682vW0OOiZ1HKxzvjQr9XZIisQA==", + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.13.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@fortawesome/fontawesome-pro": { @@ -1766,19 +1814,39 @@ "node": ">=6" } }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.11.14", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", - "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", - "deprecated": "Use @eslint/config-array instead", + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.6", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", + "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", "license": "Apache-2.0", "dependencies": { - "@humanwhocodes/object-schema": "^2.0.2", - "debug": "^4.3.1", - "minimatch": "^3.0.5" + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.3.0" }, "engines": { - "node": ">=10.10.0" + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", + "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", 
+ "url": "https://github.com/sponsors/nzakas" } }, "node_modules/@humanwhocodes/module-importer": { @@ -1794,12 +1862,18 @@ "url": "https://github.com/sponsors/nzakas" } }, - "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", - "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", - "deprecated": "Use @eslint/object-schema instead", - "license": "BSD-3-Clause" + "node_modules/@humanwhocodes/retry": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.2.tgz", + "integrity": "sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } }, "node_modules/@jest/schemas": { "version": "29.6.3", @@ -1905,9 +1979,9 @@ } }, "node_modules/@jsonjoy.com/json-pack": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.1.1.tgz", - "integrity": "sha512-osjeBqMJ2lb/j/M8NCPjs1ylqWIcTRTycIhVB5pt6LgzgeRSb0YRZ7j9RfA8wIUrsr/medIuhVyonXRZWLyfdw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.2.0.tgz", + "integrity": "sha512-io1zEbbYcElht3tdlqEOFxZ0dMTYrHz9iMf0gqn1pPjZFTCgM5R4R5IMA20Chb2UPYYsxjzs8CgZ7Nb5n2K2rA==", "license": "Apache-2.0", "dependencies": { "@jsonjoy.com/base64": "^1.1.1", @@ -1948,15 +2022,6 @@ "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", "license": "MIT" }, - "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { - "version": "5.1.1-v1", - "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz", - "integrity": 
"sha512-54/JRvkLIzzDWshCWfuhadfrfZVPiElY8Fcgmg1HroEly/EDSszzhBAsarCux+D/kOslTRquNzuyGSmUSTTHGg==", - "license": "MIT", - "dependencies": { - "eslint-scope": "5.1.1" - } - }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -1996,6 +2061,7 @@ "version": "2.5.1", "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz", "integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==", + "dev": true, "hasInstallScript": true, "license": "MIT", "optional": true, @@ -2035,6 +2101,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2055,6 +2122,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2075,6 +2143,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2095,6 +2164,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2115,6 +2185,7 @@ "cpu": [ "arm" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2135,6 +2206,7 @@ "cpu": [ "arm" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2155,6 +2227,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2175,6 +2248,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2195,6 +2269,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2215,6 +2290,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2235,6 +2311,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2255,6 +2332,7 @@ "cpu": [ "ia32" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2275,6 +2353,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -2300,17 +2379,56 @@ "integrity": 
"sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", "license": "MIT" }, - "node_modules/@sindresorhus/merge-streams": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", - "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", - "dev": true, + "node_modules/@stylistic/eslint-plugin": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@stylistic/eslint-plugin/-/eslint-plugin-4.2.0.tgz", + "integrity": "sha512-8hXezgz7jexGHdo5WN6JBEIPHCSFyyU4vgbxevu4YLVS5vl+sxqAAGyXSzfNDyR6xMNSH5H1x67nsXcYMOHtZA==", "license": "MIT", + "dependencies": { + "@typescript-eslint/utils": "^8.23.0", + "eslint-visitor-keys": "^4.2.0", + "espree": "^10.3.0", + "estraverse": "^5.3.0", + "picomatch": "^4.0.2" + }, "engines": { - "node": ">=18" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "peerDependencies": { + "eslint": ">=9.0.0" + } + }, + "node_modules/@stylistic/eslint-plugin/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@stylistic/eslint-plugin/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@stylistic/eslint-plugin/node_modules/picomatch": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/@types/body-parser": { @@ -2352,9 +2470,9 @@ } }, "node_modules/@types/eslint": { - "version": "8.56.12", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.12.tgz", - "integrity": "sha512-03ruubjWyOHlmljCVoxSuNDdmfZDzsrrz0P2LeJsOXr+ZwFQ+0yQIwNCwt/GYhV7Z31fgtXJTAEs+FYlEL851g==", + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", "license": "MIT", "dependencies": { "@types/estree": "*", @@ -2372,9 +2490,9 @@ } }, "node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", + "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", "license": "MIT" }, "node_modules/@types/express": { @@ -2390,18 +2508,6 @@ } }, "node_modules/@types/express-serve-static-core": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.0.6.tgz", - "integrity": "sha512-3xhRnjJPkULekpSzgtoNYYcTWgEZkp4myc+Saevii5JPnHNvHMRlBSHDbs7Bh1iPPoVTERHEZXyhyLbMEsExsA==", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" - } - }, - "node_modules/@types/express/node_modules/@types/express-serve-static-core": { "version": 
"4.19.6", "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", @@ -2413,12 +2519,6 @@ "@types/send": "*" } }, - "node_modules/@types/gensync": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@types/gensync/-/gensync-1.0.4.tgz", - "integrity": "sha512-C3YYeRQWp2fmq9OryX+FoDy8nXS6scQ7dPptD8LnFDAUNcKWJjXQKDNJD3HVm+kOUsXhTOkpi69vI4EuAr95bA==", - "license": "MIT" - }, "node_modules/@types/http-errors": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", @@ -2477,12 +2577,12 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz", - "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==", + "version": "22.15.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.3.tgz", + "integrity": "sha512-lX7HFZeHf4QG/J7tBZqrCAXwz9J5RD56Y6MpP0eJkka8p+K0RY/yBTW7CYFJ4VGCclxqOLKmiGP5juQc6MKgcw==", "license": "MIT", "dependencies": { - "undici-types": "~6.20.0" + "undici-types": "~6.21.0" } }, "node_modules/@types/node-forge": { @@ -2494,12 +2594,6 @@ "@types/node": "*" } }, - "node_modules/@types/parse-json": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", - "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", - "license": "MIT" - }, "node_modules/@types/qs": { "version": "6.9.18", "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.18.tgz", @@ -2558,9 +2652,9 @@ } }, "node_modules/@types/ws": { - "version": "8.5.14", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.14.tgz", - "integrity": 
"sha512-bd/YFLW+URhBzMXurx7lWByOu+xzU9+kb3RboOteXYDfW+tr+JZa99OyNmPINEGB/ahzKrEuc8rcv4gnpJmxTw==", + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", "license": "MIT", "dependencies": { "@types/node": "*" @@ -2581,72 +2675,210 @@ "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", "license": "MIT" }, - "node_modules/@ungap/structured-clone": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", - "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", - "license": "ISC" - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", - "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.31.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.31.0.tgz", + "integrity": "sha512-knO8UyF78Nt8O/B64i7TlGXod69ko7z6vJD9uhSlm0qkAbGeRUSudcm0+K/4CrRjrpiHfBCjMWlc08Vav1xwcw==", "license": "MIT", "dependencies": { - "@webassemblyjs/helper-numbers": "1.13.2", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + "@typescript-eslint/types": "8.31.0", + "@typescript-eslint/visitor-keys": "8.31.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", - "integrity": 
"sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", - "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", - "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", - "license": "MIT" + "node_modules/@typescript-eslint/types": { + "version": "8.31.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.31.0.tgz", + "integrity": "sha512-Ch8oSjVyYyJxPQk8pMiP2FFGYatqXQfQIaMp+TpuuLlDachRWpUAeEu1u9B/v/8LToehUIWyiKcA/w5hUFRKuQ==", + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", - "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.31.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.31.0.tgz", + "integrity": "sha512-xLmgn4Yl46xi6aDSZ9KkyfhhtnYI15/CvHbpOy/eR5NWhK/BK8wc709KKwhAR0m4ZKRP7h07bm4BWUYOCuRpQQ==", "license": "MIT", "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.13.2", - "@webassemblyjs/helper-api-error": "1.13.2", - "@xtuc/long": "4.2.2" + "@typescript-eslint/types": 
"8.31.0", + "@typescript-eslint/visitor-keys": "8.31.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.0.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <5.9.0" } }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", - "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", - "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-buffer": "1.14.1", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2", - "@webassemblyjs/wasm-gen": "1.14.1" + "balanced-match": "^1.0.0" } }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": 
"sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.31.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.31.0.tgz", + "integrity": "sha512-qi6uPLt9cjTFxAb1zGNgTob4x9ur7xC6mHQJ8GwEzGMGE9tYniublmJaowOJ9V2jUzxrltTPfdG2nKlWsq0+Ww==", + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.31.0", + "@typescript-eslint/types": "8.31.0", + "@typescript-eslint/typescript-estree": "8.31.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.9.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.31.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.31.0.tgz", + "integrity": "sha512-QcGHmlRHWOl93o64ZUMNewCdwKGU6WItOU52H0djgNmn1EOrhVudrDzXz4OycCRSCPwFCDrE2iIt5vmuUdHxuQ==", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.31.0", + "eslint-visitor-keys": "^4.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", "license": "MIT", "dependencies": { @@ -2812,9 +3044,9 @@ } }, "node_modules/acorn": { - "version": "8.14.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", - "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", 
"license": "MIT", "bin": { "acorn": "bin/acorn" @@ -2832,24 +3064,6 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, - "node_modules/address": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", - "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/agentkeepalive": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz", - "integrity": "sha512-TnB6ziK363p7lR8QpeLC8aMr8EGYBKZTpgzQLfqTs3bR0Oo5VbKdwKf8h0dSzsYrB7lSCgfJnMZKqShvlq5Oyg==", - "license": "MIT", - "engines": { - "node": ">= 0.10.0" - } - }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -2905,15 +3119,6 @@ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "license": "MIT" }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, "node_modules/algoliasearch": { "version": "4.23.3", "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.23.3.tgz", @@ -2953,6 +3158,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -2992,15 +3198,6 @@ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", "license": "Python-2.0" }, - "node_modules/aria-query": { - "version": "5.3.2", - "resolved": 
"https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", - "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", - "license": "Apache-2.0", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/array-buffer-byte-length": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", @@ -3043,48 +3240,19 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/array.prototype.findlast": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", - "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", - "license": "MIT", - "peer": true, - "dependencies": { - "call-bind": "^1.0.7", - "define-properties": "^1.2.1", - "es-abstract": "^1.23.2", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", - "es-shim-unscopables": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/array.prototype.findlastindex": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", - "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.6.tgz", + "integrity": "sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ==", "license": "MIT", 
"dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", "define-properties": "^1.2.1", - "es-abstract": "^1.23.2", + "es-abstract": "^1.23.9", "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", - "es-shim-unscopables": "^1.0.2" + "es-object-atoms": "^1.1.1", + "es-shim-unscopables": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -3129,23 +3297,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/array.prototype.tosorted": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", - "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", - "license": "MIT", - "peer": true, - "dependencies": { - "call-bind": "^1.0.7", - "define-properties": "^1.2.1", - "es-abstract": "^1.23.3", - "es-errors": "^1.3.0", - "es-shim-unscopables": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/arraybuffer.prototype.slice": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", @@ -3167,12 +3318,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/ast-types-flow": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", - "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", - "license": "MIT" - }, "node_modules/async-function": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", @@ -3182,19 +3327,10 @@ "node": ">= 0.4" } }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "license": "ISC", - "engines": { - "node": ">= 4.0.0" - 
} - }, "node_modules/autoprefixer": { - "version": "10.4.20", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", - "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", + "version": "10.4.21", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz", + "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==", "dev": true, "funding": [ { @@ -3212,11 +3348,11 @@ ], "license": "MIT", "dependencies": { - "browserslist": "^4.23.3", - "caniuse-lite": "^1.0.30001646", + "browserslist": "^4.24.4", + "caniuse-lite": "^1.0.30001702", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", - "picocolors": "^1.0.1", + "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "bin": { @@ -3244,49 +3380,30 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/axe-core": { - "version": "4.10.2", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.2.tgz", - "integrity": "sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w==", - "license": "MPL-2.0", - "engines": { - "node": ">=4" - } - }, - "node_modules/axobject-query": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", - "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", - "license": "Apache-2.0", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/babel-loader": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.2.1.tgz", - "integrity": "sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==", + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-10.0.0.tgz", + "integrity": 
"sha512-z8jt+EdS61AMw22nSfoNJAZ0vrtmhPRVi6ghL3rCeRZI8cdNYFiV5xeV3HbE7rlZZNmGH8BVccwWt8/ED0QOHA==", "license": "MIT", "dependencies": { - "find-cache-dir": "^4.0.0", - "schema-utils": "^4.0.0" + "find-up": "^5.0.0" }, "engines": { - "node": ">= 14.15.0" + "node": "^18.20.0 || ^20.10.0 || >=22.0.0" }, "peerDependencies": { "@babel/core": "^7.12.0", - "webpack": ">=5" + "webpack": ">=5.61.0" } }, "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.4.12", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.12.tgz", - "integrity": "sha512-CPWT6BwvhrTO2d8QVorhTCQw9Y43zOu7G9HigcfxvepOU6b8o3tcWad6oVgZIsZCTt42FFv97aA7ZJsbM4+8og==", + "version": "0.4.13", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.13.tgz", + "integrity": "sha512-3sX/eOms8kd3q2KZ6DAhKPc0dgm525Gqq5NtWKZ7QYYZEv57OQ54KtblzJzH1lQF/eQxO8KjWGIK9IPUJNus5g==", "license": "MIT", "dependencies": { "@babel/compat-data": "^7.22.6", - "@babel/helper-define-polyfill-provider": "^0.6.3", + "@babel/helper-define-polyfill-provider": "^0.6.4", "semver": "^6.3.1" }, "peerDependencies": { @@ -3307,12 +3424,12 @@ } }, "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.3.tgz", - "integrity": "sha512-LiWSbl4CRSIa5x/JAU6jZiG9eit9w6mz+yVMFwDE83LAWvt0AfGBoZ7HS/mkhrKuh2ZlzfVZYKoLjXdqw6Yt7Q==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.4.tgz", + "integrity": "sha512-7gD3pRadPrbjhjLyxebmx/WrFYcuSjZ0XbdUujQMZ/fcE9oeewk2U/7PCvez84UeuK3oSjmPZ0Ch0dlupQvGzw==", "license": "MIT", "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.6.3" + "@babel/helper-define-polyfill-provider": "^0.6.4" }, "peerDependencies": { "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" @@ -3494,9 
+3611,9 @@ } }, "node_modules/call-bind-apply-helpers": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz", - "integrity": "sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "license": "MIT", "dependencies": { "es-errors": "^1.3.0", @@ -3507,13 +3624,13 @@ } }, "node_modules/call-bound": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz", - "integrity": "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", "license": "MIT", "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "get-intrinsic": "^1.2.6" + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" }, "engines": { "node": ">= 0.4" @@ -3532,9 +3649,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001699", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001699.tgz", - "integrity": "sha512-b+uH5BakXZ9Do9iK+CkDmctUSEqZl+SP056vc5usa0PL+ev5OHw003rZXcnjNDv3L8P5j6rwT6C0BPKSikW08w==", + "version": "1.0.30001715", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001715.tgz", + "integrity": "sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw==", "funding": [ { "type": "opencollective", @@ -3706,12 +3823,6 @@ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "license": 
"MIT" }, - "node_modules/common-path-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", - "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", - "license": "ISC" - }, "node_modules/compressible": { "version": "2.0.18", "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", @@ -3725,9 +3836,9 @@ } }, "node_modules/compression": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.5.tgz", - "integrity": "sha512-bQJ0YRck5ak3LgtnpKkiabX5pNF7tMUh1BSy2ZBOTh0Dim0BUu6aPPwByIns6/A5Prh8PufSPerMDUklpzes2Q==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.0.tgz", + "integrity": "sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA==", "license": "MIT", "dependencies": { "bytes": "3.1.2", @@ -3763,12 +3874,6 @@ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "license": "MIT" }, - "node_modules/confusing-browser-globals": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", - "integrity": "sha512-JsPKdmh8ZkmnHxDk55FZ1TqVLvEQTvoByJZRN9jzI0UjxK/QgAmsphz7PGtqgPieQZ/CQcHWXCR7ATDNhGe+YA==", - "license": "MIT" - }, "node_modules/connect-history-api-fallback": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", @@ -3821,12 +3926,12 @@ "license": "MIT" }, "node_modules/core-js-compat": { - "version": "3.40.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.40.0.tgz", - "integrity": "sha512-0XEDpr5y5mijvw8Lbc6E5AkjrHfp7eEoPlu36SWeAbcL8fn1G1ANe8DBlo2XoNN89oVpxWwOjYIPVzR4ZvsKCQ==", + "version": "3.41.0", + "resolved": 
"https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.41.0.tgz", + "integrity": "sha512-RFsU9LySVue9RTwdDVX/T0e2Y6jRYWXERKElIjpuEOEnxaXffI0X7RUwVzfYLfzuLXSNJDYoRYUAmRUcyln20A==", "license": "MIT", "dependencies": { - "browserslist": "^4.24.3" + "browserslist": "^4.24.4" }, "funding": { "type": "opencollective", @@ -3839,40 +3944,6 @@ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", "license": "MIT" }, - "node_modules/cosmiconfig": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", - "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cosmiconfig/node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cosmiconfig/node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "license": "ISC", - "engines": { - "node": ">= 6" - } - }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3887,12 +3958,6 @@ "node": ">= 8" } }, - "node_modules/damerau-levenshtein": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", - "integrity": 
"sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", - "license": "BSD-2-Clause" - }, "node_modules/data-view-buffer": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", @@ -3967,15 +4032,6 @@ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "license": "MIT" }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/default-browser": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", @@ -4021,15 +4077,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/define-properties": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", @@ -4063,13 +4110,13 @@ } }, "node_modules/dependency-graph": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz", - "integrity": "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-1.0.0.tgz", + "integrity": "sha512-cW3gggJ28HZ/LExwxP2B++aiKxhJXMSIt9K48FOXQkm+vuG5gyatXnLsONRJdzO/7VfjDIiaOOa/bs4l464Lwg==", "dev": true, "license": "MIT", "engines": { - "node": ">= 
0.6.0" + "node": ">=4" } }, "node_modules/destroy": { @@ -4092,6 +4139,7 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "dev": true, "license": "Apache-2.0", "optional": true, "bin": { @@ -4107,88 +4155,30 @@ "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", "license": "MIT" }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", "license": "MIT", "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" + "@leichtgewicht/ip-codec": "^2.0.1" }, "engines": { - "node": ">= 4.2.1" - } - }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": 
"sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dir-glob/node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/dns-packet": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", - "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", - "license": "MIT", - "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" + "node": ">=6" } }, "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "license": "Apache-2.0", "dependencies": { "esutils": "^2.0.2" }, "engines": { - "node": ">=6.0.0" + "node": ">=0.10.0" } }, - "node_modules/dom-walk": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", - "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" - }, "node_modules/dunder-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", @@ -4203,12 +4193,6 @@ "node": ">= 0.4" } }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": 
"https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", - "license": "MIT" - }, "node_modules/ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", @@ -4216,17 +4200,11 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.96", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.96.tgz", - "integrity": "sha512-8AJUW6dh75Fm/ny8+kZKJzI1pgoE8bKLZlzDU2W1ENd+DXKJrx7I7l9hb8UWR4ojlnb5OlixMt00QWiYJoVw1w==", + "version": "1.5.143", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.143.tgz", + "integrity": "sha512-QqklJMOFBMqe46k8iIOwA9l2hz57V2OKMmP5eSWcUvwx+mASAsbU+wkF1pHjn9ZVSBPrsYWr4/W/95y5SwYg2g==", "license": "ISC" }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "license": "MIT" - }, "node_modules/encodeurl": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", @@ -4258,19 +4236,6 @@ "node": ">=6" } }, - "node_modules/envify": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz", - "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==", - "license": "MIT", - "dependencies": { - "esprima": "^4.0.0", - "through": "~2.3.4" - }, - "bin": { - "envify": "bin/envify" - } - }, "node_modules/envinfo": { "version": "7.14.0", "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.14.0.tgz", @@ -4287,6 +4252,7 @@ "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", "integrity": 
"sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" @@ -4375,38 +4341,10 @@ "node": ">= 0.4" } }, - "node_modules/es-iterator-helpers": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz", - "integrity": "sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w==", - "license": "MIT", - "peer": true, - "dependencies": { - "call-bind": "^1.0.8", - "call-bound": "^1.0.3", - "define-properties": "^1.2.1", - "es-abstract": "^1.23.6", - "es-errors": "^1.3.0", - "es-set-tostringtag": "^2.0.3", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.6", - "globalthis": "^1.0.4", - "gopd": "^1.2.0", - "has-property-descriptors": "^1.0.2", - "has-proto": "^1.2.0", - "has-symbols": "^1.1.0", - "internal-slot": "^1.1.0", - "iterator.prototype": "^1.1.4", - "safe-array-concat": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/es-module-lexer": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.6.0.tgz", - "integrity": "sha512-qqnD1yMU6tk/jnaMosogGySTZP8YtUgAffA9nMN+E/rjxcfRQ6IEk7IiozUjgxKoFHBGjTLnrHB/YC45r/59EQ==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", "license": "MIT" }, "node_modules/es-object-atoms": { @@ -4437,12 +4375,15 @@ } }, "node_modules/es-shim-unscopables": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", - "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", "license": "MIT", "dependencies": { - "hasown": "^2.0.0" + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/es-to-primitive": { @@ -4462,12 +4403,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==", - "license": "MIT" - }, "node_modules/escalade": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", @@ -4496,135 +4431,63 @@ } }, "node_modules/eslint": { - "version": "8.56.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz", - "integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==", - "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "version": "9.25.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.25.1.tgz", + "integrity": "sha512-E6Mtz9oGQWDCpV12319d59n4tx9zOTXSTmc8BLVxBx+G/0RdM5MvEEJLU9c0+aleoePYYgVTOsRblx433qmhWQ==", "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.56.0", - "@humanwhocodes/config-array": "^0.11.13", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.20.0", + "@eslint/config-helpers": "^0.2.1", + "@eslint/core": "^0.13.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.25.1", + "@eslint/plugin-kit": "^0.2.8", + "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "@ungap/structured-clone": "^1.2.0", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", + "cross-spawn": "^7.0.6", "debug": "^4.3.2", - "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.2", - "eslint-visitor-keys": "^3.4.3", - "espree": "^9.6.1", - "esquery": "^1.4.2", + "eslint-scope": "^8.3.0", + "eslint-visitor-keys": "^4.2.0", + "espree": "^10.3.0", + "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", + "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "graphemer": "^1.4.0", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" + "optionator": "^0.9.3" }, "bin": { "eslint": "bin/eslint.js" }, "engines": 
{ - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-config-airbnb": { - "version": "19.0.4", - "resolved": "https://registry.npmjs.org/eslint-config-airbnb/-/eslint-config-airbnb-19.0.4.tgz", - "integrity": "sha512-T75QYQVQX57jiNgpF9r1KegMICE94VYwoFQyMGhrvc+lB8YF2E/M/PYDaQe1AJcWaEgqLE+ErXV1Og/+6Vyzew==", - "license": "MIT", - "dependencies": { - "eslint-config-airbnb-base": "^15.0.0", - "object.assign": "^4.1.2", - "object.entries": "^1.1.5" - }, - "engines": { - "node": "^10.12.0 || ^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^7.32.0 || ^8.2.0", - "eslint-plugin-import": "^2.25.3", - "eslint-plugin-jsx-a11y": "^6.5.1", - "eslint-plugin-react": "^7.28.0", - "eslint-plugin-react-hooks": "^4.3.0" - } - }, - "node_modules/eslint-config-airbnb-base": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-15.0.0.tgz", - "integrity": "sha512-xaX3z4ZZIcFLvh2oUNvcX5oEofXda7giYmuplVxoOg5A7EXJMrUyqRgR+mhDhPK8LZ4PttFOBvCYDbX3sUoUig==", - "license": "MIT", - "dependencies": { - "confusing-browser-globals": "^1.0.10", - "object.assign": "^4.1.2", - "object.entries": "^1.1.5", - "semver": "^6.3.0" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - }, - "peerDependencies": { - "eslint": "^7.32.0 || ^8.2.0", - "eslint-plugin-import": "^2.25.2" - } - }, - "node_modules/eslint-config-xo": { - "version": "0.44.0", - "resolved": "https://registry.npmjs.org/eslint-config-xo/-/eslint-config-xo-0.44.0.tgz", - "integrity": "sha512-YG4gdaor0mJJi8UBeRJqDPO42MedTWYMaUyucF5bhm2pi/HS98JIxfFQmTLuyj6hGpQlAazNfyVnn7JuDn+Sew==", - "license": "MIT", - "dependencies": { - "confusing-browser-globals": "1.0.11" - }, - "engines": { - "node": ">=18" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://eslint.org/donate" }, 
"peerDependencies": { - "eslint": ">=8.56.0" - } - }, - "node_modules/eslint-config-xo-space": { - "version": "0.35.0", - "resolved": "https://registry.npmjs.org/eslint-config-xo-space/-/eslint-config-xo-space-0.35.0.tgz", - "integrity": "sha512-+79iVcoLi3PvGcjqYDpSPzbLfqYpNcMlhsCBRsnmDoHAn4npJG6YxmHpelQKpXM7v/EeZTUKb4e1xotWlei8KA==", - "license": "MIT", - "dependencies": { - "eslint-config-xo": "^0.44.0" + "jiti": "*" }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - }, - "peerDependencies": { - "eslint": ">=8.56.0" + "peerDependenciesMeta": { + "jiti": { + "optional": true + } } }, "node_modules/eslint-import-resolver-node": { @@ -4715,124 +4578,6 @@ "ms": "^2.1.1" } }, - "node_modules/eslint-plugin-import/node_modules/doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint-plugin-jsx-a11y": { - "version": "6.10.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz", - "integrity": "sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==", - "license": "MIT", - "dependencies": { - "aria-query": "^5.3.2", - "array-includes": "^3.1.8", - "array.prototype.flatmap": "^1.3.2", - "ast-types-flow": "^0.0.8", - "axe-core": "^4.10.0", - "axobject-query": "^4.1.0", - "damerau-levenshtein": "^1.0.8", - "emoji-regex": "^9.2.2", - "hasown": "^2.0.2", - "jsx-ast-utils": "^3.3.5", - "language-tags": "^1.0.9", - "minimatch": "^3.1.2", - "object.fromentries": "^2.0.8", - "safe-regex-test": "^1.0.3", - "string.prototype.includes": "^2.0.1" - }, - "engines": { - "node": ">=4.0" - }, - "peerDependencies": { - "eslint": "^3 || ^4 || 
^5 || ^6 || ^7 || ^8 || ^9" - } - }, - "node_modules/eslint-plugin-react": { - "version": "7.37.4", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.4.tgz", - "integrity": "sha512-BGP0jRmfYyvOyvMoRX/uoUeW+GqNj9y16bPQzqAHf3AYII/tDs+jMN0dBVkl88/OZwNGwrVFxE7riHsXVfy/LQ==", - "license": "MIT", - "peer": true, - "dependencies": { - "array-includes": "^3.1.8", - "array.prototype.findlast": "^1.2.5", - "array.prototype.flatmap": "^1.3.3", - "array.prototype.tosorted": "^1.1.4", - "doctrine": "^2.1.0", - "es-iterator-helpers": "^1.2.1", - "estraverse": "^5.3.0", - "hasown": "^2.0.2", - "jsx-ast-utils": "^2.4.1 || ^3.0.0", - "minimatch": "^3.1.2", - "object.entries": "^1.1.8", - "object.fromentries": "^2.0.8", - "object.values": "^1.2.1", - "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.5", - "semver": "^6.3.1", - "string.prototype.matchall": "^4.0.12", - "string.prototype.repeat": "^1.0.0" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" - } - }, - "node_modules/eslint-plugin-react-hooks": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", - "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" - } - }, - "node_modules/eslint-plugin-react/node_modules/doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/eslint-plugin-react/node_modules/resolve": { - "version": "2.0.0-next.5", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", - "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", - "license": "MIT", - "peer": true, - "dependencies": { - "is-core-module": "^2.13.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -4846,38 +4591,20 @@ "node": ">=8.0.0" } }, - "node_modules/eslint-scope/node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "license": "Apache-2.0", - "engines": { - "node": ">=10" - } - }, "node_modules/eslint-webpack-plugin": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/eslint-webpack-plugin/-/eslint-webpack-plugin-4.2.0.tgz", - "integrity": "sha512-rsfpFQ01AWQbqtjgPRr2usVRxhWDuG0YDYcG8DJOteD3EFnpeuYuOwk0PQiN7PRBTqS6ElNdtPZPggj8If9WnA==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eslint-webpack-plugin/-/eslint-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-Ur100Vi+z0uP7j4Z8Ccah0pXmNHhl3f7P2hCYZj3mZCOSc33G5c1R/vZ4KCapwWikPgRyD4dkangx6JW3KaVFQ==", "license": "MIT", "dependencies": { - "@types/eslint": "^8.56.10", + 
"@types/eslint": "^9.6.1", "jest-worker": "^29.7.0", - "micromatch": "^4.0.5", + "micromatch": "^4.0.8", "normalize-path": "^3.0.0", - "schema-utils": "^4.2.0" + "schema-utils": "^4.3.0" }, "engines": { - "node": ">= 14.15.0" + "node": ">= 18.12.0" }, "funding": { "type": "opencollective", @@ -4889,90 +4616,71 @@ } }, "node_modules/eslint/node_modules/eslint-scope": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", - "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.3.0.tgz", + "integrity": "sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==", "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, "node_modules/eslint/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "license": "Apache-2.0", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": 
"sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", - "license": "MIT", - "dependencies": { - "type-fest": "^0.20.2" - }, + "node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4.0" } }, "node_modules/espree": { - "version": "9.6.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", - "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", + "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", "license": "BSD-2-Clause", "dependencies": { - "acorn": "^8.9.0", + "acorn": "^8.14.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.1" + "eslint-visitor-keys": "^4.2.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, "node_modules/espree/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "license": "Apache-2.0", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 
|| ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "license": "BSD-2-Clause", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/esquery": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", @@ -4985,6 +4693,15 @@ "node": ">=0.10" } }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, "node_modules/esrecurse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", @@ -4997,7 +4714,7 @@ "node": ">=4.0" } }, - "node_modules/estraverse": { + "node_modules/esrecurse/node_modules/estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", @@ -5006,6 +4723,15 @@ "node": ">=4.0" } }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", @@ -5100,18 +4826,6 @@ "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", - "license": "MIT", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -5184,9 +4898,9 @@ } }, "node_modules/fastq": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.0.tgz", - "integrity": "sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", "license": "ISC", "dependencies": { "reusify": "^1.0.4" @@ -5205,24 +4919,15 @@ } }, "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "license": "MIT", "dependencies": { - "flat-cache": "^3.0.4" + "flat-cache": "^4.0.0" }, "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/filesize": { - "version": "8.0.7", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", - "integrity": 
"sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", - "license": "BSD-3-Clause", - "engines": { - "node": ">= 0.4.0" + "node": ">=16.0.0" } }, "node_modules/fill-range": { @@ -5270,22 +4975,6 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, - "node_modules/find-cache-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", - "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", - "license": "MIT", - "dependencies": { - "common-path-prefix": "^3.0.0", - "pkg-dir": "^7.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/find-up": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", @@ -5312,23 +5001,22 @@ } }, "node_modules/flat-cache": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", - "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "license": "MIT", "dependencies": { "flatted": "^3.2.9", - "keyv": "^4.5.3", - "rimraf": "^3.0.2" + "keyv": "^4.5.4" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=16" } }, "node_modules/flatted": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz", - "integrity": "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": 
"sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", "license": "ISC" }, "node_modules/follow-redirects": { @@ -5352,9 +5040,9 @@ } }, "node_modules/for-each": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.4.tgz", - "integrity": "sha512-kKaIINnFpzW6ffJNDjjyjrk21BkDx38c0xa/klsT8VzLCaMEefv4ZTacrcVR4DmgTeBra++jMDAfS/tS799YDw==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", "license": "MIT", "dependencies": { "is-callable": "^1.2.7" @@ -5366,96 +5054,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/foreach": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.6.tgz", - "integrity": "sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==", - "license": "MIT" - }, - "node_modules/fork-ts-checker-webpack-plugin": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", - "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=10", - "yarn": ">=1.0.0" - }, - "peerDependencies": { - "eslint": ">= 6", - "typescript": ">= 2.7", - "vue-template-compiler": "*", - "webpack": ">= 4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - }, - "vue-template-compiler": { - "optional": true - } - 
} - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -5503,18 +5101,6 @@ "node": ">=14.14" } }, - "node_modules/fs-monkey": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", - "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", - "license": "Unlicense" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", 
- "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "license": "ISC" - }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -5587,17 +5173,17 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.7.tgz", - "integrity": "sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "license": "MIT", "dependencies": { - "call-bind-apply-helpers": "^1.0.1", + "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", + "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", - "get-proto": "^1.0.0", + "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", @@ -5623,19 +5209,6 @@ "node": ">= 0.4" } }, - "node_modules/get-stdin": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-9.0.0.tgz", - "integrity": "sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/get-symbol-description": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", @@ -5653,27 +5226,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -5692,70 +5244,16 @@ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", "license": "BSD-2-Clause" }, - "node_modules/global": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz", - "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==", - "license": "MIT", - "dependencies": { - "min-document": "^2.19.0", - "process": "^0.11.10" - } - }, - "node_modules/global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", - "license": "MIT", - "dependencies": { - "global-prefix": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", - "license": "MIT", - "dependencies": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix/node_modules/kind-of": { - "version": "6.0.3", - "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/global-prefix/node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.0.0.tgz", + "integrity": "sha512-iInW14XItCXET01CQFqudPOWP2jYMl7T+QRQT+UNcR/iQncN/F0UNpgd76iFkBPgNQb4+X3LV9tLJYzwh+Gl3A==", "license": "MIT", "engines": { - "node": ">=4" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/globalthis": { @@ -5774,122 +5272,33 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/globby": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-14.1.0.tgz", - "integrity": "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@sindresorhus/merge-streams": "^2.1.0", - "fast-glob": "^3.3.3", - "ignore": "^7.0.3", - "path-type": "^6.0.0", - "slash": "^5.1.0", - "unicorn-magic": "^0.3.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/globby/node_modules/ignore": { - "version": "7.0.3", - "resolved": 
"https://registry.npmjs.org/ignore/-/ignore-7.0.3.tgz", - "integrity": "sha512-bAH5jbK/F3T3Jls4I0SO1hmPR0dKU0a7+SY6n1yzRtG54FLO8d6w/nxLFX2Nb7dBu6cCWXPaAME6cYqFUMmuCA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, "node_modules/good-listener": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha512-goW1b+d9q/HIwbVYZzZ6SsTr4IgE+WA44A0GmPIQstuOrgsFcT7VEJ48nmr9GaRtNu0XTKacFLGnBPAM6Afouw==", - "license": "MIT", - "dependencies": { - "delegate": "^3.1.2" - } - }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "license": "ISC" - }, - "node_modules/graphemer": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", - "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", - "license": "MIT" - }, - "node_modules/gray-matter": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-3.1.1.tgz", - "integrity": "sha512-nZ1qjLmayEv0/wt3sHig7I0s3/sJO0dkAaKYQ5YAOApUtYEOonXSFdWvL1khvnZMTvov4UufkqlFsilPnejEXA==", - "license": "MIT", - "dependencies": { - "extend-shallow": "^2.0.1", - "js-yaml": "^3.10.0", - "kind-of": "^5.0.2", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/gray-matter/node_modules/argparse": { - "version": "1.0.10", - "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "integrity": "sha512-goW1b+d9q/HIwbVYZzZ6SsTr4IgE+WA44A0GmPIQstuOrgsFcT7VEJ48nmr9GaRtNu0XTKacFLGnBPAM6Afouw==", "license": "MIT", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" + "delegate": "^3.1.2" } }, - "node_modules/gzip-size": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "license": "MIT", - "dependencies": { - "duplexer": "^0.1.2" - }, "engines": { - "node": ">=10" + "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, "node_modules/handle-thing": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", @@ -5987,6 +5396,7 @@ "version": "2.8.9", "resolved": 
"https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true, "license": "ISC" }, "node_modules/hpack.js": { @@ -6060,9 +5470,9 @@ } }, "node_modules/http-parser-js": { - "version": "0.5.9", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.9.tgz", - "integrity": "sha512-n1XsPy3rXVxlqxVioEWdC+0+M+SQw0DpJynwtOPo1X+ZlvdzTLtDBIJJlDQTnwZIFJrZSzSGmIOUdP8tu+SgLw==", + "version": "0.5.10", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.10.tgz", + "integrity": "sha512-Pysuw9XpUq5dVc/2SMHpuTY01RFl8fttgcyunjL7eEMhGM3cI4eOmiCycJDVCo/7O7ClfQD3SaI6ftDzqOXYMA==", "license": "MIT" }, "node_modules/http-proxy": { @@ -6080,9 +5490,9 @@ } }, "node_modules/http-proxy-middleware": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz", - "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==", + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz", + "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==", "license": "MIT", "dependencies": { "@types/http-proxy": "^1.17.8", @@ -6103,89 +5513,6 @@ } } }, - "node_modules/hugo-algolia": { - "version": "1.2.14", - "resolved": "https://registry.npmjs.org/hugo-algolia/-/hugo-algolia-1.2.14.tgz", - "integrity": "sha512-VHDnKmvWZRQ/MGgWFFDlEGO+8arfL4X3kP6xWCY637/GHuc9Y1dlZ1F5GrFa+LAJ/5JiFo0e8E002SKXVv9CNQ==", - "license": "ISC", - "dependencies": { - "algoliasearch": "^3.24.1", - "bytes": "^3.0.0", - "commander": "^2.11.0", - "glob": "^7.1.2", - "gray-matter": "^3.0.2", - "lodash": "^4.17.11", - "pos": "^0.4.2", - "remove-markdown": "^0.2.0", - "stopword": "^0.1.8", - "striptags": "^3.0.1", - "to-snake-case": 
"^1.0.0", - "toml": "^2.3.2", - "truncate-utf8-bytes": "^1.0.2" - }, - "bin": { - "hugo-algolia": "bin/index.js" - } - }, - "node_modules/hugo-algolia/node_modules/algoliasearch": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", - "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", - "license": "MIT", - "dependencies": { - "agentkeepalive": "^2.2.0", - "debug": "^2.6.9", - "envify": "^4.0.0", - "es6-promise": "^4.1.0", - "events": "^1.1.0", - "foreach": "^2.0.5", - "global": "^4.3.2", - "inherits": "^2.0.1", - "isarray": "^2.0.1", - "load-script": "^1.0.0", - "object-keys": "^1.0.11", - "querystring-es3": "^0.2.1", - "reduce": "^1.0.1", - "semver": "^5.1.0", - "tunnel-agent": "^0.6.0" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/hugo-algolia/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/hugo-algolia/node_modules/events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha512-kEcvvCBByWXGnZy6JUlgAp2gBIUjfCAV6P6TgT1/aaQKcmuAEC4OZTV1I4EWQLz2gxZw76atuVyvHhTxvi0Flw==", - "license": "MIT", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/hugo-algolia/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/hugo-algolia/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": 
"sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "license": "ISC", - "bin": { - "semver": "bin/semver" - } - }, "node_modules/hyperdyperid": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/hyperdyperid/-/hyperdyperid-1.2.0.tgz", @@ -6216,20 +5543,11 @@ "node": ">= 4" } }, - "node_modules/immer": { - "version": "9.0.21", - "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", - "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/immer" - } - }, "node_modules/immutable": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.0.3.tgz", - "integrity": "sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.1.1.tgz", + "integrity": "sha512-3jatXi9ObIsPGr3N5hGw/vWWcTkq6hUYhpQz4k0wLC+owqWi/LiugIw9x0EdNZ2yGedKN/HzePiBvaJRXa0Ujg==", + "dev": true, "license": "MIT" }, "node_modules/import-fresh": { @@ -6267,70 +5585,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/import-local/node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "license": "MIT", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/import-local/node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "license": "MIT", - "dependencies": { - 
"p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/import-local/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-local/node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "license": "MIT", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/import-local/node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "license": "MIT", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/imurmurhash": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", @@ -6340,29 +5594,12 @@ "node": ">=0.8.19" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "license": "ISC" }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "license": "ISC" - }, "node_modules/internal-slot": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", @@ -6416,6 +5653,7 @@ "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, "license": "MIT" }, "node_modules/is-async-function": { @@ -6540,30 +5778,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/is-extglob": 
{ "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -6710,15 +5924,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/is-plain-obj": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", @@ -6761,15 +5966,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-root": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", - "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/is-set": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", @@ -6888,18 +6084,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/isarray": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", @@ -6921,24 +6105,6 @@ "node": ">=0.10.0" } }, - "node_modules/iterator.prototype": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", - "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", - "license": "MIT", - "peer": true, - "dependencies": 
{ - "define-data-property": "^1.1.4", - "es-object-atoms": "^1.0.0", - "get-intrinsic": "^1.2.6", - "get-proto": "^1.0.0", - "has-symbols": "^1.1.0", - "set-function-name": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/jest-util": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", @@ -7026,6 +6192,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true, "license": "MIT" }, "node_modules/json-parse-even-better-errors": { @@ -7062,6 +6229,7 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dev": true, "license": "MIT", "dependencies": { "universalify": "^2.0.0" @@ -7070,21 +6238,6 @@ "graceful-fs": "^4.1.6" } }, - "node_modules/jsx-ast-utils": { - "version": "3.3.5", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", - "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", - "license": "MIT", - "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flat": "^1.3.1", - "object.assign": "^4.1.4", - "object.values": "^1.1.6" - }, - "engines": { - "node": ">=4.0" - } - }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", @@ -7094,46 +6247,10 @@ "json-buffer": "3.0.1" } }, - "node_modules/kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/kleur": { 
- "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/language-subtag-registry": { - "version": "0.3.23", - "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", - "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", - "license": "CC0-1.0" - }, - "node_modules/language-tags": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", - "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", - "license": "MIT", - "dependencies": { - "language-subtag-registry": "^0.3.20" - }, - "engines": { - "node": ">=0.10" - } - }, "node_modules/launch-editor": { - "version": "2.9.1", - "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.9.1.tgz", - "integrity": "sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.10.0.tgz", + "integrity": "sha512-D7dBRJo/qcGX9xlvt/6wUYzQxjh5G1RvZPgPv8vi4KRU99DVQL/oW7tnVOCCTm2HGeo3C5HvGE5Yrh6UBoZ0vA==", "license": "MIT", "dependencies": { "picocolors": "^1.0.0", @@ -7166,16 +6283,11 @@ "url": "https://github.com/sponsors/antonk52" } }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "license": "MIT" - }, "node_modules/load-json-file": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", 
"integrity": "sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==", + "dev": true, "license": "MIT", "dependencies": { "graceful-fs": "^4.1.2", @@ -7187,34 +6299,16 @@ "node": ">=4" } }, - "node_modules/load-json-file/node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", - "license": "MIT", - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/load-json-file/node_modules/pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "dev": true, "license": "MIT", "engines": { "node": ">=4" } }, - "node_modules/load-script": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha512-kPEjMFtZvwL9TaZo0uZ2ml+Ye9HUMmPwbYRJ324qF9tqMejwykJ5ggTyvzmrbBeapCAbk98BSbTeovHEEP1uCA==", - "license": "MIT" - }, "node_modules/loader-runner": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", @@ -7224,15 +6318,6 @@ "node": ">=6.11.5" } }, - "node_modules/loader-utils": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.3.1.tgz", - "integrity": "sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==", - "license": "MIT", - "engines": { - "node": ">= 12.13.0" - } - }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -7248,12 +6333,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lodash": { - "version": "4.17.21", - 
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "license": "MIT" - }, "node_modules/lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", @@ -7266,19 +6345,6 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "license": "MIT" }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "license": "MIT", - "peer": true, - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } - }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", @@ -7306,22 +6372,11 @@ "node": ">= 0.6" } }, - "node_modules/memfs": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", - "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", - "license": "Unlicense", - "dependencies": { - "fs-monkey": "^1.0.4" - }, - "engines": { - "node": ">= 4.0.0" - } - }, "node_modules/memorystream": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz", "integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==", + "dev": true, "engines": { "node": ">= 0.10.0" } @@ -7405,14 +6460,6 @@ "node": ">= 0.6" } }, - "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": 
"sha512-9Wy1B3m3f66bPPmU5hdA4DR4PB2OfDU/+GS3yAB7IQozE3tqXaVv2zOjgla7MEGSRv95+ILmOuvhLkOK6wJtCQ==", - "dependencies": { - "dom-walk": "^0.1.0" - } - }, "node_modules/minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", @@ -7460,9 +6507,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.8", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", - "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "dev": true, "funding": [ { @@ -7503,12 +6550,14 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true, "license": "MIT" }, "node_modules/node-addon-api": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "dev": true, "license": "MIT", "optional": true }, @@ -7531,6 +6580,7 @@ "version": "2.5.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, "license": "BSD-2-Clause", "dependencies": { "hosted-git-info": "^2.1.4", @@ -7543,6 +6593,7 @@ "version": "5.7.2", "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, "license": "ISC", "bin": { "semver": "bin/semver" @@ 
-7571,6 +6622,7 @@ "version": "4.1.5", "resolved": "https://registry.npmjs.org/npm-run-all/-/npm-run-all-4.1.5.tgz", "integrity": "sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==", + "dev": true, "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", @@ -7596,6 +6648,7 @@ "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, "license": "MIT", "dependencies": { "color-convert": "^1.9.0" @@ -7608,6 +6661,7 @@ "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", @@ -7622,6 +6676,7 @@ "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, "license": "MIT", "dependencies": { "color-name": "1.1.3" @@ -7631,12 +6686,14 @@ "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true, "license": "MIT" }, "node_modules/npm-run-all/node_modules/cross-spawn": { "version": "6.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz", "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==", + "dev": true, "license": "MIT", "dependencies": { "nice-try": "^1.0.4", @@ -7653,6 +6710,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "integrity": 
"sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, "license": "MIT", "engines": { "node": ">=0.8.0" @@ -7662,6 +6720,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, "license": "MIT", "engines": { "node": ">=4" @@ -7671,6 +6730,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "dev": true, "license": "MIT", "engines": { "node": ">=4" @@ -7680,6 +6740,7 @@ "version": "5.7.2", "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, "license": "ISC", "bin": { "semver": "bin/semver" @@ -7689,6 +6750,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "dev": true, "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" @@ -7701,6 +6763,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -7710,6 +6773,7 @@ "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, "license": "MIT", "dependencies": { "has-flag": "^3.0.0" @@ -7722,6 +6786,7 @@ "version": "1.3.1", 
"resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, "license": "ISC", "dependencies": { "isexe": "^2.0.0" @@ -7730,16 +6795,6 @@ "which": "bin/which" } }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/object-inspect": { "version": "1.13.4", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", @@ -7781,20 +6836,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object.entries": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz", - "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.7", - "define-properties": "^1.2.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/object.fromentries": { "version": "2.0.8", "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", @@ -7872,32 +6913,6 @@ "node": ">= 0.8" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - 
"license": "MIT", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/optionator": { "version": "0.9.4", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", @@ -8001,21 +7016,17 @@ } }, "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", + "dev": true, "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" + "json-parse-better-errors": "^1.0.1" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, "node_modules/parseurl": { @@ -8036,15 +7047,6 @@ "node": ">=8" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/path-key": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", @@ -8067,16 +7069,26 @@ "license": "MIT" }, "node_modules/path-type": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-6.0.0.tgz", - "integrity": "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ==", + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", "dev": true, "license": "MIT", - "engines": { - "node": ">=18" + "dependencies": { + "pify": "^3.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=4" + } + }, + "node_modules/path-type/node_modules/pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" } }, "node_modules/picocolors": { @@ -8101,6 +7113,7 @@ "version": "0.3.1", "resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.3.1.tgz", "integrity": "sha512-qQbW94hLHEqCg7nhby4yRC7G2+jYHY4Rguc2bjw7Uug4GIJuu1tvf2uHaZv5Q8zdt+WKJ6qK1FOI6amaWUo5FA==", + "dev": true, "license": "MIT", "bin": { "pidtree": "bin/pidtree.js" @@ -8120,140 +7133,43 @@ } }, "node_modules/pkg-dir": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", - "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "license": "MIT", "dependencies": { - "find-up": "^6.3.0" + "find-up": "^4.0.0" }, "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/pkg-dir/node_modules/find-up": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", - "integrity": 
"sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "license": "MIT", "dependencies": { - "locate-path": "^7.1.0", - "path-exists": "^5.0.0" + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/pkg-dir/node_modules/locate-path": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", - "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", - "license": "MIT", - "dependencies": { - "p-locate": "^6.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-dir/node_modules/p-limit": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", - "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", - "license": "MIT", - "dependencies": { - "yocto-queue": "^1.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-dir/node_modules/p-locate": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", - "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", - "license": "MIT", - "dependencies": { - "p-limit": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-dir/node_modules/path-exists": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", - "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - "node_modules/pkg-dir/node_modules/yocto-queue": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.1.1.tgz", - "integrity": "sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==", - "license": "MIT", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-up": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", - "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "license": "MIT", "dependencies": { - "find-up": "^3.0.0" + "p-locate": "^4.1.0" }, "engines": { "node": ">=8" } }, - "node_modules/pkg-up/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "license": "MIT", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - 
"license": "MIT", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/p-limit": { + "node_modules/pkg-dir/node_modules/p-limit": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", @@ -8268,33 +7184,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/pkg-up/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "license": "MIT", "dependencies": { - "p-limit": "^2.0.0" + "p-limit": "^2.2.0" }, "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/pos": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/pos/-/pos-0.4.2.tgz", - "integrity": "sha512-5HtivCe1HaOqjQZZNhtKrIR1zBvm2FLVVGl4b1poHPZDbXq1BEqYOlmWmetbzqrkRFITxPbEpVgpB03qNS4cSw==", - "engines": { - "node": ">=0" + "node": ">=8" } }, "node_modules/possible-typed-array-names": { @@ -8307,9 +7206,9 @@ } }, "node_modules/postcss": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz", - "integrity": 
"sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ==", + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", + "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", "dev": true, "funding": [ { @@ -8336,23 +7235,22 @@ } }, "node_modules/postcss-cli": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-11.0.0.tgz", - "integrity": "sha512-xMITAI7M0u1yolVcXJ9XTZiO9aO49mcoKQy6pCDFdMh9kGqhzLVpWxeD/32M/QBmkhcGypZFFOLNLmIW4Pg4RA==", + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-11.0.1.tgz", + "integrity": "sha512-0UnkNPSayHKRe/tc2YGW6XnSqqOA9eqpiRMgRlV1S6HdGi16vwJBx7lviARzbV1HpQHqLLRH3o8vTcB0cLc+5g==", "dev": true, "license": "MIT", "dependencies": { "chokidar": "^3.3.0", - "dependency-graph": "^0.11.0", + "dependency-graph": "^1.0.0", "fs-extra": "^11.0.0", - "get-stdin": "^9.0.0", - "globby": "^14.0.0", "picocolors": "^1.0.0", "postcss-load-config": "^5.0.0", "postcss-reporter": "^7.0.0", "pretty-hrtime": "^1.0.3", "read-cache": "^1.0.0", "slash": "^5.0.0", + "tinyglobby": "^0.2.12", "yargs": "^17.0.0" }, "bin": { @@ -8458,46 +7356,12 @@ "node": ">= 0.8" } }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", - "license": "MIT", - "engines": { - "node": ">= 0.6.0" - } - }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", "license": "MIT" }, - "node_modules/prompts": { - "version": "2.4.2", - "resolved": 
"https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", - "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", - "license": "MIT", - "dependencies": { - "kleur": "^3.0.3", - "sisteransi": "^1.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "license": "MIT", - "peer": true, - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -8544,14 +7408,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/querystring-es3": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==", - "engines": { - "node": ">=0.4.x" - } - }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -8585,108 +7441,26 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - 
"unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/react-dev-tools": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/react-dev-tools/-/react-dev-tools-0.0.1.tgz", - "integrity": "sha512-V82SL/Y3/YLRZHIqzyBEauIFASeTOD4ZllvGhhG0Q4Npc06ElbMFPFgDYt+M9uW+CJrGoufS4kOQ6GSzTf3AJw==" - }, - "node_modules/react-dev-utils": { - "version": "12.0.1", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", - "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.16.0", - "address": "^1.1.2", - "browserslist": "^4.18.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "detect-port-alt": "^1.1.6", - "escape-string-regexp": "^4.0.0", - "filesize": "^8.0.6", - "find-up": "^5.0.0", - "fork-ts-checker-webpack-plugin": "^6.5.0", - "global-modules": "^2.0.0", - "globby": "^11.0.4", - "gzip-size": "^6.0.0", - "immer": "^9.0.7", - "is-root": "^2.1.0", - "loader-utils": "^3.2.0", - "open": "^8.4.0", - "pkg-up": "^3.1.0", - "prompts": "^2.4.2", - "react-error-overlay": "^6.0.11", - "recursive-readdir": "^2.2.2", - "shell-quote": "^1.7.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, + "license": "MIT", "engines": { - "node": ">=14" + "node": ">= 0.6" } }, - "node_modules/react-dev-utils/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "license": "MIT", "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": 
"^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-dev-utils/node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "license": "MIT", "engines": { - "node": ">=8" + "node": ">= 0.8" } }, - "node_modules/react-error-overlay": { - "version": "6.0.11", - "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", - "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==", - "license": "MIT" - }, - "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT", - "peer": true - }, "node_modules/read-cache": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", @@ -8701,6 +7475,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", "integrity": "sha512-BLq/cCO9two+lBgiTYNqD6GdtK8s4NpaWrl6/rCO9w0TUS8oJl7cmToOZfRYllKTISY6nt1U7jQ53brmKqY6BA==", + "dev": true, "license": "MIT", "dependencies": { "load-json-file": "^4.0.0", @@ -8711,27 +7486,6 @@ "node": ">=4" } }, - "node_modules/read-pkg/node_modules/path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "license": "MIT", - "dependencies": { - "pify": "^3.0.0" - }, - "engines": { - "node": 
">=4" - } - }, - "node_modules/read-pkg/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", @@ -8770,30 +7524,6 @@ "node": ">= 10.13.0" } }, - "node_modules/recursive-readdir": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", - "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", - "license": "MIT", - "dependencies": { - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/reduce": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.3.tgz", - "integrity": "sha512-0Dtt3Bgj34/yKFzE5N9V6/HYyP3gb+E3TLs/hMr/wGgkCIzYa+7G4hNrE/P+en52OJT+pLUgmba9DQF3AB+2LQ==", - "license": "MIT", - "dependencies": { - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -8916,12 +7646,6 @@ "node": ">=6" } }, - "node_modules/remove-markdown": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/remove-markdown/-/remove-markdown-0.2.2.tgz", - "integrity": "sha512-jwgEf3Yh/xi4WodWi/vPlasa9C9pMv1kz5ITOIAGjBW7PeZ/CHZCdBfJzQnn2VX2cBvf1xCuJv0tUJqn/FCMNA==", - "license": "MIT" - }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -9007,31 +7731,15 @@ } }, "node_modules/reusify": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" } }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/run-applescript": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", @@ -9048,6 +7756,7 @@ "version": "0.0.0", "resolved": "https://registry.npmjs.org/run-p/-/run-p-0.0.0.tgz", "integrity": "sha512-ZLiUUVOXJcM/S1hMnm6Ooc1zAgAx98Mmn1qyA+y3WNeK7hOTGAusVR5r3uOQJ0NuUxZt7J9vNusYNNVgKPSbww==", + "dev": true, "license": "MIT" }, "node_modules/run-parallel": { @@ -9152,9 +7861,10 @@ "license": "MIT" }, "node_modules/sass": { - "version": "1.84.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.84.0.tgz", - "integrity": "sha512-XDAbhEPJRxi7H0SxrnOpiXFQoUJHwkR2u3Zc4el+fK/Tt5Hpzw5kkQ59qVDfvdaUq6gCrEZIbySFBM2T9DNKHg==", + "version": "1.87.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.87.0.tgz", + "integrity": "sha512-d0NoFH4v6SjEK7BoX810Jsrhj7IQSYHAHLi/iSpgqKc7LaIDshFRlSg5LOymf9FqQhxEHs2W5ZQXlvy0KD45Uw==", + "dev": true, "license": "MIT", "dependencies": { "chokidar": "^4.0.0", @@ -9175,6 +7885,7 @@ "version": "4.0.3", "resolved": 
"https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, "license": "MIT", "dependencies": { "readdirp": "^4.0.1" @@ -9187,9 +7898,10 @@ } }, "node_modules/sass/node_modules/readdirp": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.1.tgz", - "integrity": "sha512-h80JrZu/MHUZCyHu5ciuoI0+WxsCxzxJTILn6Fs8rxSnFPh+UVHYfeIxK1nVGugMqkfC4vJcBOYbkfkwYK0+gw==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, "license": "MIT", "engines": { "node": ">= 14.18.0" @@ -9200,9 +7912,9 @@ } }, "node_modules/schema-utils": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.0.tgz", - "integrity": "sha512-Gf9qqc58SpCA/xdziiHz35F4GNIWYWZrEshUc/G/r5BnLph6xpKuLeoJoQuj5WfBIx/eQLf+hmVPYHaxJu7V2g==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.2.tgz", + "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", @@ -9614,12 +8326,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/sisteransi": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", - "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", - "license": "MIT" - }, "node_modules/slash": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", @@ -9657,6 +8363,7 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", "integrity": 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" @@ -9676,6 +8383,7 @@ "version": "3.2.0", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dev": true, "license": "Apache-2.0", "dependencies": { "spdx-expression-parse": "^3.0.0", @@ -9686,12 +8394,14 @@ "version": "2.5.0", "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "dev": true, "license": "CC-BY-3.0" }, "node_modules/spdx-expression-parse": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, "license": "MIT", "dependencies": { "spdx-exceptions": "^2.1.0", @@ -9702,6 +8412,7 @@ "version": "3.0.21", "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.21.tgz", "integrity": "sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg==", + "dev": true, "license": "CC0-1.0" }, "node_modules/spdy": { @@ -9734,12 +8445,6 @@ "wbuf": "^1.7.3" } }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "license": "BSD-3-Clause" - }, "node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", @@ -9749,12 +8454,6 @@ "node": ">= 0.8" } }, - "node_modules/stopword": { - "version": "0.1.19", - "resolved": 
"https://registry.npmjs.org/stopword/-/stopword-0.1.19.tgz", - "integrity": "sha512-oKkl/LClyJ2YLWm2xZvIiCUGiTsggj+BPOQyt3IKtPUJZj43jYxFJEmXvP1VZQvMuexdodMBshL4sVUSPURmwg==", - "license": "MIT" - }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -9786,52 +8485,11 @@ "dev": true, "license": "MIT" }, - "node_modules/string.prototype.includes": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", - "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.7", - "define-properties": "^1.2.1", - "es-abstract": "^1.23.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/string.prototype.matchall": { - "version": "4.0.12", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", - "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", - "license": "MIT", - "peer": true, - "dependencies": { - "call-bind": "^1.0.8", - "call-bound": "^1.0.3", - "define-properties": "^1.2.1", - "es-abstract": "^1.23.6", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", - "get-intrinsic": "^1.2.6", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "internal-slot": "^1.1.0", - "regexp.prototype.flags": "^1.5.3", - "set-function-name": "^2.0.2", - "side-channel": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/string.prototype.padend": { "version": "3.1.6", "resolved": "https://registry.npmjs.org/string.prototype.padend/-/string.prototype.padend-3.1.6.tgz", "integrity": "sha512-XZpspuSB7vJWhvJc9DLSlrXl1mcA2BdoY5jjnS135ydXqLoqhs96JjDtCkjJEQHvfqZIp9hBuBMgI589peyx9Q==", + "dev": true, "license": "MIT", 
"dependencies": { "call-bind": "^1.0.7", @@ -9846,17 +8504,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/string.prototype.repeat": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", - "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", - "license": "MIT", - "peer": true, - "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, "node_modules/string.prototype.trim": { "version": "1.2.10", "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", @@ -9917,6 +8564,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" @@ -9934,15 +8582,6 @@ "node": ">=4" } }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -9955,12 +8594,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/striptags": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/striptags/-/striptags-3.2.0.tgz", - "integrity": "sha512-g45ZOGzHDMe2bdYMdIvdAfCQkCTDMGBazSw1ypMowwGIee7ZQ5dU0rBJ8Jqgl+jAKIv4dbeE1jscZq9wid1Tkw==", - "license": "MIT" - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ 
-9985,19 +8618,10 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/terser": { - "version": "5.38.1", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.38.1.tgz", - "integrity": "sha512-GWANVlPM/ZfYzuPHjq0nxT+EbOEDDN3Jwhwdg1D8TU8oSkktp8w64Uq4auuGLxFSoNTRDncTq2hQHX1Ld9KHkA==", + "version": "5.39.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.39.0.tgz", + "integrity": "sha512-LBAhFyLho16harJoWMg/nZsQYgTrg5jXOn2nCYjRUcZZEdE3qa2zb8QEDRUGVZBW4rlazf2fxkg8tztybTaqWw==", "license": "BSD-2-Clause", "dependencies": { "@jridgewell/source-map": "^0.3.3", @@ -10013,9 +8637,9 @@ } }, "node_modules/terser-webpack-plugin": { - "version": "5.3.11", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.11.tgz", - "integrity": "sha512-RVCsMfuD0+cTt3EwX8hSl2Ks56EbFHWmhluwcqoPKtBnfjiT6olaq7PRIRfhyU8nnC2MrnDrBLfrD/RGE+cVXQ==", + "version": "5.3.14", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", + "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.25", @@ -10075,12 +8699,6 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "license": "MIT" - }, "node_modules/thenby": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/thenby/-/thenby-1.3.4.tgz", @@ -10100,12 +8718,6 
@@ "tslib": "^2" } }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", - "license": "MIT" - }, "node_modules/thunky": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", @@ -10118,40 +8730,61 @@ "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", "license": "MIT" }, - "node_modules/to-no-case": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/to-no-case/-/to-no-case-1.0.2.tgz", - "integrity": "sha512-Z3g735FxuZY8rodxV4gH7LxClE4H0hTIyHNIHdk+vpQxjLm0cwnKXq/OFVZ76SOQmto7txVcwSCwkU5kqp+FKg==", - "license": "MIT" - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "node_modules/tinyglobby": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", + "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "dev": true, "license": "MIT", "dependencies": { - "is-number": "^7.0.0" + "fdir": "^6.4.4", + "picomatch": "^4.0.2" }, "engines": { - "node": ">=8.0" + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" } }, - "node_modules/to-snake-case": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-snake-case/-/to-snake-case-1.0.0.tgz", - "integrity": "sha512-joRpzBAk1Bhi2eGEYBjukEWHOe/IvclOkiJl3DtA91jV6NwQ3MwXA4FHYeqk8BNp/D8bmi9tcNbRu/SozP0jbQ==", + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.4.4", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", + "integrity": 
"sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "dev": true, "license": "MIT", - "dependencies": { - "to-space-case": "^1.0.0" + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } } }, - "node_modules/to-space-case": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-space-case/-/to-space-case-1.0.0.tgz", - "integrity": "sha512-rLdvwXZ39VOn1IxGL3V6ZstoTbwLRckQmn/U8ZDLuWwIXNpuZDhQ3AiRUlhTbOXFVE9C+dR51wM0CBDhk31VcA==", + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "license": "MIT", "dependencies": { - "to-no-case": "^1.0.0" + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" } }, "node_modules/toidentifier": { @@ -10163,12 +8796,6 @@ "node": ">=0.6" } }, - "node_modules/toml": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.6.tgz", - "integrity": "sha512-gVweAectJU3ebq//Ferr2JUY4WKSDe5N+z0FvjDncLGyHmIDoxgY/2Ie4qfEIDm4IS7OA6Rmdm7pdEEdMcV/xQ==", - "license": "MIT" - }, "node_modules/tree-dump": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/tree-dump/-/tree-dump-1.0.2.tgz", @@ -10185,13 +8812,16 @@ "tslib": "2" } }, - "node_modules/truncate-utf8-bytes": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz", - "integrity": "sha512-95Pu1QXQvruGEhv62XCMO3Mm90GscOCClvrIUwCM0PYOXK3kaF3l3sIHxx71ThJfcbM2O5Au6SO3AWCSEfW4mQ==", - "license": "WTFPL", - "dependencies": { - "utf8-byte-length": "^1.0.1" + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" } }, "node_modules/tsconfig-paths": { @@ -10224,18 +8854,6 @@ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "license": "Apache-2.0", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -10248,18 +8866,6 @@ "node": ">= 0.8.0" } }, - "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -10348,9 +8954,9 @@ } }, "node_modules/typescript": { - "version": "5.7.3", - "resolved": 
"https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", - "integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", "license": "Apache-2.0", "peer": true, "bin": { @@ -10380,9 +8986,9 @@ } }, "node_modules/undici-types": { - "version": "6.20.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", - "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", "license": "MIT" }, "node_modules/unicode-canonical-property-names-ecmascript": { @@ -10425,23 +9031,11 @@ "node": ">=4" } }, - "node_modules/unicorn-magic": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", - "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/universalify": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, "license": "MIT", "engines": { "node": ">= 10.0.0" @@ -10457,9 +9051,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz", - "integrity": 
"sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", "funding": [ { "type": "opencollective", @@ -10495,12 +9089,6 @@ "punycode": "^2.1.0" } }, - "node_modules/utf8-byte-length": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/utf8-byte-length/-/utf8-byte-length-1.0.5.tgz", - "integrity": "sha512-Xn0w3MtiQ6zoz2vFyUVruaCL53O/DwUvkEeOvj+uulMm0BkUGYWmBYVyElqZaSLhY6ZD0ulfU3aBra2aVT4xfA==", - "license": "(WTFPL OR MIT)" - }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -10529,6 +9117,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, "license": "Apache-2.0", "dependencies": { "spdx-correct": "^3.0.0", @@ -10567,13 +9156,14 @@ } }, "node_modules/webpack": { - "version": "5.97.1", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.97.1.tgz", - "integrity": "sha512-EksG6gFY3L1eFMROS/7Wzgrii5mBAFe4rIr3r2BTfo7bcc+DWwFZ4OJ/miOuHJO/A85HwyI4eQ0F6IKXesO7Fg==", + "version": "5.99.7", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.99.7.tgz", + "integrity": "sha512-CNqKBRMQjwcmKR0idID5va1qlhrqVUKpovi+Ec79ksW8ux7iS1+A6VqzfZXgVYCFRKl7XL5ap3ZoMpwBJxcg0w==", "license": "MIT", "dependencies": { "@types/eslint-scope": "^3.7.7", "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", "@webassemblyjs/ast": "^1.14.1", "@webassemblyjs/wasm-edit": "^1.14.1", "@webassemblyjs/wasm-parser": "^1.14.1", @@ -10590,9 +9180,9 @@ "loader-runner": "^4.2.0", "mime-types": 
"^2.1.27", "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", + "schema-utils": "^4.3.2", "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.10", + "terser-webpack-plugin": "^5.3.11", "watchpack": "^2.4.1", "webpack-sources": "^3.2.3" }, @@ -10712,14 +9302,15 @@ } }, "node_modules/webpack-dev-server": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-5.2.0.tgz", - "integrity": "sha512-90SqqYXA2SK36KcT6o1bvwvZfJFcmoamqeJY7+boioffX9g9C0wjjJRGUrQIuh43pb0ttX7+ssavmj/WN2RHtA==", + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-5.2.1.tgz", + "integrity": "sha512-ml/0HIj9NLpVKOMq+SuBPLHcmbG+TGIjXRHsYfZwocUBIqEvws8NnS/V9AFQ5FKP+tgn5adwVwRrTEpGL33QFQ==", "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.13", "@types/connect-history-api-fallback": "^1.5.4", "@types/express": "^4.17.21", + "@types/express-serve-static-core": "^4.17.21", "@types/serve-index": "^1.9.4", "@types/serve-static": "^1.15.5", "@types/sockjs": "^0.3.36", @@ -10795,9 +9386,9 @@ } }, "node_modules/webpack-dev-server/node_modules/open": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz", - "integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==", + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/open/-/open-10.1.1.tgz", + "integrity": "sha512-zy1wx4+P3PfhXSEPJNtZmJXfhkkIaxU1VauWIrDZw1O7uJRDRJtKr9n3Ic4NgbA16KyOxOXO2ng9gYwCdXuSXA==", "license": "MIT", "dependencies": { "default-browser": "^5.2.1", @@ -10835,24 +9426,6 @@ "node": ">=10.13.0" } }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": 
"^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/webpack/node_modules/tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", @@ -10965,15 +9538,16 @@ } }, "node_modules/which-typed-array": { - "version": "1.1.18", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.18.tgz", - "integrity": "sha512-qEcY+KJYlWyLH9vNbsr6/5j59AXk5ni5aakf8ldzBvGde6Iz4sxZGkJyWSAueTG7QhOvNRYb1lDdFmL5Td0QKA==", + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", "license": "MIT", "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", - "call-bound": "^1.0.3", - "for-each": "^0.3.3", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" }, @@ -11017,16 +9591,10 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC" - }, "node_modules/ws": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", - "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==", "license": "MIT", "engines": { "node": ">=10.0.0" @@ -11061,9 +9629,9 @@ "license": "ISC" }, 
"node_modules/yaml": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.0.tgz", - "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.1.tgz", + "integrity": "sha512-10ULxpnOCQXxJvBgxsn9ptjq6uviG/htZKk9veJGhlqn3w/DxQ631zFF+nlQXLwmImeS5amR2dl2U8sg6U9jsQ==", "dev": true, "license": "ISC", "bin": { diff --git a/docs/package.json b/docs/package.json index a64c6c5b16ac..e637059c57ee 100644 --- a/docs/package.json +++ b/docs/package.json @@ -27,35 +27,30 @@ "license": "Apache License 2.0", "homepage": "https://github.com/yugabyte/yugabyte-db/docs#readme", "dependencies": { - "@babel/core": "7.26.8", - "@babel/eslint-parser": "7.26.8", + "@babel/core": "7.26.10", "@babel/plugin-proposal-decorators": "7.25.9", - "@babel/preset-env": "7.26.8", + "@babel/preset-env": "7.26.9", "@fortawesome/fontawesome-pro": "6.7.2", + "@stylistic/eslint-plugin": "4.2.0", "algoliasearch": "4.23.3", - "babel-loader": "9.2.1", + "babel-loader": "10.0.0", "clipboard": "2.0.11", "detect-external-link": "2.0.1", - "eslint": "8.56.0", - "eslint-config-airbnb": "19.0.4", - "eslint-config-xo-space": "0.35.0", + "eslint": "9.25.1", "eslint-plugin-import": "2.31.0", - "eslint-plugin-jsx-a11y": "6.10.2", - "eslint-webpack-plugin": "4.2.0", - "hugo-algolia": "1.2.14", - "npm-run-all": "4.1.5", - "react-dev-tools": "0.0.1", - "react-dev-utils": "12.0.1", - "run-p": "0.0.0", - "sass": "1.84.0", - "webpack": "5.97.1", + "eslint-webpack-plugin": "5.0.1", + "globals": "16.0.0", + "sass": "1.87.0", + "webpack": "5.99.7", "webpack-cli": "6.0.1", - "webpack-dev-server": "5.2.0", + "webpack-dev-server": "5.2.1", "yb-rrdiagram": "0.0.7" }, "devDependencies": { - "autoprefixer": "10.4.20", - "postcss": "8.5.1", - "postcss-cli": "11.0.0" + "autoprefixer": "10.4.21", + "npm-run-all": "4.1.5", + "postcss": "8.5.3", + "postcss-cli": "11.0.1", + 
"run-p": "0.0.0" } } diff --git a/docs/src/index.js b/docs/src/index.js index 4ddbfc2a366a..5c5361592199 100644 --- a/docs/src/index.js +++ b/docs/src/index.js @@ -171,6 +171,17 @@ function yugabyteActiveLeftNav() { }); } +/** + * Add class to `right-nav-auto-scroll` in right menu. + */ +function rightnavAutoScroll() { + if ($('.td-sidebar-toc .td-toc').innerHeight() + 260 >= window.innerHeight) { + $('.td-sidebar-toc .td-toc').addClass('right-nav-auto-scroll'); + } else { + $('.td-sidebar-toc .td-toc').removeClass('right-nav-auto-scroll'); + } +} + $(document).ready(() => { const isSafari = /Safari/.test(navigator.userAgent) && /Apple Computer/.test(navigator.vendor); if (isSafari) { @@ -275,6 +286,7 @@ $(document).ready(() => { }); $('body').addClass('dragging'); yugabytePageFinderWidth(); + rightnavAutoScroll(); }); }); @@ -348,6 +360,28 @@ $(document).ready(() => { }); })(); + /** + * Check immediate heading before H5 on particular pages to apply divider on them. + * Like `/preview/reference/configuration/yb-tserver/`. + */ + (() => { + if (document.body.classList.contains('configuration')) { + const headings = document.querySelectorAll('.configuration h2, .configuration h3, .configuration h4, .configuration h5'); + let checkH5 = false; + + headings.forEach(heading => { + const tag = heading.tagName; + + if (tag === 'H2' || tag === 'H3' || tag === 'H4') { + checkH5 = true; + } else if (tag === 'H5' && checkH5) { + heading.classList.add('first-h5'); + checkH5 = false; + } + }); + } + })(); + /** * Add Image Popup. */ @@ -552,7 +586,10 @@ $(document).ready(() => { } })(document); + let lastScrollTop = 0; $(window).on('scroll', () => { + let activeLink = ''; + // Active TOC link on scroll. 
if ($('.td-toc #TableOfContents').length > 0) { let rightMenuSelector = '.td-content > h2,.td-content > h3,.td-content > h4'; @@ -567,10 +604,39 @@ $(document).ready(() => { const scrollTop = $(window).scrollTop(); const headingId = $(element).attr('id'); if (offsetTop - 75 <= scrollTop) { + activeLink = $(`.td-toc #TableOfContents a[href="#${headingId}"]`); $('.td-toc #TableOfContents a').removeClass('active-scroll'); - $(`.td-toc #TableOfContents a[href="#${headingId}"]`).addClass('active-scroll'); + activeLink.addClass('active-scroll'); } }); + + /* + * Autoscroll right nav where the right nav is a very long one. + */ + const tocContainer = $('.td-sidebar-toc .td-toc.right-nav-auto-scroll'); + if (tocContainer.length > 0) { + const linkOffset = activeLink.length ? activeLink.position().top : 0; + const containerHeight = tocContainer.height(); + const linkHeight = activeLink.length ? activeLink.outerHeight() : 0; + + let scrollFlag = 'up'; + let currentScroll = window.pageYOffset || document.documentElement.scrollTop; + if (currentScroll > lastScrollTop) { + scrollFlag = 'down'; + } + lastScrollTop = currentScroll <= 0 ? 
0 : currentScroll; + + if (scrollFlag === 'down') { + tocContainer.scrollTop(tocContainer.scrollTop() + linkOffset - (containerHeight - linkHeight) - 20); + } else if (scrollFlag === 'up') { + let currentPosition = linkOffset - 145 - linkHeight; + if (currentPosition > containerHeight) { + tocContainer.scrollTop(tocContainer.scrollTop() + linkOffset - ((containerHeight / 2) + linkHeight / 2)); + } else if (currentPosition <= 18) { + tocContainer.scrollTop(tocContainer.scrollTop() + linkOffset - (150 + linkHeight)); + } + } + } } }); @@ -683,10 +749,13 @@ $(document).ready(() => { yugabytePageFinderWidth(); }, 500); }); + + rightnavAutoScroll(); }); $(window).resize(() => { rightnavAppend(); + rightnavAutoScroll(); $('.td-main .td-sidebar').attr('style', ''); $('.td-main #dragbar').attr('style', ''); $('.td-main').attr('style', ''); diff --git a/docs/static/fonts/sf-mono/SFMonoSemibold.woff2 b/docs/static/fonts/sf-mono/SFMonoSemibold.woff2 new file mode 100644 index 000000000000..ef083fc716ba Binary files /dev/null and b/docs/static/fonts/sf-mono/SFMonoSemibold.woff2 differ diff --git a/docs/static/icons/t-server.svg b/docs/static/icons/t-server.svg new file mode 100644 index 000000000000..aef454a45c45 --- /dev/null +++ b/docs/static/icons/t-server.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/java/interface-annotations/pom.xml b/java/interface-annotations/pom.xml index 99d36a9011a2..b8222e3db631 100644 --- a/java/interface-annotations/pom.xml +++ b/java/interface-annotations/pom.xml @@ -21,7 +21,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT interface-annotations diff --git a/java/pom.xml b/java/pom.xml index 8fe8b7019c10..0b5165245da3 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -40,7 +40,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT pom Yugabyte diff --git a/java/yb-cdc/pom.xml b/java/yb-cdc/pom.xml index 89298e30803b..dcef6a3f58c8 100644 --- a/java/yb-cdc/pom.xml +++ b/java/yb-cdc/pom.xml @@ -5,7 +5,7 @@ org.yb 
yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-cdc YB CDC Connector diff --git a/java/yb-cdc/src/test/java/org/yb/cdc/common/CDCBaseClass.java b/java/yb-cdc/src/test/java/org/yb/cdc/common/CDCBaseClass.java index 39d3a7aebad3..9f8fa8c5d1f0 100644 --- a/java/yb-cdc/src/test/java/org/yb/cdc/common/CDCBaseClass.java +++ b/java/yb-cdc/src/test/java/org/yb/cdc/common/CDCBaseClass.java @@ -52,7 +52,8 @@ public class CDCBaseClass extends BaseMiniClusterTest { protected String CDC_INTENT_SIZE_GFLAG = "cdc_max_stream_intent_records"; protected String CDC_ENABLE_CONSISTENT_RECORDS = "cdc_enable_consistent_records"; protected String CDC_POPULATE_SAFEPOINT_RECORD = "cdc_populate_safepoint_record"; - + protected String TEST_DCHECK_FOR_MISSING_SCHEMA_PACKING = + "TEST_dcheck_for_missing_schema_packing"; // Postgres settings. protected static final String DEFAULT_PG_DATABASE = "yugabyte"; protected static final String DEFAULT_PG_USER = "yugabyte"; @@ -97,8 +98,8 @@ protected Integer getYsqlRequestLimit() { return null; } - /** empty helper function */ protected void setUp() throws Exception { + setServerFlag(getTserverHostAndPort(), TEST_DCHECK_FOR_MISSING_SCHEMA_PACKING, "false"); } /** diff --git a/java/yb-cli/pom.xml b/java/yb-cli/pom.xml index f1b19d715510..c94607b0edc6 100644 --- a/java/yb-cli/pom.xml +++ b/java/yb-cli/pom.xml @@ -25,7 +25,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-cli diff --git a/java/yb-client/pom.xml b/java/yb-client/pom.xml index 101f813c2a46..4f307d662304 100644 --- a/java/yb-client/pom.xml +++ b/java/yb-client/pom.xml @@ -25,7 +25,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-client diff --git a/java/yb-client/src/main/java/org/yb/client/AsyncYBClient.java b/java/yb-client/src/main/java/org/yb/client/AsyncYBClient.java index dcafaf313760..76e81698ee30 100644 --- a/java/yb-client/src/main/java/org/yb/client/AsyncYBClient.java +++ b/java/yb-client/src/main/java/org/yb/client/AsyncYBClient.java @@ -108,7 +108,6 @@ 
import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; import javax.net.ssl.TrustManagerFactory; -import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemReader; import org.slf4j.Logger; @@ -3808,7 +3807,6 @@ private void handleClose(TabletClient client, Channel channel) { private SslHandler createSslHandler() { try { - Security.addProvider(new BouncyCastleProvider()); CertificateFactory cf = CertificateFactory.getInstance("X.509"); FileInputStream fis = new FileInputStream(certFile); List cas; diff --git a/java/yb-cql-4x/pom.xml b/java/yb-cql-4x/pom.xml index 7f51b46279d6..0f7dde4b9da3 100644 --- a/java/yb-cql-4x/pom.xml +++ b/java/yb-cql-4x/pom.xml @@ -7,7 +7,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-cql-4x YB CQL Support for 4.x Driver diff --git a/java/yb-cql/pom.xml b/java/yb-cql/pom.xml index d07ed27a6e0b..b0a63e78f8ae 100644 --- a/java/yb-cql/pom.xml +++ b/java/yb-cql/pom.xml @@ -7,7 +7,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-cql YB CQL Support diff --git a/java/yb-jedis-tests/pom.xml b/java/yb-jedis-tests/pom.xml index 5ec8d99fcdb7..0dcf31dbf926 100644 --- a/java/yb-jedis-tests/pom.xml +++ b/java/yb-jedis-tests/pom.xml @@ -7,7 +7,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-jedis-tests YB Jedis Tests diff --git a/java/yb-loadtester/pom.xml b/java/yb-loadtester/pom.xml index 917095b7312f..26d02434106c 100644 --- a/java/yb-loadtester/pom.xml +++ b/java/yb-loadtester/pom.xml @@ -6,7 +6,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-loadtester diff --git a/java/yb-multiapi/pom.xml b/java/yb-multiapi/pom.xml index ec89ff388480..bb60472c8ff7 100644 --- a/java/yb-multiapi/pom.xml +++ b/java/yb-multiapi/pom.xml @@ -9,7 +9,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-multiapi diff --git a/java/yb-pgsql/pom.xml b/java/yb-pgsql/pom.xml index c372638ec1ea..30055b6ffba3 100644 
--- a/java/yb-pgsql/pom.xml +++ b/java/yb-pgsql/pom.xml @@ -8,7 +8,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-pgsql YB PostgreSQL Support diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/ConcurrentTablespaceTest.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/ConcurrentTablespaceTest.java index 446848b2d501..70e9ac24f034 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/ConcurrentTablespaceTest.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/ConcurrentTablespaceTest.java @@ -121,7 +121,7 @@ private Tablespace[] generateTestTablespaces() { private List setupConcurrentDdlDmlThreads(String ddlTemplate) { final int totalThreads = numDmlThreads + numDdlThreads; - final CyclicBarrier barrier = new CyclicBarrier(totalThreads); + final Phaser phaser = new Phaser(totalThreads); final List threads = new ArrayList<>(); // Add the DDL thread. @@ -131,14 +131,14 @@ private List setupConcurrentDdlDmlThreads(String ddlTemplate) { connections[i], ddlTemplate, errorsDetected, - barrier, + phaser, numStmtsPerThread, tablespaces)); } // Add the DML threads. 
for (int i = numDdlThreads; i < totalThreads; ++i) { - threads.add(new DMLRunner(connections[i], errorsDetected, barrier, numStmtsPerThread, i)); + threads.add(new DMLRunner(connections[i], errorsDetected, phaser, numStmtsPerThread, i)); } return threads; } @@ -223,7 +223,7 @@ public void testTableCreationFailure() throws Exception { YBClient client = miniCluster.getClient(); connections = setupConnections(); final int totalThreads = numDmlThreads + numDdlThreads; - final CyclicBarrier barrier = new CyclicBarrier(totalThreads); + final Phaser phaser = new Phaser(totalThreads); final List threads = new ArrayList<>(); AtomicBoolean invalidPlacementError = new AtomicBoolean(false); @@ -236,7 +236,7 @@ public void testTableCreationFailure() throws Exception { connections[0], "CREATE TABLE validplacementtable (a int) TABLESPACE %s", errorsDetected, - barrier, + phaser, 1, new Tablespace[] {valid_ts})); @@ -248,7 +248,7 @@ public void testTableCreationFailure() throws Exception { connections[1], "CREATE TABLE invalid_placementtable (a int) TABLESPACE %s", invalidPlacementError, - barrier, + phaser, 1, new Tablespace[] {invalid_ts})); @@ -268,19 +268,19 @@ public void testTableCreationFailure() throws Exception { public abstract class SQLRunner extends Thread { protected final Connection conn; protected final AtomicBoolean errorsDetected; - protected final CyclicBarrier barrier; + protected final Phaser phaser; protected final int numStmtsPerThread; protected int idx; // Only used by DMLRunner public SQLRunner( Connection conn, AtomicBoolean errorsDetected, - CyclicBarrier barrier, + Phaser phaser, int numStmtsPerThread, int idx) { this.conn = conn; this.errorsDetected = errorsDetected; - this.barrier = barrier; + this.phaser = phaser; this.numStmtsPerThread = numStmtsPerThread; this.idx = idx; // This field is not used in DDLRunner } @@ -289,16 +289,22 @@ public SQLRunner( public void run() { int item_idx = 0; while (item_idx < numStmtsPerThread && !errorsDetected.get()) 
{ - try (Statement lstmt = conn.createStatement()) { - barrier.await(); - executeStatement(lstmt, item_idx); + try { + phaser.arriveAndAwaitAdvance(); + + try (Statement lstmt = conn.createStatement()) { + executeStatement(lstmt, item_idx); + } + item_idx++; } catch (PSQLException e) { handlePSQLException(e); - } catch (SQLException | InterruptedException | BrokenBarrierException e) { + } catch (SQLException e) { logAndSetError(e); } } + // Thread finished: shrink the party count. + phaser.arriveAndDeregister(); } protected abstract void executeStatement(Statement lstmt, int item_idx) throws SQLException; @@ -324,7 +330,7 @@ private void handlePSQLException(PSQLException e) { protected void logAndSetError(Exception e) { LOG.info("SQL thread: Unexpected error: ", e); errorsDetected.set(true); - barrier.reset(); + phaser.forceTermination(); } } @@ -337,10 +343,10 @@ public class DMLRunner extends SQLRunner { public DMLRunner( Connection conn, AtomicBoolean errorsDetected, - CyclicBarrier barrier, + Phaser phaser, int numStmtsPerThread, int idx) { - super(conn, errorsDetected, barrier, numStmtsPerThread, idx); + super(conn, errorsDetected, phaser, numStmtsPerThread, idx); } @Override @@ -372,10 +378,10 @@ public DDLRunner( Connection conn, String sql, AtomicBoolean errorsDetected, - CyclicBarrier barrier, + Phaser phaser, int numStmtsPerThread, Tablespace[] tablespaces) { - super(conn, errorsDetected, barrier, numStmtsPerThread, 0); // idx is not used here + super(conn, errorsDetected, phaser, numStmtsPerThread, 0); // idx is not used here this.sql = sql; this.tablespaces = tablespaces; } diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/ExplainAnalyzeUtils.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/ExplainAnalyzeUtils.java index 3d673b36fcdc..0138f07a6623 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/ExplainAnalyzeUtils.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/ExplainAnalyzeUtils.java @@ -35,6 +35,7 @@ public class ExplainAnalyzeUtils { 
public static final String NODE_HASH_JOIN = "Hash Join"; public static final String NODE_INDEX_ONLY_SCAN = "Index Only Scan"; public static final String NODE_INDEX_SCAN = "Index Scan"; + public static final String NODE_INDEX_SCAN_BACKWARD = "Index Scan Backward"; public static final String NODE_LIMIT = "Limit"; public static final String NODE_MERGE_JOIN = "Merge Join"; public static final String NODE_MODIFY_TABLE = "ModifyTable"; @@ -45,6 +46,7 @@ public class ExplainAnalyzeUtils { public static final String NODE_VALUES_SCAN = "Values Scan"; public static final String NODE_YB_BITMAP_TABLE_SCAN = "YB Bitmap Table Scan"; public static final String NODE_YB_BATCHED_NESTED_LOOP = "YB Batched Nested Loop"; + public static final String INDEX_SCAN_DIRECTION_BACKWARD = "Backward"; public static final String PLAN = "Plan"; @@ -74,6 +76,7 @@ public interface PlanCheckerBuilder extends ObjectCheckerBuilder { PlanCheckerBuilder alias(String value); PlanCheckerBuilder indexName(String value); PlanCheckerBuilder nodeType(String value); + PlanCheckerBuilder scanDirection(String value); PlanCheckerBuilder operation(String value); PlanCheckerBuilder planRows(ValueChecker checker); PlanCheckerBuilder plans(Checker... 
checker); @@ -112,7 +115,11 @@ public interface PlanCheckerBuilder extends ObjectCheckerBuilder { // Seek and Next Estimation PlanCheckerBuilder estimatedSeeks(ValueChecker checker); - PlanCheckerBuilder estimatedNexts(ValueChecker checker); + PlanCheckerBuilder estimatedNextsAndPrevs(ValueChecker checker); + + // Roundtrips Estimation + PlanCheckerBuilder estimatedTableRoundtrips(ValueChecker checker); + PlanCheckerBuilder estimatedIndexRoundtrips(ValueChecker checker); // Estimated Docdb Result Width PlanCheckerBuilder estimatedDocdbResultWidth(ValueChecker checker); diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPg15Regress.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPg15Regress.java deleted file mode 100644 index 06868481e542..000000000000 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPg15Regress.java +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) YugaByte, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software distributed under the License -// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -// or implied. See the License for the specific language governing permissions and limitations -// under the License. -// -package org.yb.pgsql; - -import java.util.Map; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.yb.util.BuildTypeUtil; -import org.yb.util.YBTestRunnerNonTsanOnly; - -// Runs the pg_regress test suite on YB code. 
-@RunWith(value = YBTestRunnerNonTsanOnly.class) -public class TestPg15Regress extends BasePgRegressTest { - @Override - public int getTestMethodTimeoutSec() { - return BuildTypeUtil.nonSanitizerVsSanitizer(2100, 2700); - } - - @Override - protected Map getTServerFlags() { - Map flagMap = super.getTServerFlags(); - flagMap.put("allowed_preview_flags_csv", "ysql_yb_enable_replication_commands"); - flagMap.put("ysql_yb_enable_replication_commands", "true"); - return flagMap; - } - - @Override - protected Map getMasterFlags() { - Map flagMap = super.getMasterFlags(); - flagMap.put("allowed_preview_flags_csv", "ysql_yb_enable_replication_commands"); - flagMap.put("ysql_yb_enable_replication_commands", "true"); - return flagMap; - } - - @Test - public void testPg15Regress() throws Exception { - runPgRegressTest("yb_pg15"); - } -} diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBackwardIndexScan.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBackwardIndexScan.java new file mode 100644 index 000000000000..d3c53448b4c7 --- /dev/null +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgBackwardIndexScan.java @@ -0,0 +1,214 @@ +package org.yb.pgsql; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yb.YBTestRunner; +import org.yb.pgsql.ExplainAnalyzeUtils.PlanCheckerBuilder; +import org.yb.pgsql.ExplainAnalyzeUtils.TopLevelCheckerBuilder; +import org.yb.util.json.Checkers; +import org.yb.util.json.JsonUtil; +import org.yb.util.json.ValueChecker; + +import static org.yb.pgsql.ExplainAnalyzeUtils.NODE_INDEX_ONLY_SCAN; +import static org.yb.pgsql.ExplainAnalyzeUtils.NODE_LIMIT; +import static org.yb.pgsql.ExplainAnalyzeUtils.INDEX_SCAN_DIRECTION_BACKWARD; +import static org.yb.pgsql.ExplainAnalyzeUtils.testExplainDebug; + 
+@RunWith(value=YBTestRunner.class) +public class TestPgBackwardIndexScan extends BasePgSQLTest { + + private static final double SEEK_FAULT_TOLERANCE_OFFSET = 1; + private static final double SEEK_FAULT_TOLERANCE_RATE = 0.2; + private static final double SEEK_LOWER_BOUND_FACTOR = 1 - SEEK_FAULT_TOLERANCE_RATE; + private static final double SEEK_UPPER_BOUND_FACTOR = 1 + SEEK_FAULT_TOLERANCE_RATE; + private static final double NEXT_FAULT_TOLERANCE_OFFSET = 2; + private static final double NEXT_FAULT_TOLERANCE_RATE = 0.5; + private static final double NEXT_LOWER_BOUND_FACTOR = 1 - NEXT_FAULT_TOLERANCE_RATE; + private static final double NEXT_UPPER_BOUND_FACTOR = 1 + NEXT_FAULT_TOLERANCE_RATE; + + private static final Logger LOG = + LoggerFactory.getLogger(TestPgBackwardIndexScan.class); + + private static TopLevelCheckerBuilder makeTopLevelBuilder() { + return JsonUtil.makeCheckerBuilder(TopLevelCheckerBuilder.class, false); + } + + private static PlanCheckerBuilder makePlanBuilder() { + return JsonUtil.makeCheckerBuilder(PlanCheckerBuilder.class, false); + } + + @Before + public void setUp() throws Exception { + try (Statement stmt = connection.createStatement()) { + stmt.execute("SET yb_enable_base_scans_cost_model = true"); + } + } + + @After + public void tearDown() throws Exception { + try (Statement stmt = connection.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS test"); + } + } + + @Override + protected Map getTServerFlags() { + Map flagMap = super.getTServerFlags(); + flagMap.put("use_fast_backward_scan", "false"); + flagMap.put("ysql_enable_packed_row_for_colocated_table", "true"); + return flagMap; + } + + @Override + protected Map getMasterFlags() { + Map flagMap = super.getMasterFlags(); + flagMap.put("use_fast_backward_scan", "false"); + flagMap.put("ysql_enable_packed_row_for_colocated_table", "true"); + return flagMap; + } + + private ValueChecker expectedSeeksRange(double expected_seeks) { + double expected_lower_bound = expected_seeks * 
SEEK_LOWER_BOUND_FACTOR + - SEEK_FAULT_TOLERANCE_OFFSET; + double expected_upper_bound = expected_seeks * SEEK_UPPER_BOUND_FACTOR + + SEEK_FAULT_TOLERANCE_OFFSET; + return Checkers.closed(expected_lower_bound, expected_upper_bound); + } + + private ValueChecker expectedNextsRange(double expected_nexts) { + double expected_lower_bound = expected_nexts * NEXT_LOWER_BOUND_FACTOR + - NEXT_FAULT_TOLERANCE_OFFSET; + double expected_upper_bound = expected_nexts * NEXT_UPPER_BOUND_FACTOR + + NEXT_FAULT_TOLERANCE_OFFSET; + return Checkers.closed(expected_lower_bound, expected_upper_bound); + } + + private void testSeekAndNextEstimationIndexOnlyScanBackwardHelper( + Statement stmt, String query, + String table_name, String index_name, + double expected_seeks, + double expected_nexts) throws Exception { + try { + testExplainDebug(stmt, + String.format("/*+ Set(enable_sort off) IndexOnlyScan(%s %s) */ %s", + table_name, index_name, query), + makeTopLevelBuilder() + .plan(makePlanBuilder() + .nodeType(NODE_INDEX_ONLY_SCAN) + .scanDirection(INDEX_SCAN_DIRECTION_BACKWARD) + .relationName(table_name) + .indexName(index_name) + .estimatedSeeks(expectedSeeksRange(expected_seeks)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .build()) + .build()); + } + catch (AssertionError e) { + LOG.info("Failed Query: " + query); + LOG.info(e.toString()); + throw e; + } + } + + private void testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper( + Statement stmt, String query, + String table_name, String index_name, + double expected_seeks, + double expected_nexts) throws Exception { + try { + testExplainDebug(stmt, + String.format("/*+ IndexOnlyScan(%s %s) */ %s", table_name, index_name, query), + makeTopLevelBuilder() + .plan(makePlanBuilder() + .nodeType(NODE_LIMIT) + .plans(makePlanBuilder() + .nodeType(NODE_INDEX_ONLY_SCAN) + .scanDirection(INDEX_SCAN_DIRECTION_BACKWARD) + .relationName(table_name) + .indexName(index_name) + 
.estimatedSeeks(expectedSeeksRange(expected_seeks)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .build()) + .build()) + .build()); + } + catch (AssertionError e) { + LOG.info("Failed Query: " + query); + LOG.info(e.toString()); + throw e; + } + } + + @Test + public void testSeekNextEstimationIndexScan() throws Exception { + setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + boolean isConnMgr = isTestRunningWithConnectionManager(); + if (isConnMgr) { + setUp(); + } + + try (Statement stmt = connection.createStatement()) { + stmt.execute("CREATE TABLE t1 (k1 INT, k2 INT, v1 INT)"); + stmt.execute("INSERT INTO t1 SELECT s1, s2, s2 FROM " + + "generate_series(1, 300) s1, generate_series(1, 300) s2"); + + stmt.execute("CREATE INDEX t1_idx_1 ON t1 (k1 ASC, k2 ASC)"); + stmt.execute("ANALYZE t1"); + + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 DESC LIMIT 5000", + "t1", "t1_idx_1", 180000, 180000); + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 DESC LIMIT 10000", + "t1", "t1_idx_1", 180000, 180000); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 < 10 ORDER BY k1 DESC", + "t1", "t1_idx_1", 4800, 4800); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 > 290 ORDER BY k1 DESC", + "t1", "t1_idx_1", 6419, 6419); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 < 10 ORDER BY k1 DESC", + "t1", "t1_idx_1", 4805, 5251); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 > 290 ORDER BY k1 DESC", + "t1", "t1_idx_1", 6773, 6835); + + stmt.execute("DROP INDEX t1_idx_1"); + + stmt.execute("CREATE INDEX t1_idx_2 ON t1 (k1 DESC, k2 ASC)"); + stmt.execute("ANALYZE t1"); + + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + 
"SELECT k1, k2 FROM t1 ORDER BY k1 ASC LIMIT 5000", + "t1", "t1_idx_2", 180000, 180000); + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 ASC LIMIT 10000", + "t1", "t1_idx_2", 180000, 180000); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 < 10 ORDER BY k1 ASC", + "t1", "t1_idx_2", 4800, 4800); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 > 290 ORDER BY k1 ASC", + "t1", "t1_idx_2", 6419, 6419); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 < 10 ORDER BY k1 ASC", + "t1", "t1_idx_2", 4805, 5251); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 > 290 ORDER BY k1 ASC", + "t1", "t1_idx_2", 6773, 6835); + } + } +} diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java index 718cb95e2036..46b85e0282fc 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgCostModelSeekNextEstimation.java @@ -62,6 +62,10 @@ public class TestPgCostModelSeekNextEstimation extends BasePgSQLTest { private static final String METRIC_NUM_DB_SEEK = "rocksdb_number_db_seek"; private static final String METRIC_NUM_DB_NEXT = "rocksdb_number_db_next"; + private static final String T5_NAME = "t5"; + private static final String T5_K1_INDEX_NAME = "t5_k1_idx"; + private static final String T5_K2_INDEX_NAME = "t5_k2_idx"; + private Connection connection2; private static TopLevelCheckerBuilder makeTopLevelBuilder() { @@ -88,14 +92,36 @@ private ValueChecker expectedNextsRange(double expected_nexts) { return Checkers.closed(expected_lower_bound, expected_upper_bound); } + // If expected_roundtrips==0, then the checker will verify the actual value + // 
is exactly 0. If non-zero, then the actual value should be in the range 1 to + // max Double (so the checker is only verifying the value is non-zero). + private ValueChecker expectedRoundtripsRange(double expected_roundtrips) { + + ValueChecker checker; + + if (expected_roundtrips == 0) + { + checker = Checkers.closed(0.0d, 0.0d); + } + else + { + checker = Checkers.closed(1.0, Integer.MAX_VALUE); + } + + return checker; + } + private void testSeekAndNextEstimationIndexScanHelper( Statement stmt, String query, String table_name, String index_name, double expected_seeks, double expected_nexts, + double expected_index_roundtrips, + double expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { testSeekAndNextEstimationIndexScanHelper(stmt, query, table_name, index_name, - NODE_INDEX_SCAN, expected_seeks, expected_nexts, expected_docdb_result_width); + NODE_INDEX_SCAN, expected_seeks, expected_nexts, expected_index_roundtrips, + expected_table_roundtrips,expected_docdb_result_width); } private void testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults( @@ -103,9 +129,12 @@ private void testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults( String table_name, String index_name, double expected_seeks, double expected_nexts, + double expected_index_roundtrips, + double expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, query, table_name, index_name, - NODE_INDEX_SCAN, expected_seeks, expected_nexts, expected_docdb_result_width); + NODE_INDEX_SCAN, expected_seeks, expected_nexts, expected_index_roundtrips, + expected_table_roundtrips, expected_docdb_result_width); } private void testSeekAndNextEstimationIndexOnlyScanHelper( @@ -113,9 +142,12 @@ private void testSeekAndNextEstimationIndexOnlyScanHelper( String table_name, String index_name, double expected_seeks, double expected_nexts, + double expected_index_roundtrips, + double 
expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { testSeekAndNextEstimationIndexScanHelper(stmt, query, table_name, index_name, - NODE_INDEX_ONLY_SCAN, expected_seeks, expected_nexts, expected_docdb_result_width); + NODE_INDEX_ONLY_SCAN, expected_seeks, expected_nexts, expected_index_roundtrips, + expected_table_roundtrips,expected_docdb_result_width); } private void testSeekAndNextEstimationIndexOnlyScanHelper_IgnoreActualResults( @@ -123,9 +155,12 @@ private void testSeekAndNextEstimationIndexOnlyScanHelper_IgnoreActualResults( String table_name, String index_name, double expected_seeks, double expected_nexts, + double expected_index_roundtrips, + double expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, query, table_name, index_name, - NODE_INDEX_ONLY_SCAN, expected_seeks, expected_nexts, expected_docdb_result_width); + NODE_INDEX_ONLY_SCAN, expected_seeks, expected_nexts, expected_index_roundtrips, + expected_table_roundtrips,expected_docdb_result_width); } private void testSeekAndNextEstimationIndexScanHelper( @@ -134,6 +169,8 @@ private void testSeekAndNextEstimationIndexScanHelper( String node_type, double expected_seeks, double expected_nexts, + double expected_index_roundtrips, + double expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { try { testExplainDebug(stmt, query, @@ -143,7 +180,9 @@ private void testSeekAndNextEstimationIndexScanHelper( .relationName(table_name) .indexName(index_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .estimatedIndexRoundtrips(expectedRoundtripsRange(expected_index_roundtrips)) + .estimatedTableRoundtrips(expectedRoundtripsRange(expected_table_roundtrips)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) 
.metric(METRIC_NUM_DB_SEEK, expectedSeeksRange(expected_seeks)) .metric(METRIC_NUM_DB_NEXT, expectedNextsRange(expected_nexts)) @@ -165,7 +204,7 @@ private ObjectChecker makeBitmapIndexScanChecker( .nodeType(NODE_BITMAP_INDEX_SCAN) .indexName(index_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) .metric(METRIC_NUM_DB_SEEK, expectedSeeksRange(expected_seeks)) .metric(METRIC_NUM_DB_NEXT, expectedNextsRange(expected_nexts)) .build(); @@ -179,7 +218,7 @@ private ObjectChecker makeBitmapIndexScanChecker_IgnoreActualResults( .nodeType(NODE_BITMAP_INDEX_SCAN) .indexName(index_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) .build(); } @@ -197,7 +236,7 @@ private void testSeekAndNextEstimationBitmapScanHelper( .nodeType(NODE_YB_BITMAP_TABLE_SCAN) .relationName(table_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) .metric(METRIC_NUM_DB_SEEK, expectedSeeksRange(expected_seeks)) .metric(METRIC_NUM_DB_NEXT, expectedNextsRange(expected_nexts)) @@ -226,7 +265,7 @@ private void testSeekAndNextEstimationBitmapScanHelper_IgnoreActualResults( .nodeType(NODE_YB_BITMAP_TABLE_SCAN) .relationName(table_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) .plans(bitmap_index_checker) .build()) @@ -245,6 +284,8 @@ private void testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults( String node_type, double expected_seeks, double expected_nexts, + 
double expected_index_roundtrips, + double expected_table_roundtrips, Integer expected_docdb_result_width) throws Exception { try { testExplainDebug(stmt, query, @@ -254,7 +295,9 @@ private void testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults( .relationName(table_name) .indexName(index_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .estimatedIndexRoundtrips(expectedRoundtripsRange(expected_index_roundtrips)) + .estimatedTableRoundtrips(expectedRoundtripsRange(expected_table_roundtrips)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) .build()) .build()); @@ -270,6 +313,7 @@ private void testSeekAndNextEstimationSeqScanHelper( Statement stmt, String query, String table_name, double expected_seeks, double expected_nexts, + double expected_table_roundtrips, long expected_docdb_result_width) throws Exception { try { testExplainDebug(stmt, query, @@ -278,7 +322,9 @@ private void testSeekAndNextEstimationSeqScanHelper( .nodeType(NODE_SEQ_SCAN) .relationName(table_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .estimatedIndexRoundtrips(expectedRoundtripsRange(0)) + .estimatedTableRoundtrips(expectedRoundtripsRange(expected_table_roundtrips)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) .metric(METRIC_NUM_DB_SEEK, expectedSeeksRange(expected_seeks)) .metric(METRIC_NUM_DB_NEXT, expectedNextsRange(expected_nexts)) @@ -296,6 +342,7 @@ private void testSeekAndNextEstimationSeqScanHelper_IgnoreActualResults( Statement stmt, String query, String table_name, double expected_seeks, double expected_nexts, + double expected_table_roundtrips, long expected_docdb_result_width) throws Exception { try { testExplainDebug(stmt, query, @@ -304,7 +351,8 @@ private void 
testSeekAndNextEstimationSeqScanHelper_IgnoreActualResults( .nodeType(NODE_SEQ_SCAN) .relationName(table_name) .estimatedSeeks(expectedSeeksRange(expected_seeks)) - .estimatedNexts(expectedNextsRange(expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .estimatedTableRoundtrips(expectedRoundtripsRange(expected_table_roundtrips)) .estimatedDocdbResultWidth(Checkers.equal(expected_docdb_result_width)) .build()) .build()); @@ -333,13 +381,13 @@ private void testSeekAndNextEstimationJoinHelper_IgnoreActualResults( .relationName(outer_table_name) .nodeType(outer_table_scan_type) .estimatedSeeks(expectedSeeksRange(outer_expected_seeks)) - .estimatedNexts(expectedNextsRange(outer_expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(outer_expected_nexts)) .build(), makePlanBuilder() .relationName(inner_table_name) .nodeType(inner_table_scan_type) .estimatedSeeks(expectedSeeksRange(inner_expected_seeks)) - .estimatedNexts(expectedNextsRange(inner_expected_nexts)) + .estimatedNextsAndPrevs(expectedNextsRange(inner_expected_nexts)) .build()) .build()) .build()); @@ -401,8 +449,19 @@ public void setUp() throws Exception { + "generate_series(1, 20) s4", T4_NO_PKEY_NAME)); stmt.execute(String.format("CREATE STATISTICS %s_stx ON k1, k2, k3, k4 FROM %s", T4_NO_PKEY_NAME, T4_NO_PKEY_NAME)); - stmt.execute(String.format("ANALYZE %s, %s, %s, %s, %s, %s;", - T1_NAME, T2_NAME, T3_NAME, T4_NAME, T4_NO_PKEY_NAME, T2_NO_PKEY_NAME)); + // Create a non-colocated table. 
+ stmt.execute(String.format("CREATE TABLE %s (k1 INT, k2 INT) " + + "WITH (colocated = false)", T5_NAME)); + stmt.execute(String.format("CREATE INDEX %s on %s (k1 ASC)", + T5_K1_INDEX_NAME, T5_NAME)); + stmt.execute(String.format("CREATE INDEX %s on %s (k2 ASC)", + T5_K2_INDEX_NAME, T5_NAME)); + stmt.execute(String.format("INSERT INTO %s SELECT k1, k2 FROM %s", + T5_NAME, T2_NO_PKEY_NAME)); + stmt.execute(String.format("CREATE STATISTICS %s_stx ON k1, k2 FROM %s", + T5_NAME, T5_NAME)); + stmt.execute(String.format("ANALYZE %s, %s, %s, %s, %s, %s, %s;", + T1_NAME, T2_NAME, T3_NAME, T4_NAME, T4_NO_PKEY_NAME, T2_NO_PKEY_NAME, T5_NAME)); stmt.execute("SET yb_enable_optimizer_statistics = true"); stmt.execute("SET yb_enable_base_scans_cost_model = true"); stmt.execute("SET yb_bnl_batch_size = 1024"); @@ -461,175 +520,180 @@ public void testSeekNextEstimationIndexScan() throws Exception { } testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8)", T1_NAME, T1_NAME), - T1_NAME, T1_INDEX_NAME, 2, 4, 5); + T1_NAME, T1_INDEX_NAME, 2, 4, 1, 0, 5); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12)", T1_NAME, T1_NAME), - T1_NAME, T1_INDEX_NAME, 3, 7, 5); + T1_NAME, T1_INDEX_NAME, 3, 7, 1, 0, 5); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16)", T1_NAME, T1_NAME), - T1_NAME, T1_INDEX_NAME, 4, 10, 5); + T1_NAME, T1_INDEX_NAME, 4, 10, 1, 0, 5); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 4, 86, 10); + T2_NAME, T2_INDEX_NAME, 4, 86, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 101, 280, 
10); + T2_NAME, T2_INDEX_NAME, 101, 280, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12) AND k4 IN (4, 8, 12, 16)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 6007, 16808, 20); + T4_NAME, T4_INDEX_NAME, 6007, 16808, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k2 IN (4, 8, 12, 16) AND k4 IN (4, 8, 12)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 6505, 17804, 20); + T4_NAME, T4_INDEX_NAME, 6505, 17804, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16) AND k2 IN (4, 8, 12, 16)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 22, 6440, 20); + T4_NAME, T4_INDEX_NAME, 22, 6440, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T1_NAME, T1_NAME), - T1_NAME, T1_INDEX_NAME, 1, 10, 5); + T1_NAME, T1_INDEX_NAME, 1, 10, 1, 0, 5); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 1, 200, 10); + T2_NAME, T2_INDEX_NAME, 1, 200, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T3_NAME, T3_NAME), - T3_NAME, T3_INDEX_NAME, 5, 4000, 15); + T3_NAME, T3_INDEX_NAME, 5, 4000, 1, 0, 15); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 79, 80000, 20); + T4_NAME, T4_INDEX_NAME, 79, 80000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 14", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 41, 280, 10); + T2_NAME, 
T2_INDEX_NAME, 41, 280, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k3 >= 4 and k3 < 14", T3_NAME, T3_NAME), - T3_NAME, T3_INDEX_NAME, 804, 5600, 15); + T3_NAME, T3_INDEX_NAME, 804, 5600, 1, 0, 15); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k4 >= 4 and k4 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 16079, 112000, 20); + T4_NAME, T4_INDEX_NAME, 16079, 112000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k3 >= 4 and k3 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 879, 81600, 20); + T4_NAME, T4_INDEX_NAME, 879, 81600, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 120, 80000, 20); + T4_NAME, T4_INDEX_NAME, 120, 80000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14 and k3 >= 4 and k3 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 440, 40800, 20); + T4_NAME, T4_INDEX_NAME, 440, 40800, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 = 4 and k2 IN (4, 8, 12, 16)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 5, 1606, 20); + T4_NAME, T4_INDEX_NAME, 5, 1606, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16) and k2 = 4", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 5, 1606, 20); + T4_NAME, T4_INDEX_NAME, 5, 1606, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k3 IN (4, 8, 12, 16) and k4 = 4", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 2002, 4000, 20); + T4_NAME, T4_INDEX_NAME, 
2002, 4000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 5 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 5, 8, 10); + T2_NAME, T2_INDEX_NAME, 5, 8, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 6 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 10, 18, 10); + T2_NAME, T2_INDEX_NAME, 10, 18, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, T2_INDEX_NAME, 50, 98, 10); + T2_NAME, T2_INDEX_NAME, 50, 98, 1, 0, 10); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 7 and k3 IN (4, 8, 12, 16)", T3_NAME, T3_NAME), - T3_NAME, T3_INDEX_NAME, 301, 600, 15); + T3_NAME, T3_INDEX_NAME, 301, 600, 1, 0, 15); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 7 and k4 IN (4, 8, 12)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 4844, 9680, 20); + T4_NAME, T4_INDEX_NAME, 4844, 9680, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (1, 4, 7, 10)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 35, 32037, 20); + T4_NAME, T4_INDEX_NAME, 35, 32037, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k2 >= 4", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 129, 115744, 20); + T4_NAME, T4_INDEX_NAME, 129, 115744, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k2 >= 4", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 76, 
68084, 20); + T4_NAME, T4_INDEX_NAME, 76, 68084, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k2 >= 4 AND k2 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 59, 40077, 20); + T4_NAME, T4_INDEX_NAME, 59, 40077, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k2 >= 4 AND k2 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 100, 68132, 20); + T4_NAME, T4_INDEX_NAME, 100, 68132, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (1, 4, 7, 10) AND k2 IN (1, 4, 7, 10)", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 22, 6436, 20); + T4_NAME, T4_INDEX_NAME, 22, 6436, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k3 >= 4", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 453, 116392, 20); + T4_NAME, T4_INDEX_NAME, 453, 116392, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k3 >= 4 AND k3 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 440, 40839, 20); + T4_NAME, T4_INDEX_NAME, 440, 40839, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, String.format("/*+IndexScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k3 >= 4 AND k3 < 14", T4_NAME, T4_NAME), - T4_NAME, T4_INDEX_NAME, 747, 69426, 20); + T4_NAME, T4_INDEX_NAME, 747, 69426, 1, 0, 20); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 >= 4 AND k3 >= 4", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 452, 116394, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 452, 116394, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, 
String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 >= 4 AND k3 = 4", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 686, 8168, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 686, 8168, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 >= 4 AND k3 IN (4, 8, 12)", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 1379, 23141, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 1379, 23141, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 = 4 AND k3 IN (4, 8, 12)", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 81, 1363, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 81, 1363, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 IN (4, 8, 12) AND k3 IN (4, 8, 12)", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 246, 4091, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 246, 4091, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 IN (4, 8, 12) AND k3 >= 4", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 82, 20547, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 82, 20547, 1, 0, 5); testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format("/*+IndexOnlyScan(%s %s)*/ SELECT k2 FROM %s " + "WHERE k1 IN (4, 8, 12) AND k3 = 4", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 124, 1389, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 124, 1389, 1, 0, 5); 
testSeekAndNextEstimationIndexOnlyScanHelper(stmt, String.format(" SELECT k2 FROM %s " + "WHERE k1 = 4 AND k3 = 4", T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, T4_NO_PKEY_NAME), - T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 40, 482, 5); + T4_NO_PKEY_NAME, T4_NO_PKEY_SINDEX_3_NAME, 40, 482, 1, 0, 5); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k1 = 4", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 21, 20, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 21, 20, 0, 0, 10); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k1 >= 4", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 341, 340, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 341, 340, 0, 0, 10); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k1 IN (4, 8, 12)", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 63, 63, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K1_NAME, 63, 63, 0, 0, 10); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k2 = 4", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 21, 20, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 21, 20, 0, 0, 10); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k2 >= 4", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 341, 340, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 341, 340, 0, 0, 10); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, 
String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s WHERE k2 IN (4, 8, 12)", T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, T2_NO_PKEY_NAME), - T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 63, 63, 10); + T2_NO_PKEY_NAME, T2_NO_PKEY_SINDEX_K2_NAME, 63, 63, 0, 0, 10); + // Try a non-colocated table with a secondary index. + testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, + String.format("/*+IndexScan(%s %s)*/ SELECT * FROM %s " + + "WHERE k1 < 10 /* t5 query 1 */", T5_NAME, T5_K1_INDEX_NAME, T5_NAME), + T5_NAME, T5_K1_INDEX_NAME, 93, 450, 1, 1, 10); } } @@ -902,7 +966,7 @@ public void testSeekNextEstimationBitmapScanExceedingWorkMem() throws Exception final String query = "/*+ %s(t) */ SELECT * FROM %s AS t WHERE %s >= 4 AND %s >= 4"; testSeekAndNextEstimationSeqScanHelper(stmt, String.format(query, "SeqScan", T4_NAME, "k1", "k2"), - T4_NAME, estimated_seeks, estimated_nexts, 20); + T4_NAME, estimated_seeks, estimated_nexts, 1, 20); testSeekAndNextEstimationBitmapScanHelper(stmt, String.format(query, "BitmapScan", T4_NAME, "k1", "k2"), T4_NAME, estimated_seeks, estimated_nexts, 20, @@ -910,7 +974,7 @@ public void testSeekNextEstimationBitmapScanExceedingWorkMem() throws Exception testSeekAndNextEstimationSeqScanHelper(stmt, String.format(query, "SeqScan", T4_NAME, "k1", "k3"), - T4_NAME, estimated_seeks, estimated_nexts, 20); + T4_NAME, estimated_seeks, estimated_nexts, 1, 20); testSeekAndNextEstimationBitmapScanHelper(stmt, String.format(query, "BitmapScan", T4_NAME, "k1", "k3"), T4_NAME, estimated_seeks, estimated_nexts, 20, @@ -918,7 +982,7 @@ public void testSeekNextEstimationBitmapScanExceedingWorkMem() throws Exception testSeekAndNextEstimationSeqScanHelper(stmt, String.format(query, "SeqScan", T4_NAME, "k1", "k4"), - T4_NAME, estimated_seeks, estimated_nexts, 20); + T4_NAME, estimated_seeks, estimated_nexts, 1, 20); testSeekAndNextEstimationBitmapScanHelper(stmt, String.format(query, "BitmapScan", T4_NAME, "k1", "k4"), T4_NAME, 
estimated_seeks, estimated_nexts, 20, @@ -932,109 +996,109 @@ public void testSeekNextEstimationSeqScan() throws Exception { stmt.execute(String.format("SET enable_indexscan=off")); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8)", T1_NAME, T1_NAME), - T1_NAME, 1, 19, 5); + T1_NAME, 1, 19, 1, 5); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12)", T1_NAME, T1_NAME), - T1_NAME, 1, 19, 5); + T1_NAME, 1, 19, 1, 5); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16)", T1_NAME, T1_NAME), - T1_NAME, 1, 19, 5); + T1_NAME, 1, 19, 1, 5); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12) AND k4 IN (4, 8, 12, 16)", T4_NAME, T4_NAME), - T4_NAME, 5, 160003, 20); + T4_NAME, 5, 160003, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k2 IN (4, 8, 12, 16) AND k4 IN (4, 8, 12)", T4_NAME, T4_NAME), - T4_NAME, 5, 160003, 20); + T4_NAME, 5, 160003, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16) AND k2 IN (4, 8, 12, 16)", T4_NAME, T4_NAME), - T4_NAME, 7, 160005, 20); + T4_NAME, 7, 160005, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T1_NAME, T1_NAME), - T1_NAME, 1, 19, 5); + 
T1_NAME, 1, 19, 1, 5); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T3_NAME, T3_NAME), - T3_NAME, 4, 8002, 15); + T3_NAME, 4, 8002, 1, 15); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14", T4_NAME, T4_NAME), - T4_NAME, 79, 160077, 20); + T4_NAME, 79, 160077, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 14", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k3 >= 4 and k3 < 14", T3_NAME, T3_NAME), - T3_NAME, 4, 8002, 15); + T3_NAME, 4, 8002, 1, 15); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k4 >= 4 and k4 < 14", T4_NAME, T4_NAME), - T4_NAME, 79, 160077, 20); + T4_NAME, 79, 160077, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k3 >= 4 and k3 < 14", T4_NAME, T4_NAME), - T4_NAME, 79, 160077, 20); + T4_NAME, 79, 160077, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 14", T4_NAME, T4_NAME), - T4_NAME, 79, 160077, 20); + T4_NAME, 79, 160077, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14 and k3 >= 4 and k3 < 14", T4_NAME, T4_NAME), - T4_NAME, 40, 160038, 20); + T4_NAME, 40, 160038, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 = 4 and k2 IN (4, 8, 12, 
16)", T4_NAME, T4_NAME), - T4_NAME, 2, 160000, 20); + T4_NAME, 2, 160000, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (4, 8, 12, 16) and k2 = 4", T4_NAME, T4_NAME), - T4_NAME, 2, 160000, 20); + T4_NAME, 2, 160000, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k3 IN (4, 8, 12, 16) and k4 = 4", T4_NAME, T4_NAME), - T4_NAME, 2, 160000, 20); + T4_NAME, 2, 160000, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 5 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 6 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 14 and k2 IN (4, 8, 12, 16)", T2_NAME, T2_NAME), - T2_NAME, 1, 399, 10); + T2_NAME, 1, 399, 1, 10); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 and k1 < 7 and k3 IN (4, 8, 12, 16)", T3_NAME, T3_NAME), - T3_NAME, 2, 8000, 15); + T3_NAME, 2, 8000, 1, 15); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k2 >= 4 and k2 < 7 and k4 IN (4, 8, 12)", T4_NAME, T4_NAME), - T4_NAME, 4, 160002, 20); + T4_NAME, 4, 160002, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (1, 4, 7, 10)", T4_NAME, T4_NAME), - T4_NAME, 32, 160029, 20); + T4_NAME, 32, 160029, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k2 >= 4", T4_NAME, T4_NAME), - T4_NAME, 114, 160112, 
20); + T4_NAME, 114, 160112, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k2 >= 4", T4_NAME, T4_NAME), - T4_NAME, 67, 160065, 20); + T4_NAME, 67, 160065, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k2 >= 4 AND k2 < 14", T4_NAME, T4_NAME), - T4_NAME, 40, 160038, 20); + T4_NAME, 40, 160038, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k2 >= 4 AND k2 < 14", T4_NAME, T4_NAME), - T4_NAME, 67, 160065, 20); + T4_NAME, 67, 160065, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 IN (1, 4, 7, 10) AND k2 IN (1, 4, 7, 10)", T4_NAME, T4_NAME), - T4_NAME, 5, 160003, 20); + T4_NAME, 5, 160003, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k3 >= 4", T4_NAME, T4_NAME), - T4_NAME, 114, 160112, 20); + T4_NAME, 114, 160112, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k1 < 14 AND k3 >= 4 AND k3 < 14", T4_NAME, T4_NAME), - T4_NAME, 40, 160038, 20); + T4_NAME, 40, 160038, 1, 20); testSeekAndNextEstimationSeqScanHelper(stmt, String.format("/*+SeqScan(%s)*/ SELECT * " + "FROM %s WHERE k1 >= 4 AND k3 >= 4 AND k3 < 14", T4_NAME, T4_NAME), - T4_NAME, 67, 160065, 20); + T4_NAME, 67, 160065, 1, 20); } } @@ -1052,14 +1116,14 @@ public void testSeekNextEstimationStorageIndexFilters() throws Exception { */ testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(test test_index_k1) */ SELECT * FROM test WHERE k1 > 50000 and v1 > 80000", - "test", "test_index_k1", 50000, 50000, 10); + "test", "test_index_k1", 50000, 50000, 0, 0, 10); /* The filter on v1 will be executed on the included 
column in test_index_k1_v1. As a result, * fewer seeks will be needed on the base table. */ testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(test test_index_k1_v1) */ SELECT * FROM test WHERE k1 > 50000 and v1 > 80000", - "test", "test_index_k1_v1", 10000, 50000, 10); + "test", "test_index_k1_v1", 10000, 50000, 0, 0, 10); } } @@ -1084,27 +1148,27 @@ public void testSeekNextEstimationSeekForwardOptimization() throws Exception { */ testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k2 IN (4, 5, 6, 7)", - T4_NAME, T4_INDEX_NAME, 132, 32200, 20); + T4_NAME, T4_INDEX_NAME, 132, 32200, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k2 IN (4, 6, 8, 10)", - T4_NAME, T4_INDEX_NAME, 132, 32200, 20); + T4_NAME, T4_INDEX_NAME, 132, 32200, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k4 IN (4, 5, 6, 7)", - T4_NAME, T4_INDEX_NAME, 40031, 80000, 20); + T4_NAME, T4_INDEX_NAME, 40031, 80000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k4 IN (4, 6, 8, 10)", - T4_NAME, T4_INDEX_NAME, 40031, 80000, 20); + T4_NAME, T4_INDEX_NAME, 40031, 80000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k4 IN (4, 7, 10, 13)", - T4_NAME, T4_INDEX_NAME, 40031, 80000, 20); + T4_NAME, T4_INDEX_NAME, 40031, 80000, 1, 0, 20); testSeekAndNextEstimationIndexScanHelper(stmt, "/*+IndexScan(t4)*/ SELECT * FROM t4 WHERE k4 IN (4, 8, 12, 16)", - T4_NAME, T4_INDEX_NAME, 40031, 80000, 20); + T4_NAME, T4_INDEX_NAME, 40031, 80000, 1, 0, 20); } } @@ -1130,16 +1194,16 @@ public void testSeekNextEstimation25862IntegerOverflow() throws Exception { testSeekAndNextEstimationSeqScanHelper_IgnoreActualResults(stmt, "/*+ SeqScan(t_25862) */ SELECT * FROM 
t_25862 WHERE k1 > 0", - "t_25862", 1302084.0, 4001302082.0, 2); + "t_25862", 1302084.0, 4001302082.0, 1, 2); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+ IndexScan(t_25862 t_25862_pkey) */ SELECT * FROM t_25862 WHERE k1 > 0", - "t_25862", "t_25862_pkey", 1302084.0, 1333333334.0, 2); + "t_25862", "t_25862_pkey", 1302084.0, 1333333334.0, 1, 0, 2); testSeekAndNextEstimationIndexScanHelper_IgnoreActualResults(stmt, "/*+ IndexScan(t_25862 t_25862_idx) */ SELECT * FROM t_25862 WHERE v1 > 0", - "t_25862", "t_25862_idx", 1334635417.0, 1333333334.0, 2); + "t_25862", "t_25862_idx", 1334635417.0, 1333333334.0, 0, 0, 2); testSeekAndNextEstimationIndexOnlyScanHelper_IgnoreActualResults(stmt, "/*+ IndexOnlyScan(t_25862 t_25862_idx) */ SELECT v1 FROM t_25862 WHERE v1 > 0", - "t_25862", "t_25862_idx", 1302084.0, 1333333334.0, 1); + "t_25862", "t_25862_idx", 1302084.0, 1333333334.0, 1, 0, 1); } } diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgFastBackwardIndexScan.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgFastBackwardIndexScan.java new file mode 100644 index 000000000000..13ff5511f257 --- /dev/null +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgFastBackwardIndexScan.java @@ -0,0 +1,216 @@ +package org.yb.pgsql; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yb.YBTestRunner; +import org.yb.pgsql.ExplainAnalyzeUtils.PlanCheckerBuilder; +import org.yb.pgsql.ExplainAnalyzeUtils.TopLevelCheckerBuilder; +import org.yb.util.json.Checkers; +import org.yb.util.json.JsonUtil; +import org.yb.util.json.ValueChecker; + +import static org.yb.pgsql.ExplainAnalyzeUtils.NODE_INDEX_ONLY_SCAN; +import static org.yb.pgsql.ExplainAnalyzeUtils.NODE_LIMIT; +import static org.yb.pgsql.ExplainAnalyzeUtils.INDEX_SCAN_DIRECTION_BACKWARD; 
+import static org.yb.pgsql.ExplainAnalyzeUtils.testExplainDebug; + +@RunWith(value=YBTestRunner.class) +public class TestPgFastBackwardIndexScan extends BasePgSQLTest { + + private static final double SEEK_FAULT_TOLERANCE_OFFSET = 1; + private static final double SEEK_FAULT_TOLERANCE_RATE = 0.2; + private static final double SEEK_LOWER_BOUND_FACTOR = 1 - SEEK_FAULT_TOLERANCE_RATE; + private static final double SEEK_UPPER_BOUND_FACTOR = 1 + SEEK_FAULT_TOLERANCE_RATE; + private static final double NEXT_FAULT_TOLERANCE_OFFSET = 2; + private static final double NEXT_FAULT_TOLERANCE_RATE = 0.5; + private static final double NEXT_LOWER_BOUND_FACTOR = 1 - NEXT_FAULT_TOLERANCE_RATE; + private static final double NEXT_UPPER_BOUND_FACTOR = 1 + NEXT_FAULT_TOLERANCE_RATE; + + private static final Logger LOG = + LoggerFactory.getLogger(TestPgFastBackwardIndexScan.class); + + private static TopLevelCheckerBuilder makeTopLevelBuilder() { + return JsonUtil.makeCheckerBuilder(TopLevelCheckerBuilder.class, false); + } + + private static PlanCheckerBuilder makePlanBuilder() { + return JsonUtil.makeCheckerBuilder(PlanCheckerBuilder.class, false); + } + + @Before + public void setUp() throws Exception { + try (Statement stmt = connection.createStatement()) { + stmt.execute("SET yb_enable_base_scans_cost_model = true"); + } + } + + @After + public void tearDown() throws Exception { + try (Statement stmt = connection.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS test"); + } + } + + @Override + protected Map getTServerFlags() { + Map flagMap = super.getTServerFlags(); + flagMap.put("use_fast_backward_scan", "true"); + flagMap.put("ysql_enable_packed_row_for_colocated_table", "true"); + return flagMap; + } + + @Override + protected Map getMasterFlags() { + Map flagMap = super.getMasterFlags(); + flagMap.put("use_fast_backward_scan", "true"); + flagMap.put("ysql_enable_packed_row_for_colocated_table", "true"); + return flagMap; + } + + private ValueChecker 
expectedSeeksRange(double expected_seeks) { + double expected_lower_bound = expected_seeks * SEEK_LOWER_BOUND_FACTOR + - SEEK_FAULT_TOLERANCE_OFFSET; + double expected_upper_bound = expected_seeks * SEEK_UPPER_BOUND_FACTOR + + SEEK_FAULT_TOLERANCE_OFFSET; + return Checkers.closed(expected_lower_bound, expected_upper_bound); + } + + private ValueChecker expectedNextsRange(double expected_nexts) { + double expected_lower_bound = expected_nexts * NEXT_LOWER_BOUND_FACTOR + - NEXT_FAULT_TOLERANCE_OFFSET; + double expected_upper_bound = expected_nexts * NEXT_UPPER_BOUND_FACTOR + + NEXT_FAULT_TOLERANCE_OFFSET; + return Checkers.closed(expected_lower_bound, expected_upper_bound); + } + + private void testSeekAndNextEstimationIndexOnlyScanBackwardHelper( + Statement stmt, String query, + String table_name, String index_name, + double expected_seeks, + double expected_nexts) throws Exception { + try { + testExplainDebug(stmt, + String.format("/*+ IndexOnlyScan(%s %s) */ %s", table_name, index_name, query), + makeTopLevelBuilder() + .plan(makePlanBuilder() + .nodeType(NODE_INDEX_ONLY_SCAN) + .scanDirection(INDEX_SCAN_DIRECTION_BACKWARD) + .relationName(table_name) + .indexName(index_name) + .estimatedSeeks(expectedSeeksRange(expected_seeks)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .build()) + .build()); + } + catch (AssertionError e) { + LOG.info("Failed Query: " + query); + LOG.info(e.toString()); + throw e; + } + } + + private void testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper( + Statement stmt, String query, + String table_name, String index_name, + double expected_seeks, + double expected_nexts) throws Exception { + try { + testExplainDebug(stmt, + String.format("/*+ IndexOnlyScan(%s %s) */ %s", table_name, index_name, query), + makeTopLevelBuilder() + .plan(makePlanBuilder() + .nodeType(NODE_LIMIT) + .plans(makePlanBuilder() + .nodeType(NODE_INDEX_ONLY_SCAN) + .scanDirection(INDEX_SCAN_DIRECTION_BACKWARD) + .relationName(table_name) + 
.indexName(index_name) + .estimatedSeeks(expectedSeeksRange(expected_seeks)) + .estimatedNextsAndPrevs(expectedNextsRange(expected_nexts)) + .build()) + .build()) + .build()); + } + catch (AssertionError e) { + LOG.info("Failed Query: " + query); + LOG.info(e.toString()); + throw e; + } + } + + @Test + public void testSeekNextEstimationIndexScan() throws Exception { + setConnMgrWarmupModeAndRestartCluster(ConnectionManagerWarmupMode.ROUND_ROBIN); + boolean isConnMgr = isTestRunningWithConnectionManager(); + if (isConnMgr) { + setUp(); + } + + try (Statement stmt = connection.createStatement()) { + stmt.execute("CREATE TABLE t1 (k1 INT, k2 INT, v1 INT)"); + stmt.execute("INSERT INTO t1 SELECT s1, s2, s2 FROM " + + "generate_series(1, 300) s1, generate_series(1, 300) s2"); + + stmt.execute("CREATE INDEX t1_idx_1 ON t1 (k1 ASC, k2 ASC)"); + stmt.execute("ANALYZE t1"); + + /* Index Scan node isn't aware of LIMIT on top, expects to return all 125k rows */ + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 DESC LIMIT 5000", + "t1", "t1_idx_1", 89, 90000); + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 DESC LIMIT 10000", + "t1", "t1_idx_1", 89, 90000); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 < 100 ORDER BY k1 DESC", + "t1", "t1_idx_1",30, 30000); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 > 200 ORDER BY k1 DESC", + "t1", "t1_idx_1",30, 30000); + + /* When filter on k2, additional seeks are needed for skip scan */ + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 < 100 ORDER BY k1 DESC", + "t1", "t1_idx_1",330, 30000); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 > 200 ORDER BY k1 DESC", + "t1", "t1_idx_1",330, 30000); + + stmt.execute("DROP INDEX t1_idx_1"); + + 
stmt.execute("CREATE INDEX t1_idx_2 ON t1 (k1 DESC, k2 ASC)"); + stmt.execute("ANALYZE t1"); + + /* Index Scan node isn't aware of LIMIT on top, expects to return all 125k rows */ + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 ASC LIMIT 5000", + "t1", "t1_idx_2", 89, 125000); + testSeekAndNextEstimationLimitIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 ORDER BY k1 ASC LIMIT 10000", + "t1", "t1_idx_2", 89, 125000); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 < 100 ORDER BY k1 ASC", + "t1", "t1_idx_2", 30, 30000); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k1 > 200 ORDER BY k1 ASC", + "t1", "t1_idx_2", 30, 30000); + + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 < 100 ORDER BY k1 ASC", + "t1", "t1_idx_2", 330, 30000); + testSeekAndNextEstimationIndexOnlyScanBackwardHelper(stmt, + "SELECT k1, k2 FROM t1 WHERE k2 > 200 ORDER BY k1 ASC", + "t1", "t1_idx_2", 330, 30000); + } + } +} diff --git a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgReplicationSlot.java b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgReplicationSlot.java index 3ce3dd7a39ce..ce0ce2a6986f 100644 --- a/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgReplicationSlot.java +++ b/java/yb-pgsql/src/test/java/org/yb/pgsql/TestPgReplicationSlot.java @@ -83,6 +83,7 @@ protected Map getTServerFlags() { flagMap.put( "cdcsdk_publication_list_refresh_interval_secs","" + kPublicationRefreshIntervalSec); flagMap.put("cdc_send_null_before_image_if_not_exists", "true"); + flagMap.put("TEST_dcheck_for_missing_schema_packing", "false"); return flagMap; } diff --git a/java/yb-sample/pom.xml b/java/yb-sample/pom.xml index 4d2d6ad19627..aea44a156574 100644 --- a/java/yb-sample/pom.xml +++ b/java/yb-sample/pom.xml @@ -8,7 +8,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-sample YB Manual 
Support diff --git a/java/yb-ysql-conn-mgr/pom.xml b/java/yb-ysql-conn-mgr/pom.xml index c6737ec8b7cc..318817e8c4a5 100644 --- a/java/yb-ysql-conn-mgr/pom.xml +++ b/java/yb-ysql-conn-mgr/pom.xml @@ -22,7 +22,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-ysql-conn-mgr Ysql Connection Manager Tests diff --git a/java/yb-yugabyted/pom.xml b/java/yb-yugabyted/pom.xml index 1313c1e43734..ba919ecdb6d9 100644 --- a/java/yb-yugabyted/pom.xml +++ b/java/yb-yugabyted/pom.xml @@ -10,7 +10,7 @@ org.yb yb-parent - 0.8.103-SNAPSHOT + 0.8.104-SNAPSHOT yb-yugabyted diff --git a/managed/RUNTIME-FLAGS.md b/managed/RUNTIME-FLAGS.md index f031a8723787..612be4136b2b 100644 --- a/managed/RUNTIME-FLAGS.md +++ b/managed/RUNTIME-FLAGS.md @@ -21,6 +21,7 @@ | "Default Metric Graph Point Count" | "yb.metrics.default_points" | "CUSTOMER" | "Default Metric Graph Point Count, if step is not defined in the query" | "Integer" | | "Fetch Batch Size of Task Info" | "yb.task_info_db_query_batch_size" | "CUSTOMER" | "Knob that can be used to make lesser number of calls to DB" | "Integer" | | "Use Ansible for provisioning" | "yb.node_agent.use_ansible_provisioning" | "CUSTOMER" | "If enabled use Ansible for provisioning" | "Boolean" | +| "Notify user on password reset" | "yb.user.send_password_reset_notification" | "CUSTOMER" | "If enabled, user will be notified on password reset" | "Boolean" | | "Allow Unsupported Instances" | "yb.internal.allow_unsupported_instances" | "PROVIDER" | "Enabling removes supported instance type filtering on AWS providers." 
| "Boolean" | | "Default AWS Instance Type" | "yb.aws.default_instance_type" | "PROVIDER" | "Default AWS Instance Type" | "String" | | "Default GCP Instance Type" | "yb.gcp.default_instance_type" | "PROVIDER" | "Default GCP Instance Type" | "String" | @@ -95,6 +96,7 @@ | "Shell Output Max Directory Size" | "yb.logs.shell.output_dir_max_size" | "GLOBAL" | "Output logs for shell commands are written to tmp folder.This setting defines rotation policy based on directory size." | "Bytes" | | "Max Size of each log message" | "yb.logs.max_msg_size" | "GLOBAL" | "We limit the length of each log line as sometimes we dump entire output of script. If you want to debug something specific and the script output isgetting truncated in application log then increase this limit" | "Bytes" | | "KMS Refresh Interval" | "yb.kms.refresh_interval" | "GLOBAL" | "Default refresh interval for the KMS providers." | "Duration" | +| "Allow CipherTrust KMS" | "yb.kms.allow_ciphertrust" | "GLOBAL" | "Allow the usage of CipherTrust KMS." | "Boolean" | | "Percentage of Hashicorp vault TTL to renew the token after" | "yb.kms.hcv_token_renew_percent" | "GLOBAL" | "HashiCorp Vault tokens expire when their TTL is reached. This setting renews the token after it has used the specified percentage of its original TTL. Default: 70%." 
| "Integer" | | "Start Master On Stop Node" | "yb.start_master_on_stop_node" | "GLOBAL" | "Auto-start master process on a similar available node on stopping a master node" | "Boolean" | | "Start Master On Remove Node" | "yb.start_master_on_remove_node" | "GLOBAL" | "Auto-start master process on a similar available node on removal of a master node" | "Boolean" | @@ -325,3 +327,4 @@ | "NFS precheck buffer space" | "yb.backup.nfs_precheck_buffer_kb" | "UNIVERSE" | "Amount of space (in KB) we want as buffer for NFS precheck" | "Long" | | "Wait after each pod restart in rolling operations" | "yb.kubernetes.operator.rolling_ops_wait_after_each_pod_ms" | "UNIVERSE" | "Time to wait after each pod restart before restarting the next pod in rolling operations" | "Integer" | | "Backup and restore to use pre roles behaviour" | "ybc.revert_to_pre_roles_behaviour" | "UNIVERSE" | "Have YBC use the pre roles backup and restore behaviour" | "Boolean" | +| "Enable backups during DDL" | "yb.backup.enable_backups_during_ddl" | "UNIVERSE" | "Have YBC ysql-dump use read-time as of snapshot time to support backups during DDL" | "Boolean" | diff --git a/managed/build.sbt b/managed/build.sbt index 8da7d1e93542..e43d152efc13 100644 --- a/managed/build.sbt +++ b/managed/build.sbt @@ -1,15 +1,16 @@ import jline.console.ConsoleReader import play.sbt.PlayImport.PlayKeys.{playInteractionMode, playMonitoredFiles} import play.sbt.PlayInteractionMode + import java.io.File import java.nio.charset.StandardCharsets import java.nio.file.{FileSystems, Files, Paths} import sbt.complete.Parsers.spaceDelimited -import sbt.Tests._ +import sbt.Tests.* -import scala.collection.JavaConverters._ +import scala.collection.JavaConverters.* import scala.sys.process.Process -import scala.sys.process._ +import scala.sys.process.* historyPath := Some(file(System.getenv("HOME") + "/.sbt/.yugaware-history")) @@ -171,7 +172,10 @@ libraryDependencies ++= Seq( // https://github.com/YugaByte/cassandra-java-driver/releases 
"com.yugabyte" % "cassandra-driver-core" % "3.8.0-yb-7", "org.yaml" % "snakeyaml" % "2.1", - "org.bouncycastle" % "bcpkix-jdk18on" % "1.80", + "org.bouncycastle" % "bc-fips" % "2.1.0", + "org.bouncycastle" % "bcpkix-fips" % "2.1.9", + "org.bouncycastle" % "bctls-fips" % "2.1.20", + "org.mindrot" % "jbcrypt" % "0.4", "org.springframework.security" % "spring-security-core" % "5.8.16", "com.amazonaws" % "aws-java-sdk-ec2" % "1.12.768", "com.amazonaws" % "aws-java-sdk-kms" % "1.12.768", @@ -241,7 +245,7 @@ libraryDependencies ++= Seq( "io.jsonwebtoken" % "jjwt-impl" % "0.11.5", "io.jsonwebtoken" % "jjwt-jackson" % "0.11.5", "io.swagger" % "swagger-annotations" % "1.6.1", // needed for annotations in prod code - "de.dentrassi.crypto" % "pem-keystore" % "2.2.1", + "de.dentrassi.crypto" % "pem-keystore" % "3.0.0", // Prod dependency temporary as we use HSQLDB as a dummy perf_advisor DB for YBM scenario // Remove once YBM starts using real PG DB. "org.hsqldb" % "hsqldb" % "2.7.1", @@ -924,8 +928,8 @@ runPlatform := { Project.extract(newState).runTask(runPlatformTask, newState) } -libraryDependencies += "org.yb" % "yb-client" % "0.8.103-SNAPSHOT" -libraryDependencies += "org.yb" % "ybc-client" % "2.2.0.2-b2" +libraryDependencies += "org.yb" % "yb-client" % "0.8.104-SNAPSHOT" +libraryDependencies += "org.yb" % "ybc-client" % "2.2.0.2-b3" libraryDependencies += "org.yb" % "yb-perf-advisor" % "1.0.0-b35" libraryDependencies ++= Seq( @@ -1006,6 +1010,10 @@ dependencyOverrides ++= jacksonOverrides excludeDependencies += "org.eclipse.jetty" % "jetty-io" excludeDependencies += "org.eclipse.jetty" % "jetty-server" excludeDependencies += "commons-collections" % "commons-collections" +excludeDependencies += "org.bouncycastle" % "bcpkix-jdk15on" +excludeDependencies += "org.bouncycastle" % "bcprov-jdk15on" +excludeDependencies += "org.bouncycastle" % "bcpkix-jdk18on" +excludeDependencies += "org.bouncycastle" % "bcprov-jdk18on" Global / concurrentRestrictions := Seq(Tags.limitAll(16)) 
@@ -1138,7 +1146,13 @@ lazy val swagger = project dependencyOverrides ++= jacksonOverrides, dependencyOverrides += "org.scala-lang.modules" %% "scala-xml" % "2.1.0", - swaggerGen := Def.taskDyn { + excludeDependencies += "org.bouncycastle" % "bcpkix-jdk15on", + excludeDependencies += "org.bouncycastle" % "bcprov-jdk15on", + excludeDependencies += "org.bouncycastle" % "bcpkix-jdk18on", + excludeDependencies += "org.bouncycastle" % "bcprov-jdk18on", + + +swaggerGen := Def.taskDyn { // Consider generating this only in managedResources val swaggerJson = (root / Compile / resourceDirectory).value / "swagger.json" val swaggerStrictJson = (root / Compile / resourceDirectory).value / "swagger-strict.json" diff --git a/managed/devops/opscli/ybops/cloud/common/method.py b/managed/devops/opscli/ybops/cloud/common/method.py index dadfa4d6acfb..7ea1c512ed78 100644 --- a/managed/devops/opscli/ybops/cloud/common/method.py +++ b/managed/devops/opscli/ybops/cloud/common/method.py @@ -1353,6 +1353,8 @@ def prepare(self): help="Path to GCP credentials file used for logs export.") self.parser.add_argument('--ycql_audit_log_level', default=None, help="YCQL audit log level.") + self.parser.add_argument('--skip_ansible_configure_playbook', action="store_true", + help="If specified will not run the ansible playbooks.") def get_ssh_user(self): # Force the yugabyte user for configuring instances. The configure step performs YB specific @@ -1696,7 +1698,7 @@ def callback(self, args): if delete_paths: self.extra_vars["delete_paths"] = delete_paths # If we are just rotating certs, we don't need to do any configuration changes. 
- if not rotate_certs: + if not rotate_certs and not args.skip_ansible_configure_playbook: self.cloud.setup_ansible(args).run( "configure-{}.yml".format(args.type), self.extra_vars, host_info) diff --git a/managed/node-agent/app/server/rpc.go b/managed/node-agent/app/server/rpc.go index 408f7ec431ba..66f88320cf85 100644 --- a/managed/node-agent/app/server/rpc.go +++ b/managed/node-agent/app/server/rpc.go @@ -350,6 +350,58 @@ func (server *RPCServer) SubmitTask( res.TaskId = taskID return res, nil } + installYbcInput := req.GetInstallYbcInput() + if installYbcInput != nil { + installYbcHandler := task.NewInstallYbcHandler(installYbcInput, username) + err := task.GetTaskManager().Submit(ctx, taskID, installYbcHandler) + if err != nil { + util.FileLogger().Errorf(ctx, "Error in running install ybc - %s", err.Error()) + return res, status.Error(codes.Internal, err.Error()) + } + res.TaskId = taskID + return res, nil + } + configureServerInput := req.GetConfigureServerInput() + if configureServerInput != nil { + configureServerHandler := task.NewConfigureServerHandler(configureServerInput, username) + err := task.GetTaskManager().Submit(ctx, taskID, configureServerHandler) + if err != nil { + util.FileLogger().Errorf(ctx, "Error in running configure server - %s", err.Error()) + return res, status.Error(codes.Internal, err.Error()) + } + res.TaskId = taskID + return res, nil + } + installOtelCollectorInput := req.GetInstallOtelCollectorInput() + if installOtelCollectorInput != nil { + installOtelCollectorHandler := task.NewInstallOtelCollectorHandler( + installOtelCollectorInput, + username, + ) + err := task.GetTaskManager().Submit(ctx, taskID, installOtelCollectorHandler) + if err != nil { + util.FileLogger(). 
+ Errorf(ctx, "Error in running install otel collector - %s", err.Error()) + return res, status.Error(codes.Internal, err.Error()) + } + res.TaskId = taskID + return res, nil + } + setupCGroupInput := req.GetSetupCGroupInput() + if setupCGroupInput != nil { + SetupCgroupHandler := task.NewSetupCgroupHandler( + setupCGroupInput, + username, + ) + err := task.GetTaskManager().Submit(ctx, taskID, SetupCgroupHandler) + if err != nil { + util.FileLogger(). + Errorf(ctx, "Error in running setup cGroup - %s", err.Error()) + return res, status.Error(codes.Internal, err.Error()) + } + res.TaskId = taskID + return res, nil + } return res, status.Error(codes.Unimplemented, "Unknown task") } diff --git a/managed/node-agent/app/task/configure_server.go b/managed/node-agent/app/task/configure_server.go new file mode 100644 index 000000000000..3215bc6eb724 --- /dev/null +++ b/managed/node-agent/app/task/configure_server.go @@ -0,0 +1,316 @@ +// Copyright (c) YugaByte, Inc. + +package task + +import ( + "context" + "errors" + "fmt" + "io/fs" + "node-agent/app/task/helpers" + "node-agent/app/task/module" + pb "node-agent/generated/service" + "node-agent/util" + "path/filepath" + "strings" +) + +const ( + SystemdUnitPath = ".config/systemd/user" + ServerTemplateSubpath = "server/" +) + +var SystemdUnits = []string{ + "yb-zip_purge_yb_logs.service", + "yb-clean_cores.service", + "yb-collect_metrics.service", + "yb-zip_purge_yb_logs.timer", + "yb-clean_cores.timer", + "yb-collect_metrics.timer", +} + +type ConfigureServerHandler struct { + shellTask *ShellTask + param *pb.ConfigureServerInput + username string + logOut util.Buffer +} + +func NewConfigureServerHandler( + param *pb.ConfigureServerInput, + username string, +) *ConfigureServerHandler { + return &ConfigureServerHandler{ + param: param, + username: username, + logOut: util.NewBuffer(module.MaxBufferCapacity), + } +} + +// CurrentTaskStatus implements the AsyncTask method. 
+func (h *ConfigureServerHandler) CurrentTaskStatus() *TaskStatus { + return &TaskStatus{ + Info: h.logOut, + ExitStatus: &ExitStatus{}, + } +} + +func (h *ConfigureServerHandler) String() string { + return "Configure Server Task" +} + +func (h *ConfigureServerHandler) Handle(ctx context.Context) (*pb.DescribeTaskResponse, error) { + util.FileLogger().Info(ctx, "Starting configure server handler.") + + // 0. Validate that the processes are specified. + if len(h.param.GetProcesses()) == 0 { + err := errors.New("processes is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 1) figure out home dir + home := "" + if h.param.GetYbHomeDir() != "" { + home = h.param.GetYbHomeDir() + } else { + err := errors.New("ybHomeDir is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 2) determine yb_metric_dir + yb_metrics_dir := filepath.Join(h.param.GetRemoteTmp(), "yugabyte/metrics") + cmd := "systemctl show node_exporter | grep -oP '(?<=--collector.textfile.directory=)[^ ]+' | head -n1" + h.logOut.WriteLine("Determing the node_exporter textfile directory") + cmdInfo, err := module.RunShellCmd(ctx, h.username, h.String(), cmd, h.logOut) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed in %v - %s", cmd, err.Error()) + return nil, err + } + if cmdInfo.StdOut.String() != yb_metrics_dir { + yb_metrics_dir = filepath.Join(home, "metrics") + } + + // 3) Execute the shell commands. + err = h.execShellCommands(ctx, home) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed - %s", err.Error()) + return nil, err + } + + // 4) Setup the server scripts. + err = h.setupServerScript(ctx, home, yb_metrics_dir) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed - %s", err.Error()) + return nil, err + } + + // 5) Enable the user systemd units. 
+ err = h.enableSystemdServices(ctx, home) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed - %s", err.Error()) + return nil, err + } + + for _, process := range h.param.GetProcesses() { + // 6) Configure the individual specified process. + err = h.configureProcess(ctx, home, process) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed - %s", err.Error()) + return nil, err + } + } + + return nil, nil +} + +func (h *ConfigureServerHandler) configureProcess(ctx context.Context, home, process string) error { + mountPoints := h.param.GetMountPoints() + if len(mountPoints) == 0 { + return errors.New("mountPoints is required") + } + mountPoint := mountPoints[0] + steps := []struct { + Desc string + Cmd string + }{ + { + fmt.Sprintf("make-yb-%s-conf-dir", process), + fmt.Sprintf("mkdir -p %s", filepath.Join(home, process, "conf")), + }, + { + "create-mount-logs-directory", + fmt.Sprintf("mkdir -p %s", filepath.Join(mountPoint, "yb-data/", process, "logs")), + }, + { + "symlink-logs-to-yb-logs", + fmt.Sprintf( + "unlink %s > /dev/null 2>&1; ln -sf %s %s", + filepath.Join(home, process, "logs"), + filepath.Join(mountPoint, "yb-data/", process, "logs"), + filepath.Join(home, process, "logs"), + ), + }, + } + + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} + +func (h *ConfigureServerHandler) enableSystemdServices(ctx context.Context, home string) error { + for _, unit := range SystemdUnits { + cmd := module.EnableSystemdUnit(h.username, unit) + h.logOut.WriteLine("Running configure server phase: %s", cmd) + util.FileLogger().Infof(ctx, "Running command %v", cmd) + _, err := module.RunShellCmd(ctx, h.username, h.String(), cmd, h.logOut) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed in %v - %s", cmd, err.Error()) + return err + } + + if unit != "network-online.target" && unit[len(unit)-6:] == "timer" { + startCmd := 
module.StartSystemdUnit(h.username, unit) + h.logOut.WriteLine("Running configure server phase: %s", startCmd) + util.FileLogger().Infof(ctx, "Running command %v", startCmd) + _, err = module.RunShellCmd(ctx, h.username, h.String(), startCmd, h.logOut) + if err != nil { + util.FileLogger(). + Errorf(ctx, "Configure server failed in %v - %s", cmd, err.Error()) + return err + } + } + } + + info, err := helpers.GetOSInfo() + if err != nil { + util.FileLogger().Errorf(ctx, "Error retreiving OS information %s", err.Error()) + return err + } + + unitDir := "/lib/systemd/system" + if strings.Contains(info.ID, "suse") || strings.Contains(info.Family, "suse") { + unitDir = "/usr/lib/systemd/system" + } + + // Link network-online.target if required + linkCmd := fmt.Sprintf("systemctl --user link %s/network-online.target", unitDir) + h.logOut.WriteLine("Running configure server phase: %s", linkCmd) + util.FileLogger().Infof(ctx, "Running command %v", linkCmd) + _, err = module.RunShellCmd(ctx, h.username, h.String(), linkCmd, h.logOut) + if err != nil { + util.FileLogger().Errorf(ctx, "Configure server failed in %v - %s", linkCmd, err.Error()) + return err + } + return nil +} + +func (h *ConfigureServerHandler) setupServerScript( + ctx context.Context, + home, yb_metrics_dir string, +) error { + serverScriptContext := map[string]any{ + "mount_paths": strings.Join(h.param.GetMountPoints(), " "), + "user_name": h.username, + "yb_cores_dir": filepath.Join(home, "cores"), + "systemd_option": true, + "yb_home_dir": home, + "num_cores_to_keep": h.param.GetNumCoresToKeep(), + "yb_metrics_dir": yb_metrics_dir, + } + + // Copy yb-server-ctl.sh script. + err := module.CopyFile( + ctx, + serverScriptContext, + filepath.Join(ServerTemplateSubpath, "yb-server-ctl.sh.j2"), + filepath.Join(home, "bin", "yb-server-ctl.sh"), + fs.FileMode(0755), + ) + if err != nil { + return err + } + + // Copy clock-sync.sh script. 
+ err = module.CopyFile( + ctx, + serverScriptContext, + filepath.Join(ServerTemplateSubpath, "clock-sync.sh.j2"), + filepath.Join(home, "bin", "clock-sync.sh"), + fs.FileMode(0755), + ) + if err != nil { + return err + } + + // Copy clean_cores.sh script. + err = module.CopyFile( + ctx, + serverScriptContext, + filepath.Join(ServerTemplateSubpath, "clean_cores.sh.j2"), + filepath.Join(home, "bin", "clean_cores.sh"), + fs.FileMode(0755), + ) + if err != nil { + return err + } + + // Copy zip_purge_yb_logs.sh.sh script. + err = module.CopyFile( + ctx, + serverScriptContext, + filepath.Join(ServerTemplateSubpath, "zip_purge_yb_logs.sh.j2"), + filepath.Join(home, "bin", "zip_purge_yb_logs.sh"), + fs.FileMode(0755), + ) + if err != nil { + return err + } + + // Copy collect_metrics_wrapper.sh script. + err = module.CopyFile( + ctx, + serverScriptContext, + filepath.Join(ServerTemplateSubpath, "collect_metrics_wrapper.sh.j2"), + filepath.Join(home, "bin", "collect_metrics_wrapper.sh"), + fs.FileMode(0755), + ) + if err != nil { + return err + } + + return nil +} + +func (h *ConfigureServerHandler) execShellCommands( + ctx context.Context, + home string, +) error { + mountPoints := h.param.GetMountPoints() + if len(mountPoints) == 0 { + return errors.New("mountPoints is required") + } + mountPoint := mountPoints[0] + steps := []struct { + Desc string + Cmd string + }{ + {"make-yb-bin-dir", fmt.Sprintf("mkdir -p %s", filepath.Join(home, "bin"))}, + {"make-cores-dir", fmt.Sprintf("mkdir -p %s", filepath.Join(mountPoint, "cores"))}, + { + "symlink-cores-to-yb-cores", + fmt.Sprintf( + "unlink %s > /dev/null 2>&1; ln -sf %s %s", + filepath.Join(home, "cores"), + filepath.Join(mountPoint, "cores"), + filepath.Join(home, "cores"), + ), + }, + } + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} diff --git a/managed/node-agent/app/task/configure_service_task.go b/managed/node-agent/app/task/configure_service_task.go 
index c7b454fa24bd..22fd7bedf11b 100644 --- a/managed/node-agent/app/task/configure_service_task.go +++ b/managed/node-agent/app/task/configure_service_task.go @@ -82,7 +82,7 @@ func (handler *ConfigureServiceHandler) Handle( ) util.FileLogger(). - Infof(ctx, "Starting install/configure earlyoom handler with %v", handler.shellTask.command.RedactCommandArgs()) + Infof(ctx, "Starting install/configure earlyoom handler with %v", handler.shellTask.cmdInfo.RedactCommandArgs()) output, err := handler.shellTask.Process(ctx) if err != nil { diff --git a/managed/node-agent/app/task/helpers/yb_helper.go b/managed/node-agent/app/task/helpers/yb_helper.go index 55b7eeb68f02..386339443c40 100644 --- a/managed/node-agent/app/task/helpers/yb_helper.go +++ b/managed/node-agent/app/task/helpers/yb_helper.go @@ -3,10 +3,12 @@ package helpers import ( + "bufio" "fmt" "os" "path/filepath" "regexp" + "runtime" "strings" ) @@ -16,6 +18,15 @@ type Release struct { Name string } +// OSInfo represents parsed OS release info +type OSInfo struct { + ID string // e.g., "ubuntu" + Family string // e.g., "debian" + Pretty string // e.g., "Ubuntu 22.04.4 LTS" + Arch string // e.g., "x86_64" + Version string // e.g., "22" +} + var releaseFormat = regexp.MustCompile(`yugabyte[-_]([\d]+\.[\d]+\.[\d]+\.[\d]+-[a-z0-9]+)`) // extractReleaseFromArchive parses the archive filename and returns a Release. 
@@ -66,3 +77,45 @@ func ListDirectoryContent(dirPath string) ([]string, error) { } return names, nil } + +// GetOSInfo parses /etc/os-release and returns OS info +func GetOSInfo() (*OSInfo, error) { + file, err := os.Open("/etc/os-release") + if err != nil { + return nil, fmt.Errorf("failed to open /etc/os-release: %w", err) + } + defer file.Close() + + info := &OSInfo{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + // Remove quotes from values + if keyVal := strings.SplitN(line, "=", 2); len(keyVal) == 2 { + key := keyVal[0] + val := strings.Trim(keyVal[1], `"`) + switch key { + case "ID": + info.ID = strings.ToLower(val) + case "ID_LIKE": + info.Family = strings.ToLower(val) + case "PRETTY_NAME": + info.Pretty = val + case "VERSION_ID": + if parts := strings.SplitN(val, ".", 2); len(parts) > 0 { + info.Version = parts[0] + } + } + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading /etc/os-release: %w", err) + } + info.Arch = runtime.GOARCH + return info, nil +} + +func IsRhel9(osInfo *OSInfo) bool { + return (strings.Contains(osInfo.Family, "rhel") || strings.Contains(osInfo.ID, "rhel")) && + osInfo.Version == "9" +} diff --git a/managed/node-agent/app/task/install_otel_collector.go b/managed/node-agent/app/task/install_otel_collector.go new file mode 100644 index 000000000000..b88bb2074d9a --- /dev/null +++ b/managed/node-agent/app/task/install_otel_collector.go @@ -0,0 +1,297 @@ +// Copyright (c) YugaByte, Inc. 
+ +package task + +import ( + "context" + "errors" + "fmt" + "io/fs" + "node-agent/app/task/module" + pb "node-agent/generated/service" + "node-agent/util" + "path/filepath" +) + +type InstallOtelCollector struct { + shellTask *ShellTask + param *pb.InstallOtelCollectorInput + username string + logOut util.Buffer +} + +func NewInstallOtelCollectorHandler( + param *pb.InstallOtelCollectorInput, + username string, +) *InstallOtelCollector { + return &InstallOtelCollector{ + param: param, + username: username, + logOut: util.NewBuffer(module.MaxBufferCapacity), + } +} + +// CurrentTaskStatus implements the AsyncTask method. +func (h *InstallOtelCollector) CurrentTaskStatus() *TaskStatus { + return &TaskStatus{ + Info: h.logOut, + ExitStatus: &ExitStatus{}, + } +} + +func (h *InstallOtelCollector) String() string { + return "Install otel collector Task" +} + +func (h *InstallOtelCollector) Handle(ctx context.Context) (*pb.DescribeTaskResponse, error) { + util.FileLogger().Infof(ctx, "Starting otel collector installation") + + // 1) figure out home dir + home := "" + if h.param.GetYbHomeDir() != "" { + home = h.param.GetYbHomeDir() + } else { + err := errors.New("ybHomeDir is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 2) Put & setup the otel collector. + err := h.execOtelCollectorSetupSteps(ctx, home) + if err != nil { + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 3) Place the otel-collector.service at desired location. + otelCollectorServiceContext := map[string]any{ + "user_name": h.username, + "yb_home_dir": home, + } + + unit := "otel-collector.service" + // Copy otel-collector.service + err = module.CopyFile( + ctx, + otelCollectorServiceContext, + filepath.Join(ServerTemplateSubpath, unit), + filepath.Join(home, SystemdUnitPath, unit), + fs.FileMode(0755), + ) + + if err != nil { + return nil, err + } + + // 4) stop the systemd-unit if it's running. 
+ stopCmd := module.StopSystemdUnit(h.username, unit) + h.logOut.WriteLine("Running otel-collector server phase: %s", stopCmd) + if _, err := module.RunShellCmd(ctx, h.username, "stop-otel-collector", stopCmd, h.logOut); err != nil { + return nil, err + } + + // 5) Configure the otel-collector service. + err = h.configureOtelCollector(ctx, home) + if err != nil { + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 6) Start and enable the otel-collector service. + startCmd := module.StartSystemdUnit(h.username, unit) + h.logOut.WriteLine("Running otel-collector phase: %s", startCmd) + if _, err = module.RunShellCmd(ctx, h.username, "start-otel-collector", startCmd, h.logOut); err != nil { + return nil, err + } + + return nil, nil +} + +// GetOtelCollectorSetupSteps returns the sequence of steps needed for configuring the otel collector. +func (h *InstallOtelCollector) execOtelCollectorSetupSteps( + ctx context.Context, + ybHome string, +) error { + pkgName := filepath.Base(h.param.GetOtelColPackagePath()) + otelCollectorPackagePath := filepath.Join(h.param.GetRemoteTmp(), pkgName) + otelCollectorDirectory := filepath.Join(ybHome, "otel-collector") + mountPoint := "" + if len(h.param.GetMountPoints()) > 0 { + mountPoint = h.param.GetMountPoints()[0] + } + + steps := []struct { + Desc string + Cmd string + }{ + { + "make-yb-otel-collector-dir", + fmt.Sprintf( + "mkdir -p %s && chmod 0755 %s", + otelCollectorDirectory, + otelCollectorDirectory, + ), + }, + { + "untar-otel-collector", + fmt.Sprintf( + "tar --no-same-owner -xzvf %s -C %s", + otelCollectorPackagePath, + otelCollectorDirectory, + ), + }, + { + "ensure 755 permission for otelcol-contrib", + fmt.Sprintf( + "chmod -R 755 %s", + filepath.Join(otelCollectorDirectory, "otelcol-contrib"), + ), + }, + { + "create OpenTelemetry collector logs directory", + fmt.Sprintf( + "mkdir -p %s && chmod 0755 %s", + filepath.Join(mountPoint, "otel-collector/logs"), + filepath.Join(mountPoint, 
"otel-collector/logs"), + ), + }, + { + "symlink OpenTelemetry collector logs directory", + fmt.Sprintf( + "rm -rf %s && ln -sf %s %s && chmod 0755 %s", + filepath.Join(ybHome, "otel-collector/logs"), + filepath.Join(mountPoint, "otel-collector/logs"), + filepath.Join(ybHome, "otel-collector/logs"), + filepath.Join(ybHome, "otel-collector/logs"), + ), + }, + { + "create OpenTelemetry collector persistent queues directory", + fmt.Sprintf( + "mkdir -p %s && chmod 0755 %s", + filepath.Join(mountPoint, "otel-collector/queue"), + filepath.Join(mountPoint, "otel-collector/queue"), + ), + }, + { + "symlink OpenTelemetry collector persistent queues directory", + fmt.Sprintf( + "rm -rf %s && ln -sf %s %s && chmod 0755 %s", + filepath.Join(ybHome, "otel-collector/queue"), + filepath.Join(mountPoint, "otel-collector/queue"), + filepath.Join(ybHome, "otel-collector/queue"), + filepath.Join(ybHome, "otel-collector/queue"), + ), + }, + { + "delete-otel-collector-package", + fmt.Sprintf("rm -rf %s", otelCollectorPackagePath), + }, + } + + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} + +func (h *InstallOtelCollector) configureOtelCollector(ctx context.Context, ybHome string) error { + otelCollectorConfigFile := filepath.Join(ybHome, "otel-collector", "config.yml") + otelColLogCleanupEnv := filepath.Join(ybHome, "otel-collector", "log_cleanup_env") + awsCredsFile := filepath.Join(ybHome, ".aws", "credentials") + gcpCredsFile := filepath.Join(ybHome, "otel-collector", "gcp_creds") + + steps := []struct { + Desc string + Cmd string + }{ + { + "remove-otel-collector-config-file-if-exists", + fmt.Sprintf( + "rm -rf %s", + otelCollectorConfigFile, + ), + }, + { + "place-new-otel-collector-config-file", + fmt.Sprintf( + "mv %s %s", + h.param.GetOtelColConfigFile(), + otelCollectorConfigFile, + ), + }, + { + "create-aws-creds-dir", + fmt.Sprintf("mkdir -p %s/.aws", ybHome), + }, + { + 
"remove-otel-collector-aws-block-if-exists", + fmt.Sprintf(`if [ -f %s ]; then \ + awk '/# BEGIN YB MANAGED BLOCK - OTEL COLLECTOR CREDENTIALS/ {inblock=1} \ + /# END YB MANAGED BLOCK - OTEL COLLECTOR CREDENTIALS/ {inblock=0; next} \ + !inblock' %s > %s.tmp && mv %s.tmp %s; fi`, + awsCredsFile, + awsCredsFile, + awsCredsFile, + awsCredsFile, + awsCredsFile, + ), + }, + { + "remove-gcp-credentials", + fmt.Sprintf("rm -rf %s", gcpCredsFile), + }, + { + "clean-up-otel-log-cleanup-env", + fmt.Sprintf("rm -rf %s", otelColLogCleanupEnv), + }, + { + "write-otel-log-cleanup-env", + fmt.Sprintf( + `echo "preserve_audit_logs=true" > %s && echo "ycql_audit_log_level=%s" >> %s`, + otelColLogCleanupEnv, + h.param.GetYcqlAuditLogLevel(), + otelColLogCleanupEnv, + ), + }, + { + "set-permission-otel-log-cleanup-env", + fmt.Sprintf(`chmod 0440 %s`, otelColLogCleanupEnv), + }, + } + + if h.param.GetOtelColAwsAccessKey() != "" && h.param.GetOtelColAwsSecretKey() != "" { + steps = append(steps, struct { + Desc string + Cmd string + }{ + "append-otel-collector-creds", + fmt.Sprintf( + `echo '# BEGIN YB MANAGED BLOCK - OTEL COLLECTOR CREDENTIALS + [otel-collector] + aws_access_key_id = %s + aws_secret_access_key = %s + # END YB MANAGED BLOCK - OTEL COLLECTOR CREDENTIALS' >> %s && chmod 440 %s`, + h.param.GetOtelColAwsAccessKey(), + h.param.GetOtelColAwsSecretKey(), + awsCredsFile, + awsCredsFile, + ), + }) + } + + if h.param.GetOtelColGcpCredsFile() != "" { + steps = append(steps, struct { + Desc string + Cmd string + }{ + "place-new-gcp-creds-file", + fmt.Sprintf("mv %s %s", h.param.GetOtelColGcpCredsFile(), gcpCredsFile), + }) + } + + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} diff --git a/managed/node-agent/app/task/install_software.go b/managed/node-agent/app/task/install_software.go index d123bb06aefa..dd3e1807a840 100644 --- a/managed/node-agent/app/task/install_software.go +++ 
b/managed/node-agent/app/task/install_software.go @@ -29,7 +29,7 @@ func NewInstallSoftwareHandler( return &InstallSoftwareHandler{ param: param, username: username, - logOut: util.NewBuffer(MaxBufferCapacity), + logOut: util.NewBuffer(module.MaxBufferCapacity), } } @@ -45,23 +45,6 @@ func (h *InstallSoftwareHandler) String() string { return "Install Software Task" } -// helper that wraps NewShellTaskWithUser + Process + error logging -func (h *InstallSoftwareHandler) runShell( - ctx context.Context, - desc, shell string, - args []string, -) error { - h.logOut.WriteLine("Running install software phase: %s", desc) - h.shellTask = NewShellTaskWithUser(desc, h.username, shell, args) - _, err := h.shellTask.Process(ctx) - if err != nil { - util.FileLogger().Errorf(ctx, - "Install software failed [%s]: %s", desc, err) - return err - } - return nil -} - func (h *InstallSoftwareHandler) Handle(ctx context.Context) (*pb.DescribeTaskResponse, error) { util.FileLogger().Info(ctx, "Starting install software handler.") @@ -72,6 +55,12 @@ func (h *InstallSoftwareHandler) Handle(ctx context.Context) (*pb.DescribeTaskRe return nil, err } + if len(h.param.GetSymLinkFolders()) == 0 { + err := errors.New("server process is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + // 1) extract all the names and paths up front pkgName := filepath.Base(ybPkg) pkgFolder := helpers.ExtractArchiveFolderName(pkgName) @@ -95,7 +84,7 @@ func (h *InstallSoftwareHandler) Handle(ctx context.Context) (*pb.DescribeTaskRe h.logOut.WriteLine("Download command %s", cmdStr) if cmdStr != "" { h.logOut.WriteLine("Dowloading software") - if err := h.runShell(ctx, "download-software", util.DefaultShell, []string{"-c", cmdStr}); err != nil { + if _, err := module.RunShellCmd(ctx, h.username, "download-software", cmdStr, h.logOut); err != nil { return nil, err } // optional checksum @@ -147,8 +136,8 @@ func (h *InstallSoftwareHandler) execShellCommands( ) error { releasesDir := 
filepath.Join(home, "releases", releaseVersion) steps := []struct { - desc string - cmd string + Desc string + Cmd string }{ {"make-yb-software-dir", fmt.Sprintf("mkdir -p %s", ybSoftwareDir)}, { @@ -169,11 +158,8 @@ func (h *InstallSoftwareHandler) execShellCommands( ), }, } - - for _, step := range steps { - if err := h.runShell(ctx, step.desc, util.DefaultShell, []string{"-c", step.cmd}); err != nil { - return err - } + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err } return nil } @@ -183,7 +169,7 @@ func (h *InstallSoftwareHandler) setupSymlinks( home string, ybSoftwareDir string, ) error { - processes := []string{"master", "tserver"} + processes := h.param.GetSymLinkFolders() files, err := helpers.ListDirectoryContent(ybSoftwareDir) if err != nil { return err @@ -194,8 +180,8 @@ func (h *InstallSoftwareHandler) setupSymlinks( src := filepath.Join(ybSoftwareDir, f) dst := filepath.Join(targetDir, f) desc := fmt.Sprintf("symlink-%s-to-%s", src, dst) - cmd := fmt.Sprintf("ln -sf %s %s", src, dst) - if err := h.runShell(ctx, desc, util.DefaultShell, []string{"-c", cmd}); err != nil { + cmd := fmt.Sprintf("unlink %s > /dev/null 2>&1; ln -sf %s %s", dst, src, dst) + if _, err := module.RunShellCmd(ctx, h.username, desc, cmd, h.logOut); err != nil { return err } } diff --git a/managed/node-agent/app/task/install_ybc.go b/managed/node-agent/app/task/install_ybc.go new file mode 100644 index 000000000000..e8b055ad01c0 --- /dev/null +++ b/managed/node-agent/app/task/install_ybc.go @@ -0,0 +1,200 @@ +// Copyright (c) YugaByte, Inc. 
+ +package task + +import ( + "context" + "errors" + "fmt" + "node-agent/app/task/helpers" + "node-agent/app/task/module" + pb "node-agent/generated/service" + "node-agent/util" + "path/filepath" +) + +type InstallYbcHandler struct { + shellTask *ShellTask + param *pb.InstallYbcInput + username string + logOut util.Buffer +} + +func NewInstallYbcHandler(param *pb.InstallYbcInput, username string) *InstallYbcHandler { + return &InstallYbcHandler{ + param: param, + username: username, + logOut: util.NewBuffer(module.MaxBufferCapacity), + } +} + +// CurrentTaskStatus implements the AsyncTask method. +func (h *InstallYbcHandler) CurrentTaskStatus() *TaskStatus { + return &TaskStatus{ + Info: h.logOut, + ExitStatus: &ExitStatus{}, + } +} + +func (h *InstallYbcHandler) String() string { + return "Install YBC Task" +} + +func (h *InstallYbcHandler) execSetupYBCCommands( + ctx context.Context, + ybcPackagePath, ybcSoftwareDir, ybcControllerDir string, +) error { + /* + ybcPackagePath - Points to the current location where the YBC package is stored + ybcSoftwareDir - Points to the location where YBC package will be stored. + Example - /home/yugabyte/yb-software/ybc-2.2.0.2-b2-linux-x86_64 + ybcControllerDir - YBC directory on the node, /home/yugabyte/controller. 
+ */ + + steps := []struct { + Desc string + Cmd string + }{ + {"clean-ybc-software-dir", fmt.Sprintf("rm -rf %s", ybcSoftwareDir)}, + { + "make-ybc-software-dir", + fmt.Sprintf( + "mkdir -p %s && chown %s:%s %s && chmod 0755 %s", + ybcSoftwareDir, + h.username, + h.username, + ybcSoftwareDir, + ybcSoftwareDir, + ), + }, + { + "untar-ybc-software", + fmt.Sprintf( + "tar --no-same-owner -xzvf %s --strip-components=1 -C %s", + ybcPackagePath, + ybcSoftwareDir, + ), + }, + { + "make-controller-dir", + fmt.Sprintf( + "mkdir -p %s && chown %s:%s %s && chmod 0755 %s", + ybcControllerDir, + h.username, + h.username, + ybcControllerDir, + ybcControllerDir, + ), + }, + {"remove-temp-package", fmt.Sprintf("rm -rf %s", ybcPackagePath)}, + } + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} + +func (h *InstallYbcHandler) execConfigureYBCCommands( + ctx context.Context, + ybcSoftwareDir, ybcControllerDir string, +) error { + mountPoints := h.param.GetMountPoints() + if len(mountPoints) == 0 { + return errors.New("mountPoints is required") + } + mountPoint := mountPoints[0] + steps := []struct { + Desc string + Cmd string + }{ + { + "setup-bin-symlink", + fmt.Sprintf( + "unlink %s > /dev/null 2>&1; ln -sf %s %s", + filepath.Join(ybcControllerDir, "bin"), + filepath.Join(ybcSoftwareDir, "bin"), + filepath.Join(ybcControllerDir, "bin"), + ), + }, + { + "create-ybc-logs-dir-mount-path", + fmt.Sprintf( + "mkdir -p %s && chown %s:%s %s && chmod 0755 %s", + filepath.Join(mountPoint, "ybc-data/controller/logs"), + h.username, + h.username, + filepath.Join(mountPoint, "ybc-data/controller/logs"), + filepath.Join(mountPoint, "ybc-data/controller/logs"), + ), + }, + { + "create-logs-dir-symlinks", + fmt.Sprintf( + "unlink %s > /dev/null 2>&1; ln -sf %s %s", + filepath.Join(ybcControllerDir, "logs"), + filepath.Join(mountPoint, "ybc-data/controller/logs"), + filepath.Join(ybcControllerDir, "logs"), + ), + }, + { + 
"create-ybc-conf-dir", + fmt.Sprintf( + "mkdir -p %s && chown %s:%s %s && chmod 0755 %s", + filepath.Join(ybcControllerDir, "conf"), + h.username, + h.username, + filepath.Join(ybcControllerDir, "conf"), + filepath.Join(ybcControllerDir, "conf"), + ), + }, + } + + if err := module.RunShellSteps(ctx, h.username, steps, h.logOut); err != nil { + return err + } + return nil +} + +func (h *InstallYbcHandler) Handle(ctx context.Context) (*pb.DescribeTaskResponse, error) { + util.FileLogger().Info(ctx, "Starting install YBC handler.") + + ybcPkg := h.param.GetYbcPackage() + if ybcPkg == "" { + err := errors.New("ybPackage is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 1) extract all the names and paths up front + pkgName := filepath.Base(ybcPkg) + pkgFolder := helpers.ExtractArchiveFolderName(pkgName) + + // 2) figure out home dir + home := "" + if h.param.GetYbHomeDir() != "" { + home = h.param.GetYbHomeDir() + } else { + err := errors.New("ybHomeDir is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + ybcSoftwareDir := filepath.Join(home, "yb-software", pkgFolder) + ybcControllerDir := filepath.Join(h.param.GetYbHomeDir(), "controller") + // 3) Put the ybc software at the desired location. + ybcPackagePath := filepath.Join(h.param.GetRemoteTmp(), pkgName) + err := h.execSetupYBCCommands(ctx, ybcPackagePath, ybcSoftwareDir, ybcControllerDir) + if err != nil { + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 4) Configure the ybc package. 
+ err = h.execConfigureYBCCommands(ctx, ybcSoftwareDir, ybcControllerDir) + if err != nil { + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + return nil, nil +} diff --git a/managed/node-agent/app/task/module/shell.go b/managed/node-agent/app/task/module/shell.go index c374b94091e2..65284f536f18 100644 --- a/managed/node-agent/app/task/module/shell.go +++ b/managed/node-agent/app/task/module/shell.go @@ -14,6 +14,12 @@ import ( "github.com/creack/pty" ) +const ( + // MaxBufferCapacity is the max number of bytes allowed in the buffer + // before truncating the first bytes. + MaxBufferCapacity = 1000000 +) + var ( // Parameters in the command that should be redacted. redactParams = map[string]bool{ @@ -24,50 +30,21 @@ var ( userVariables = []string{"LOGNAME", "USER", "LNAME", "USERNAME"} ) -// Command handles command execution. -type Command struct { - // Name of the command. - name string - cmd string - user string - args []string -} - -// NewCommand returns a command instance. -func NewCommand(name string, cmd string, args []string) *Command { - return NewCommandWithUser(name, "", cmd, args) -} - -// NewCommandWithUser returns a command instance. -func NewCommandWithUser(name string, user string, cmd string, args []string) *Command { - return &Command{ - name: name, - user: user, - cmd: cmd, - args: args, - } -} - -// Name returns the name of the command. -func (command *Command) Name() string { - return command.name -} - -// Cmd returns the command. -func (command *Command) Cmd() string { - return command.cmd -} - -// Args returns the command arguments. -func (command *Command) Args() []string { - return command.args +// CommandInfo holds command information. +type CommandInfo struct { + User string + Desc string + Cmd string + Args []string + StdOut util.Buffer + StdErr util.Buffer } // RedactCommandArgs redacts the command arguments and returns them. 
-func (command *Command) RedactCommandArgs() []string { +func (cmdInfo *CommandInfo) RedactCommandArgs() []string { redacted := []string{} redactValue := false - for _, param := range command.args { + for _, param := range cmdInfo.Args { if strings.HasPrefix(param, "-") { if _, ok := redactParams[strings.TrimLeft(param, "-")]; ok { redactValue = true @@ -84,26 +61,126 @@ func (command *Command) RedactCommandArgs() []string { return redacted } -// Create returns the exec command with the environment set. -func (command *Command) Create(ctx context.Context) (*exec.Cmd, error) { - userDetail, err := util.UserInfo(command.user) +// RunCmd runs the command in the command info. +func (cmdInfo *CommandInfo) RunCmd(ctx context.Context) error { + userDetail, err := util.UserInfo(cmdInfo.User) if err != nil { - return nil, err + return err } util.FileLogger().Debugf(ctx, "Using user: %s, uid: %d, gid: %d", userDetail.User.Username, userDetail.UserID, userDetail.GroupID) - env, _ := command.userEnv(ctx, userDetail) - cmd, err := command.command(ctx, userDetail, command.cmd, command.args...) + env, _ := userEnv(ctx, userDetail) + cmd, err := createCmd(ctx, userDetail, cmdInfo.Cmd, cmdInfo.Args...) if err != nil { util.FileLogger(). - Warnf(ctx, "Failed to create command %s. Error: %s", command.name, err.Error()) - return nil, err + Errorf(ctx, "Failed to create command %s. Error: %s", cmdInfo.Desc, err.Error()) + return err } cmd.Env = append(cmd.Env, env...) - return cmd, nil + if cmdInfo.StdOut != nil { + cmd.Stdout = cmdInfo.StdOut + } + if cmdInfo.StdErr != nil { + cmd.Stderr = cmdInfo.StdErr + } + return cmd.Run() +} + +// RunSteps runs a list of command steps with the specified user and logs the output. 
+func RunSteps( + ctx context.Context, + user string, + steps []struct { + Desc string + Cmd string + Args []string + }, logOut util.Buffer) error { + cmdInfos := make([]*CommandInfo, len(steps)) + for i, step := range steps { + cmdInfos[i] = &CommandInfo{ + User: user, + Desc: step.Desc, + Cmd: step.Cmd, + Args: step.Args, + StdOut: util.NewBuffer(MaxBufferCapacity), + StdErr: util.NewBuffer(MaxBufferCapacity), + } + } + return createCmds( + ctx, + user, + cmdInfos, + func(ctx context.Context, cmdInfo *CommandInfo, cmd *exec.Cmd) error { + if logOut != nil { + logOut.WriteLine("Running step: %s", cmdInfo.Desc) + } + err := cmd.Run() + if err != nil { + util.FileLogger(). + Errorf(ctx, "Failed to run step %s: %s", cmdInfo.Desc, err.Error()) + if logOut != nil { + logOut.WriteLine("Failed to run step %s: %s", cmdInfo.Desc, err.Error()) + } + } + return err + }) } -func (command *Command) command( +// RunShellCmd runs a shell command with the specified user. +func RunShellCmd( + ctx context.Context, + user, desc, cmdStr string, + logOut util.Buffer, +) (*CommandInfo, error) { + cmdInfo := &CommandInfo{ + User: user, + Desc: desc, + Cmd: util.DefaultShell, + Args: []string{"-c", cmdStr}, + StdOut: util.NewBuffer(MaxBufferCapacity), + StdErr: util.NewBuffer(MaxBufferCapacity), + } + if logOut != nil { + logOut.WriteLine("Running shell command for %s", desc) + } + err := cmdInfo.RunCmd(ctx) + if err != nil { + util.FileLogger(). + Errorf(ctx, "Failed to run shell command for %s: %s", desc, err.Error()) + if logOut != nil { + logOut.WriteLine("Failed to run shell command for %s: %s", desc, err.Error()) + } + } + return cmdInfo, err +} + +// RunShellSteps runs a list of shell command steps with the specified user and logs the output. 
+func RunShellSteps( + ctx context.Context, + user string, + steps []struct { + Desc string + Cmd string + }, logOut util.Buffer) error { + cmdSteps := make([]struct { + Desc string + Cmd string + Args []string + }, len(steps)) + for i, step := range steps { + cmdSteps[i] = struct { + Desc string + Cmd string + Args []string + }{step.Desc, util.DefaultShell, []string{"-c", step.Cmd}} + } + return RunSteps( + ctx, + user, + cmdSteps, logOut) +} + +func createCmd( ctx context.Context, userDetail *util.UserDetail, name string, @@ -130,14 +207,50 @@ func (command *Command) command( return cmd, nil } -func (command *Command) userEnv( +// createCmds creates commands from the command info list and passes them to the receiver. +func createCmds( + ctx context.Context, + user string, + infos []*CommandInfo, + receiver func(context.Context, *CommandInfo, *exec.Cmd) error, +) error { + userDetail, err := util.UserInfo(user) + if err != nil { + return err + } + util.FileLogger().Debugf(ctx, "Using user: %s, uid: %d, gid: %d", + userDetail.User.Username, userDetail.UserID, userDetail.GroupID) + env, _ := userEnv(ctx, userDetail) + for _, info := range infos { + cmd, err := createCmd(ctx, userDetail, info.Cmd, info.Args...) + if err != nil { + util.FileLogger(). + Warnf(ctx, "Failed to create command %s. Error: %s", info.Desc, err.Error()) + return err + } + cmd.Env = append(cmd.Env, env...) + if info.StdOut != nil { + cmd.Stdout = info.StdOut + } + if info.StdErr != nil { + cmd.Stderr = info.StdErr + } + err = receiver(ctx, info, cmd) + if err != nil { + return err + } + } + return nil +} + +func userEnv( ctx context.Context, userDetail *util.UserDetail, ) ([]string, error) { // Approximate capacity of 100. env := make([]string, 0, 100) // Interactive shell to source ~/.bashrc. - cmd, err := command.command(ctx, userDetail, "bash") + cmd, err := createCmd(ctx, userDetail, "bash") // Create a pseudo tty (non stdin) to act like SSH login. 
// Otherwise, the child process is stopped because it is a background process. ptty, err := pty.Start(cmd) diff --git a/managed/node-agent/app/task/module/shell_test.go b/managed/node-agent/app/task/module/shell_test.go index 7b73f7e687d5..b3b07319d3c6 100644 --- a/managed/node-agent/app/task/module/shell_test.go +++ b/managed/node-agent/app/task/module/shell_test.go @@ -10,7 +10,6 @@ import ( ) func TestCommandEnv(t *testing.T) { - command := NewCommand("test_env", "echo", []string{"test"}) ctx := context.Background() currentUser, err := user.Current() if err != nil { @@ -20,7 +19,7 @@ func TestCommandEnv(t *testing.T) { if err != nil { t.Fatal(err) } - env, err := command.userEnv(ctx, userDetail) + env, err := userEnv(ctx, userDetail) if err != nil { t.Fatal(err) } diff --git a/managed/node-agent/app/task/module/systemd.go b/managed/node-agent/app/task/module/systemd.go index 4bc3e41570d9..41c70e6cc774 100644 --- a/managed/node-agent/app/task/module/systemd.go +++ b/managed/node-agent/app/task/module/systemd.go @@ -16,7 +16,7 @@ func IsUserSystemd(username, serverName string) (bool, error) { if err != nil { return false, err } - if !strings.HasSuffix(serverName, ".service") { + if !strings.HasSuffix(serverName, ".service") && !strings.HasSuffix(serverName, ".timer") { serverName = serverName + ".service" } path := filepath.Join(info.User.HomeDir, ".config/systemd/user", serverName) @@ -30,17 +30,43 @@ func IsUserSystemd(username, serverName string) (bool, error) { return false, err } -func ControlServerCmd(username, serverName, controlType string) (string, error) { +func getUserOptionForUserLevel(username, serverName string) string { userOption := "" if username != "" { yes, err := IsUserSystemd(username, serverName) if err != nil { - return "", err + return "" } if yes { userOption = "--user " } } + + return userOption +} + +func EnableSystemdUnit(username, serverName string) string { + userOption := getUserOptionForUserLevel(username, serverName) + return 
fmt.Sprintf( + "systemctl %sdaemon-reload && systemctl %senable %s", + userOption, + userOption, + serverName, + ) +} + +func StartSystemdUnit(username, serverName string) string { + userOption := getUserOptionForUserLevel(username, serverName) + return fmt.Sprintf("systemctl %sstart %s", userOption, serverName) +} + +func StopSystemdUnit(username, serverName string) string { + userOption := getUserOptionForUserLevel(username, serverName) + return fmt.Sprintf("systemctl %s stop %s", userOption, serverName) +} + +func ControlServerCmd(username, serverName, controlType string) (string, error) { + userOption := getUserOptionForUserLevel(username, serverName) return fmt.Sprintf( "systemctl %sdaemon-reload && systemctl %senable %s && systemctl %s%s %s", userOption, diff --git a/managed/node-agent/app/task/server_control.go b/managed/node-agent/app/task/server_control.go index 20540387fd35..eccd836f560b 100644 --- a/managed/node-agent/app/task/server_control.go +++ b/managed/node-agent/app/task/server_control.go @@ -19,11 +19,16 @@ type ServerControlHandler struct { taskStatus *atomic.Value param *pb.ServerControlInput username string + logOut util.Buffer } // ServerControlHandler returns a new instance of ServerControlHandler. func NewServerControlHandler(param *pb.ServerControlInput, username string) *ServerControlHandler { - return &ServerControlHandler{param: param, username: username} + return &ServerControlHandler{ + param: param, + username: username, + logOut: util.NewBuffer(module.MaxBufferCapacity), + } } // CurrentTaskStatus implements the AsyncTask method. 
@@ -41,22 +46,21 @@ func (handler *ServerControlHandler) String() string { func (handler *ServerControlHandler) Handle( ctx context.Context, ) (*pb.DescribeTaskResponse, error) { - var shellTask *ShellTask if handler.param.GetNumVolumes() > 0 { cmd := "df | awk '{{print $6}}' | egrep '^/mnt/d[0-9]+' | wc -l" util.FileLogger().Infof(ctx, "Running command %v", cmd) - shellTask = NewShellTaskWithUser( - handler.String(), + cmdInfo, err := module.RunShellCmd( + ctx, handler.username, - util.DefaultShell, - []string{"-c", cmd}, + "getNumVolumes", + cmd, + handler.logOut, ) - status, err := shellTask.Process(ctx) if err != nil { util.FileLogger().Errorf(ctx, "Server control failed in %v - %s", cmd, err.Error()) return nil, err } - count, err := strconv.Atoi(strings.TrimSpace(status.Info.String())) + count, err := strconv.Atoi(strings.TrimSpace(cmdInfo.StdOut.String())) if err != nil { util.FileLogger(). Errorf(ctx, "Failed to parse output of command %v - %s", cmd, err.Error()) @@ -79,13 +83,13 @@ func (handler *ServerControlHandler) Handle( } if yes { lingerCmd := fmt.Sprintf("loginctl enable-linger %s", handler.username) - shellTask = NewShellTaskWithUser( - handler.String(), + _, err := module.RunShellCmd( + ctx, handler.username, - util.DefaultShell, - []string{"-c", lingerCmd}, + "loginctl enable-linger", + lingerCmd, + handler.logOut, ) - _, err = shellTask.Process(ctx) if err != nil { util.FileLogger(). 
Errorf(ctx, "Server control failed in %v - %s", lingerCmd, err.Error()) @@ -102,14 +106,8 @@ func (handler *ServerControlHandler) Handle( util.FileLogger().Errorf(ctx, "Failed to get server control command - %s", err.Error()) return nil, err } - shellTask = NewShellTaskWithUser( - handler.String(), - handler.username, - util.DefaultShell, - []string{"-c", cmd}, - ) util.FileLogger().Infof(ctx, "Running command %v", cmd) - _, err = shellTask.Process(ctx) + _, err = module.RunShellCmd(ctx, handler.username, "serverControl", cmd, handler.logOut) if err != nil { util.FileLogger().Errorf(ctx, "Server control failed in %v - %s", cmd, err.Error()) return nil, err diff --git a/managed/node-agent/app/task/server_gflags_task.go b/managed/node-agent/app/task/server_gflags_task.go index 994d802a05a9..a0e180a27c6e 100644 --- a/managed/node-agent/app/task/server_gflags_task.go +++ b/managed/node-agent/app/task/server_gflags_task.go @@ -3,8 +3,8 @@ package task import ( - "bytes" "context" + "fmt" "io/fs" "node-agent/app/task/module" pb "node-agent/generated/service" @@ -12,7 +12,6 @@ import ( "path/filepath" "strconv" "strings" - "sync/atomic" ) const ( @@ -26,10 +25,9 @@ var ( ) type ServerGflagsHandler struct { - taskStatus *atomic.Value - param *pb.ServerGFlagsInput - username string - logOut util.Buffer + param *pb.ServerGFlagsInput + username string + logOut util.Buffer } // NewServerGflagsHandler returns a new instance of ServerControlHandler. 
@@ -37,7 +35,7 @@ func NewServerGflagsHandler(param *pb.ServerGFlagsInput, username string) *Serve return &ServerGflagsHandler{ param: param, username: username, - logOut: util.NewBuffer(MaxBufferCapacity), + logOut: util.NewBuffer(module.MaxBufferCapacity), } } @@ -60,31 +58,25 @@ func (handler *ServerGflagsHandler) postmasterCgroupPath(ctx context.Context) (s return "", err } handler.logOut.WriteLine("Determining cgroup version") - cmd, err := module.NewCommandWithUser( - "DetermineCgroupVersion", - handler.username, - "stat", - []string{"-fc", "%%T", "/sys/fs/cgroup/"}, - ).Create(ctx) - if err != nil { - return "", err + cmdInfo := &module.CommandInfo{ + User: handler.username, + Desc: "DetermineCgroupVersion", + Cmd: "stat", + Args: []string{"-fc", "%T", "/sys/fs/cgroup/"}, + StdOut: util.NewBuffer(module.MaxBufferCapacity), } - buffer := &bytes.Buffer{} - cmd.Stdout = buffer - err = cmd.Run() + err = cmdInfo.RunCmd(ctx) if err != nil { return "", err } userID := strconv.Itoa(int(userInfo.UserID)) postmasterCgroupPath := "/sys/fs/cgroup/memory/ysql" - stdout := strings.TrimSpace(buffer.String()) + stdout := strings.TrimSpace(cmdInfo.StdOut.String()) if stdout == "cgroup2fs" { postmasterCgroupPath = filepath.Join( - "/sys/fs/cgroup/user.slice/user-", - userID, - ".slice/user@", - userID, - ".service/ysql") + fmt.Sprintf("user.slice/user-%s.slice", userID), + fmt.Sprintf("user@%s.service", userID), + "ysql") } return postmasterCgroupPath, nil } @@ -109,16 +101,13 @@ func (handler *ServerGflagsHandler) Handle( } } if len(toDeletePaths) > 0 { - command := module.NewCommandWithUser( - "DeleteMasterState", - handler.username, - "rm", - []string{"-rf", strings.Join(toDeletePaths, " ")}, - ) - cmd, err := command.Create(ctx) - if err == nil { - err = cmd.Run() + cmdInfo := &module.CommandInfo{ + User: handler.username, + Desc: "DeleteMasterState", + Cmd: "rm", + Args: []string{"-rf", strings.Join(toDeletePaths, " ")}, } + err := cmdInfo.RunCmd(ctx) if err != nil { 
util.FileLogger(). Errorf(ctx, "Failed to delete master paths %v: %v", toDeletePaths, err) @@ -133,7 +122,7 @@ func (handler *ServerGflagsHandler) Handle( if err != nil { return nil, err } - processedGflags := map[string]any{} + processedGflags = map[string]string{} for k, v := range gflags { if k == "postmaster_cgroup" { processedGflags["postmaster_cgroup"] = path diff --git a/managed/node-agent/app/task/setup_cgroups.go b/managed/node-agent/app/task/setup_cgroups.go new file mode 100644 index 000000000000..fa6b57e14ebb --- /dev/null +++ b/managed/node-agent/app/task/setup_cgroups.go @@ -0,0 +1,133 @@ +// Copyright (c) YugaByte, Inc. + +package task + +import ( + "context" + "errors" + "fmt" + "io/fs" + "node-agent/app/task/helpers" + "node-agent/app/task/module" + pb "node-agent/generated/service" + "node-agent/util" + "path/filepath" + "strconv" + "strings" +) + +type SetupCgroupHandler struct { + param *pb.SetupCGroupInput + username string + logOut util.Buffer +} + +func NewSetupCgroupHandler(param *pb.SetupCGroupInput, username string) *SetupCgroupHandler { + return &SetupCgroupHandler{ + param: param, + username: username, + logOut: util.NewBuffer(module.MaxBufferCapacity), + } +} + +// CurrentTaskStatus implements the AsyncTask method. +func (h *SetupCgroupHandler) CurrentTaskStatus() *TaskStatus { + return &TaskStatus{ + Info: h.logOut, + ExitStatus: &ExitStatus{}, + } +} + +func (h *SetupCgroupHandler) String() string { + return "Setup cGroup Task" +} + +func (h *SetupCgroupHandler) Handle(ctx context.Context) (*pb.DescribeTaskResponse, error) { + util.FileLogger().Info(ctx, "Starting setup cGroup handler.") + + // 1) Retrieve OS information. 
+ osInfo, err := helpers.GetOSInfo() + if err != nil { + err := errors.New("error retrieving OS information") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // 2) figure out home dir + home := "" + if h.param.GetYbHomeDir() != "" { + home = h.param.GetYbHomeDir() + } else { + err := errors.New("ybHomeDir is required") + util.FileLogger().Error(ctx, err.Error()) + return nil, err + } + + // Setup cGroups for rhel:9 deployments + if helpers.IsRhel9(osInfo) { + h.logOut.WriteLine("Determining cgroup version") + cmdInfo := &module.CommandInfo{ + User: h.username, + Desc: "DetermineCgroupVersion", + Cmd: "stat", + Args: []string{"-fc", "%T", "/sys/fs/cgroup/"}, + StdOut: util.NewBuffer(module.MaxBufferCapacity), + } + util.FileLogger().Infof(ctx, "Running command %v", cmdInfo) + err = cmdInfo.RunCmd(ctx) + if err != nil { + return nil, err + } + + userInfo, _ := util.UserInfo(h.username) + stdout := strings.TrimSpace(cmdInfo.StdOut.String()) + userID := strconv.Itoa(int(userInfo.UserID)) + cGroupPath := "memory/ysql" + memMax := "memory.limit_in_bytes" + memSwapMap := "memory.memsw.limit_in_bytes" + + if stdout == "cgroup2fs" { + cGroupPath = filepath.Join( + fmt.Sprintf("user.slice/user-%s.slice", userID), + fmt.Sprintf("user@%s.service", userID), + "ysql") + memMax = "memory.max" + memSwapMap = "memory.swap.max" + } + + cGroupServiceContext := map[string]any{ + "cgroup_path": cGroupPath, + "mem_max": memMax, + "mem_swap_max": memSwapMap, + "pg_max_mem_mb": h.param.GetPgMaxMemMb(), + } + + h.logOut.WriteLine("Configuring cgroup systemd unit") + // Copy yb-ysql-cgroup.service. 
+ _ = module.CopyFile( + ctx, + cGroupServiceContext, + filepath.Join(ServerTemplateSubpath, "yb-ysql-cgroup.service"), + filepath.Join(home, SystemdUnitPath, "yb-ysql-cgroup.service"), + fs.FileMode(0755), + ) + + cmd, err := module.ControlServerCmd( + h.username, + "yb-ysql-cgroup.service", + "start", + ) + if err != nil { + util.FileLogger().Errorf(ctx, "Failed to get server control command - %s", err.Error()) + return nil, err + } + util.FileLogger().Infof(ctx, "Running command %v", cmd) + _, err = module.RunShellCmd(ctx, h.username, "serverControl", cmd, h.logOut) + if err != nil { + util.FileLogger().Errorf(ctx, "Server control failed in %v - %s", cmd, err.Error()) + return nil, err + } + } + + return nil, nil +} diff --git a/managed/node-agent/app/task/shell_task.go b/managed/node-agent/app/task/shell_task.go index 5a1d2eebbab6..ad6776e8330c 100644 --- a/managed/node-agent/app/task/shell_task.go +++ b/managed/node-agent/app/task/shell_task.go @@ -21,12 +21,6 @@ import ( funk "github.com/thoas/go-funk" ) -const ( - // MaxBufferCapacity is the max number of bytes allowed in the buffer - // before truncating the first bytes. - MaxBufferCapacity = 1000000 -) - const ( mountPointsVolume = "mount_points_volume" mountPointsWritable = "mount_points_writable" @@ -49,9 +43,7 @@ const ( // ShellTask handles command execution using module.Command. type ShellTask struct { // Name of the task. - command *module.Command - stdout util.Buffer - stderr util.Buffer + cmdInfo *module.CommandInfo exitCode *atomic.Value } @@ -63,54 +55,54 @@ func NewShellTask(name string, cmd string, args []string) *ShellTask { // NewShellTaskWithUser returns a shell task executor. 
func NewShellTaskWithUser(name string, user string, cmd string, args []string) *ShellTask { return &ShellTask{ - command: module.NewCommandWithUser(name, user, cmd, args), + cmdInfo: &module.CommandInfo{ + User: user, + Desc: name, + Cmd: cmd, + Args: args, + StdOut: util.NewBuffer(module.MaxBufferCapacity), + StdErr: util.NewBuffer(module.MaxBufferCapacity), + }, exitCode: &atomic.Value{}, - stdout: util.NewBuffer(MaxBufferCapacity), - stderr: util.NewBuffer(MaxBufferCapacity), } } // TaskName returns the name of the shell task. func (s *ShellTask) TaskName() string { - return s.command.Name() + return s.cmdInfo.Desc } // Process runs the the command Task. func (s *ShellTask) Process(ctx context.Context) (*TaskStatus, error) { - util.FileLogger().Debugf(ctx, "Starting the command - %s", s.command.Name()) - taskStatus := &TaskStatus{Info: s.stdout, ExitStatus: &ExitStatus{Code: 1, Error: s.stderr}} - cmd, err := s.command.Create(ctx) - if err != nil { - util.FileLogger(). - Errorf(ctx, "Command creation for %s failed - %s", s.command.Name(), err.Error()) - return taskStatus, err + util.FileLogger().Debugf(ctx, "Starting the command - %s", s.cmdInfo.Desc) + taskStatus := &TaskStatus{ + Info: s.cmdInfo.StdOut, + ExitStatus: &ExitStatus{Code: 1, Error: s.cmdInfo.StdErr}, } - cmd.Stdout = s.stdout - cmd.Stderr = s.stderr if util.FileLogger().IsDebugEnabled() { - redactedArgs := s.command.RedactCommandArgs() + redactedArgs := s.cmdInfo.RedactCommandArgs() util.FileLogger(). - Debugf(ctx, "Running command %s with args %v", s.command.Cmd(), redactedArgs) + Debugf(ctx, "Running command %s with args %v", s.cmdInfo.Cmd, redactedArgs) } - err = cmd.Run() + err := s.cmdInfo.RunCmd(ctx) if err == nil { - taskStatus.Info = s.stdout + taskStatus.Info = s.cmdInfo.StdOut taskStatus.ExitStatus.Code = 0 if util.FileLogger().IsDebugEnabled() { util.FileLogger(). 
- Debugf(ctx, "Command %s executed successfully - %s", s.command.Name(), s.stdout.String()) + Debugf(ctx, "Command %s executed successfully - %s", s.cmdInfo.Desc, s.cmdInfo.StdOut.String()) } } else { - taskStatus.ExitStatus.Error = s.stderr + taskStatus.ExitStatus.Error = s.cmdInfo.StdErr if exitErr, ok := err.(*exec.ExitError); ok { taskStatus.ExitStatus.Code = exitErr.ExitCode() } - if util.FileLogger().IsDebugEnabled() && s.stdout.Len() > 0 { + if util.FileLogger().IsDebugEnabled() && s.cmdInfo.StdOut.Len() > 0 { util.FileLogger(). - Debugf(ctx, "Output for failed command %s - %s", s.command.Name(), s.stdout.String()) + Debugf(ctx, "Output for failed command %s - %s", s.cmdInfo.Desc, s.cmdInfo.StdOut.String()) } - errMsg := fmt.Sprintf("%s: %s", err.Error(), s.stderr.String()) - util.FileLogger().Errorf(ctx, "Command %s execution failed - %s", s.command.Name(), errMsg) + errMsg := fmt.Sprintf("%s: %s", err.Error(), s.cmdInfo.StdErr.String()) + util.FileLogger().Errorf(ctx, "Command %s execution failed - %s", s.cmdInfo.Desc, errMsg) } s.exitCode.Store(taskStatus.ExitStatus.Code) return taskStatus, err @@ -127,21 +119,21 @@ func (s *ShellTask) CurrentTaskStatus() *TaskStatus { v := s.exitCode.Load() if v == nil { return &TaskStatus{ - Info: s.stdout, + Info: s.cmdInfo.StdOut, } } return &TaskStatus{ - Info: s.stdout, + Info: s.cmdInfo.StdOut, ExitStatus: &ExitStatus{ Code: v.(int), - Error: s.stderr, + Error: s.cmdInfo.StdErr, }, } } // String implements the AsyncTask method. func (s *ShellTask) String() string { - return s.command.Name() + return s.cmdInfo.Desc } // Result returns the result. 
diff --git a/managed/node-agent/app/task/shell_task_test.go b/managed/node-agent/app/task/shell_task_test.go index 311fcf30b6d3..f554ef74b950 100644 --- a/managed/node-agent/app/task/shell_task_test.go +++ b/managed/node-agent/app/task/shell_task_test.go @@ -25,7 +25,7 @@ func TestShellTaskProcess(t *testing.T) { } if result.Info.String() != "test\n" { - t.Fatalf("Unexpected result") + t.Fatalf("Unexpected result: %s", result.Info.String()) } } diff --git a/managed/node-agent/proto/server.proto b/managed/node-agent/proto/server.proto index 2da935689fa0..8c317a2c7ff0 100644 --- a/managed/node-agent/proto/server.proto +++ b/managed/node-agent/proto/server.proto @@ -58,6 +58,10 @@ message SubmitTaskRequest { ConfigureServiceInput configureServiceInput = 6; InstallSoftwareInput installSoftwareInput = 7; ServerGFlagsInput serverGFlagsInput = 8; + InstallYbcInput installYbcInput = 9; + ConfigureServerInput configureServerInput = 10; + InstallOtelCollectorInput installOtelCollectorInput = 11; + SetupCGroupInput setupCGroupInput = 12; } } @@ -79,6 +83,10 @@ message DescribeTaskResponse { ConfigureServiceOutput configureServiceOutput = 6; InstallSoftwareOutput installSoftwareOutput = 7; ServerGFlagsOutput serverGFlagsOutput = 8; + InstallYbcOutput installYbcOutput = 9; + ConfigureServerOutput configureServerOutput = 10; + InstallOtelCollectorOutput installOtelCollectorOutput = 11; + SetupCGroupOutput setupCGroupOutput = 12; } } diff --git a/managed/node-agent/proto/yb.proto b/managed/node-agent/proto/yb.proto index 0edd8bc7ba0b..e6ea223f2705 100644 --- a/managed/node-agent/proto/yb.proto +++ b/managed/node-agent/proto/yb.proto @@ -111,6 +111,7 @@ message InstallSoftwareInput { string iTestS3PackagePath = 9; string remoteTmp = 10; string ybHomeDir = 11; + repeated string symLinkFolders = 12; } message InstallSoftwareOutput { @@ -126,3 +127,51 @@ message ServerGFlagsInput { message ServerGFlagsOutput { } + +message InstallYbcInput { + string ybcPackage = 1; + string remoteTmp 
= 2; + string ybHomeDir = 3; + repeated string mountPoints = 4; +} + +message InstallYbcOutput { + int32 pid = 1; +} + +message ConfigureServerInput { + string remoteTmp = 1; + string ybHomeDir = 2; + repeated string processes = 3; + repeated string mountPoints = 4; + uint32 numCoresToKeep = 5; +} + +message ConfigureServerOutput { + int32 pid = 1; +} + +message InstallOtelCollectorInput { + string ybHomeDir = 1; + repeated string mountPoints = 2; + string otelColPackagePath = 3; + string ycqlAuditLogLevel = 4; + string otelColConfigFile = 5; + string otelColAwsAccessKey = 6; + string otelColAwsSecretKey = 7; + string otelColGcpCredsFile = 8; + string remoteTmp = 9; +} + +message InstallOtelCollectorOutput { + int32 pid = 1; +} + +message SetupCGroupInput { + string ybHomeDir = 1; + uint32 pgMaxMemMb = 2; +} + +message SetupCGroupOutput { + int32 pid = 1; +} diff --git a/managed/node-agent/resources/preflight_check.sh b/managed/node-agent/resources/preflight_check.sh index e874fc06f0e3..d46c6b3c8053 100755 --- a/managed/node-agent/resources/preflight_check.sh +++ b/managed/node-agent/resources/preflight_check.sh @@ -183,7 +183,7 @@ check_ntp_synchronization() { else update_result_json "ntp_service_status" false fi - if [[ $skew_ms -lt 400 ]]; then + if awk "BEGIN{exit !(${skew_ms} < 400)}"; then update_result_json "ntp_skew" true else update_result_json "ntp_skew" false diff --git a/managed/node-agent/resources/templates/server/clean_cores.sh.j2 b/managed/node-agent/resources/templates/server/clean_cores.sh.j2 new file mode 100755 index 000000000000..66badb2a88da --- /dev/null +++ b/managed/node-agent/resources/templates/server/clean_cores.sh.j2 @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +# +# Copyright 2019 YugaByte, Inc. and Contributors +# +# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt + +set -euo pipefail + +print_help() { + cat <] +Options: + -n, --num_corestokeep + number of latest core files to keep (default: 5). + -h, --help + Show usage +EOT +} + +num_cores_to_keep={{ yb_num_clean_cores_to_keep }} +YB_CRASH_DIR=({{ yb_cores_dir }}/) +while [[ $# -gt 0 ]]; do + case "$1" in + -n|--num_corestokeep) + num_cores_to_keep=$2 + shift + ;; + -h|--help) + print_help + exit 0 + ;; + *) + echo "Invalid option: $1" >&2 + print_help + exit 1 + esac + shift +done + +USER=${USER:-$(whoami)} +if [[ "$(id -u)" != "0" && "$USER" != "yugabyte" ]]; then + echo "This script must be run as root or yugabyte" + exit 1 +fi + +find_core_files="find $YB_CRASH_DIR -name 'core_*' -type f -printf '%T+\t%p\n' | sort | + awk '{print \$2}'" +num_core_files=$(eval $find_core_files | wc -l) +if [ $num_core_files -gt $num_cores_to_keep ]; then + core_files_to_delete=$(eval $find_core_files | head -n$(($num_core_files - $num_cores_to_keep))) + for file in $core_files_to_delete; do + echo "Deleting core file $file" + rm $file + done +fi diff --git a/managed/node-agent/resources/templates/server/clock-sync.sh.j2 b/managed/node-agent/resources/templates/server/clock-sync.sh.j2 new file mode 100755 index 000000000000..d019511d378a --- /dev/null +++ b/managed/node-agent/resources/templates/server/clock-sync.sh.j2 @@ -0,0 +1,128 @@ +#!/bin/bash + +SCRIPT_NAME=$(basename "$0") + +################### Config ################### +is_acceptable_clock_skew_wait_enabled="{{ is_acceptable_clock_skew_wait_enabled | default(true) }}" # Whether check clock skew +acceptable_clock_skew_sec="{{ acceptable_clock_skew_sec | default(0.5) }}" # In seconds +max_tries="{{ acceptable_clock_skew_max_tries | default(120) }}" # Maximum number of tries before returning failure +retry_wait_time_s=1 # How long waits before retry in seconds + +if [[ 
"$is_acceptable_clock_skew_wait_enabled" != true && "$is_acceptable_clock_skew_wait_enabled" != "True" ]]; then + echo "Wait for clock skew to go below the acceptable threshold is disabled. Returning success." + exit 0 +fi + +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +readonly PYTHON_EXECUTABLES=("python" "python3" "python3.11" "python3.10" "python3.9" "python3.8" "python3.7" "python3.6" "python3.12" "python2") +PYTHON_EXECUTABLE="" +set_python_executable() { + for py_executable in "${PYTHON_EXECUTABLES[@]}"; do + if which "$py_executable" > /dev/null 2>&1; then + PYTHON_EXECUTABLE="$py_executable" + export PYTHON_EXECUTABLE + return + fi + done +} + +check_clock_sync_chrony() { + # if chrond is restarted, tracking will return all 0s + set_python_executable + chrony_tracking="$(chronyc tracking)" + if [[ $? -ne 0 ]]; then + echo "`chronyc tracking` failed to execute" + return 1 + fi + if [[ $(echo "${chrony_tracking}" | awk "/Reference ID/ {print \$4}") == "00000000" ]]; then + echo "chrony is not initialized" + return 1 + fi + local skew=$(echo "${chrony_tracking}" | awk "/System time/ {print \$4}") + local dispersion=$(echo "${chrony_tracking}" | awk "/Root dispersion/ {print \$4}") + local delay=$(echo "${chrony_tracking}" | awk "/Root delay/ {print \$4}") + local clock_error="" + if [[ -z "${PYTHON_EXECUTABLE}" ]]; then + clock_error=${skew} + else + clock_error=$(${PYTHON_EXECUTABLE} -c "print(${skew} + ${dispersion} + (0.5 * ${delay}))") + fi + + if awk "BEGIN {exit !($clock_error < $acceptable_clock_skew_sec)}"; then + echo "Clock skew is within acceptable limits: $skew ms" + return 0 + else + echo "Clock skew exceeds acceptable limits: $skew ms" + return 1 + fi +} + +check_clock_sync_ntpd() { + set_python_executable + local skew=$(ntpq -p | awk "\$1 ~ /^\*/ {print \$9}") + local acceptable_skew_ms=$(${PYTHON_EXECUTABLE} -c "print(${acceptable_clock_skew_sec} * 1000)") + + if [[ -z "$skew" ]]; then + echo "ntpd is not initialized" + return 1 
+ fi + + if awk "BEGIN{exit !(${skew} < ${acceptable_skew_ms})}"; then + echo "Clock skew is within acceptable limits: $skew ms" + return 0 + else + echo "Clock skew exceeds acceptable limits: $skew ms" + return 1 + fi +} + +check_clock_sync_timesyncd() { + synchronized=$(timedatectl status | grep "System clock synchronized" | awk "{print \$4}") + if [[ "${synchronized}" == "yes" ]]; then + echo "timesyncd reports clock is synchronized" + return 0 + else + echo "timesyncd clock is not synchronized" + return 1 + fi +} + +systemd_loaded() { + active=$(systemctl show --no-pager $1 | grep "ActiveState" | cut -d= -f2) + if [[ "${active}" == "active" ]]; then + return 0 + fi + return 1 +} + +iter=0 +while true; do + # If chrony is available, use it for clock sync. + if command_exists chronyc; then + check_clock_sync_chrony + res=$? + # If ntpd is available, use it for clock sync. + elif command_exists ntpd; then + check_clock_sync_ntpd + res=$? + elif systemd_loaded systemd-timesyncd; then + check_clock_sync_timesyncd + res=$? + else + echo "Chrony, NTPd, and timesyncd are not available, but required." + exit 1 + fi + ((iter++)) + if [ $res -eq 0 ]; then + echo "Success! Clock skew is within acceptable limits." + exit 0 + fi + if [ $iter -ge $max_tries ]; then + echo "Failure! Maximum number of tries reached." + exit 1 + fi + sleep "$retry_wait_time_s" +done diff --git a/managed/node-agent/resources/templates/server/collect_metrics_wrapper.sh.j2 b/managed/node-agent/resources/templates/server/collect_metrics_wrapper.sh.j2 new file mode 100755 index 000000000000..25410acda81a --- /dev/null +++ b/managed/node-agent/resources/templates/server/collect_metrics_wrapper.sh.j2 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Copyright 2021 YugaByte, Inc. and Contributors +# +# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt + +set -euo pipefail + +collect_metrics_script=({{ yb_home_dir }}/bin/collect_metrics.sh) +filename=({{ yb_metrics_dir }}/node_metrics.prom) + +USER=${USER:-$(whoami)} +if [[ "$(id -u)" != "0" && "$USER" != "yugabyte" ]]; then + echo "This script must be run as root or yugabyte" + exit 1 +fi + +# Just call a script, generated and uploaded by health check process +if [ -f $collect_metrics_script ]; then + /bin/bash $collect_metrics_script -o file -f $filename +else + echo "Metric collection script $collect_metrics_script does not exist" + exit 1 +fi diff --git a/managed/node-agent/resources/templates/server/otel-collector.service b/managed/node-agent/resources/templates/server/otel-collector.service new file mode 100644 index 000000000000..a2634466300a --- /dev/null +++ b/managed/node-agent/resources/templates/server/otel-collector.service @@ -0,0 +1,40 @@ +[Unit] +Description=OpenTelemetry Collector +After=network.target network-online.target multi-user.target +# Disable restart limits, using RestartSec to rate limit restarts +StartLimitInterval=0 + +[Path] +PathExists={{yb_home_dir}}/otel-collector/otelcol-contrib +PathExists={{yb_home_dir}}/otel-collector/config.yml + +[Service] +{% if ansible_os_family == 'RedHat' and (ansible_distribution_major_version == '7' or (ansible_distribution == 'Amazon' and ansible_distribution_major_version == '2')) %} +User={{ user_name }} +Group={{ user_name }} +{% endif %} +# Start +ExecStart={{yb_home_dir}}/otel-collector/otelcol-contrib \ + --config=file:{{yb_home_dir}}/otel-collector/config.yml +Restart=always +RestartSec=5 +# Stop -> SIGTERM - 10s - SIGKILL (if not stopped) +KillMode=process +TimeoutStopFailureMode=terminate +KillSignal=SIGTERM +TimeoutStopSec=10 +FinalKillSignal=SIGKILL +# Logs +StandardOutput=syslog +StandardError=syslog +# ulimit +LimitCORE=infinity 
+LimitNOFILE=1048576 +LimitNPROC=12000 + +Environment="AWS_PROFILE=otel-collector" +Environment="GOOGLE_APPLICATION_CREDENTIALS={{yb_home_dir}}/otel-collector/gcp_creds" +Environment="HOME={{yb_home_dir}}" + +[Install] +WantedBy=default.target diff --git a/managed/node-agent/resources/templates/server/yb-server-ctl.sh.j2 b/managed/node-agent/resources/templates/server/yb-server-ctl.sh.j2 new file mode 100644 index 000000000000..496252c2e2e1 --- /dev/null +++ b/managed/node-agent/resources/templates/server/yb-server-ctl.sh.j2 @@ -0,0 +1,426 @@ +#!/usr/bin/env bash +# +# Copyright 2019 YugaByte, Inc. and Contributors +# +# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt + +set -euo pipefail +# Redirect stderr to syslog. +exec 2> >(logger -t $(basename $0)) + +readonly MOUNT_PATHS=({{ mount_paths }}) +readonly EXPECTED_USERNAME=({{ user_name }}) +readonly CORES_DIR=({{ yb_cores_dir }}) +{% raw %} +readonly NUM_MOUNTS=${#MOUNT_PATHS[@]} +readonly SYSTEMD_OPTION={{systemd_option}} +{% endraw %} + +if ! [ -f /.dockerenv ] && [ "$(whoami)" != "$EXPECTED_USERNAME" ]; then + echo "Script must be run as user: $EXPECTED_USERNAME" + exit -1 +fi + +print_help() { + cat < OR + ${0##*/} clean-instance +Daemons: + master + tserver + controller + otel-collector +Commands: + create - Start the YB process on this node in cluster creation node (only applicable for + master) + status - Report the status of the YB process + start - Start the YB daemon on this node + stop - Stop the YB daemon on this node + clean - Remove all daemon data from this node + clean-no-conf - Remove all daemon data from this node, except configurations + clean-logs - Remove all daemon logs + -h, --help - Show usage +EOT +} + +check_pid_file() { + if [[ ! 
-f ${daemon_pid_file} ]]; then + print_err_out "Error: PID file does not exist: ${daemon_pid_file}, process is "\ + "probably not running" + exit 1 + fi +} + +exit_on_running() { + if [[ $(check_running) -eq 0 ]]; then + print_err_out "yb-$daemon already running" + exit 0 + fi +} + +NO_PID_FILE=200 +# arg1 [OPTIONAL]: proc_pid -- the PID of the process, else, defaults to contents of daemon_pid_file +check_running() { + set +e + custom_proc_pid=${1:-} + proc_pid=$custom_proc_pid + if [[ -z $proc_pid ]]; then + proc_pid=$(cat ${daemon_pid_file} 2>/dev/null) + if [[ $? -ne 0 ]]; then + echo $NO_PID_FILE + return + fi + fi + set -e + kill -0 "$proc_pid" 2>/dev/null + kill_outcome=$? + # Workaround race condition between: + # 1) cron checking the file exists and succeeding + # 2) stop deleting the PID file and stopping the process + # 3) cron then trying to kill and cat the file, failing and then restarting the daemon + # If we searched for a PID file above and then we couldn't find a process to kill, then check if + # the PID file still exists: + # - No, then return 0 so we do not restart the process + # - Yes, then default to outcome of kill command. + if [[ $kill_outcome -ne 0 ]] && [[ -z $custom_proc_pid ]] && [[ ! -f ${daemon_pid_file} ]]; then + echo 0 + else + echo $kill_outcome + fi +} + +get_pid() { + cat ${daemon_pid_file} +} + +print_err_out() { + echo $1 | tee /dev/stderr +} + +# arg1: pid_to_wait_for -- the PID of the process to wait for +wait_pid() { + pid_to_wait_for=$1 + end_time=$(($SECONDS + 10)) + while [[ $SECONDS -lt $end_time ]]; do + if [[ $(check_running "$pid_to_wait_for") -eq 1 ]]; then + break + fi + print_err_out "Waiting on PID: $pid_to_wait_for" + sleep 1 + done +} + +wait_for_dir_or_exit() { + local dir_to_check=$1 + local end_time=$(($SECONDS + 10)) + while [[ $SECONDS -lt $end_time ]]; do + if test -d $dir_to_check; + then + return + else + echo "Waiting for $dir_to_check dir..." 
+ sleep 1 + fi + done + # Exit if the directory never appeared. + exit 1 +} + +clean_data_paths() { + clean_conf_arg=${1:-true} + + set -x + for (( i=0; i
- + )"; for (const auto& [txn, txn_entry] : txn_locks_) { - std::lock_guard txn_lock(txn_entry.mutex); + UniqueLock txn_lock(txn_entry->mutex); const auto& locks = - locks_map == LocksMapType::kGranted ? txn_entry.granted_locks : txn_entry.waiting_locks; + locks_map == LocksMapType::kGranted ? txn_entry->granted_locks : txn_entry->waiting_locks; for (const auto& [subtxn_id, subtxn_locks] : locks) { for (const auto& [object_id, entry] : subtxn_locks) { out << "" @@ -518,9 +1109,9 @@ size_t ObjectLockManagerImpl::TEST_LocksSize(LocksMapType locks_map) const { std::lock_guard lock(global_mutex_); size_t size = 0; for (const auto& [txn, txn_entry] : txn_locks_) { - std::lock_guard txn_lock(txn_entry.mutex); + UniqueLock txn_lock(txn_entry->mutex); const auto& locks = - locks_map == LocksMapType::kGranted ? txn_entry.granted_locks : txn_entry.waiting_locks; + locks_map == LocksMapType::kGranted ? txn_entry->granted_locks : txn_entry->waiting_locks; for (const auto& [subtxn_id, subtxn_locks] : locks) { size += subtxn_locks.size(); } @@ -536,94 +1127,46 @@ size_t ObjectLockManagerImpl::TEST_WaitingLocksSize() const { return TEST_LocksSize(LocksMapType::kWaiting); } -void ObjectLockManagerImpl::DoReleaseTrackedLock( - const ObjectLockPrefix& object_id, const TrackedLockEntry& entry) { - // We don't pass an intents set to unlock so as to trigger notify on every lock release. It is - // necessary as two (or more) transactions could be holding a read lock and one of the txns - // could request a conflicting lock mode. And since conflicts with self should be ignored, we - // need to signal the cond variable on every release, else the lock release call from the other - // transaction wouldn't unblock the waiter. 
- DoUnlockSingleEntry(entry.locked_batch_entry, object_id, entry.state); - - entry.locked_batch_entry.ref_count -= entry.ref_count; - if (entry.locked_batch_entry.ref_count == 0) { - locks_.erase(object_id); - free_lock_entries_.push_back(&entry.locked_batch_entry); +std::unordered_map + ObjectLockManagerImpl::TEST_GetLockStateMapForTxn(const TransactionId& txn) const { + TrackedTxnLockEntryPtr txn_entry; + { + std::lock_guard lock(global_mutex_); + auto txn_it = txn_locks_.find(txn); + if (txn_it == txn_locks_.end()) { + return {}; + } + txn_entry = txn_it->second; } + UniqueLock txn_lock(txn_entry->mutex); + return txn_entry->existing_states; } -void ObjectLockManagerImpl::AcquiredLock( - const LockBatchEntry& lock_entry, TrackedTransactionLockEntry& txn, - SubTransactionId subtxn_id, const OwnerAsString& owner_as_string, - LocksMapType locks_map) { - TRACE_FUNC(); - VLOG_WITH_FUNC(1) << "lock_entry: " << lock_entry.ToString() - << ", owner: " << owner_as_string(); - auto delta = IntentTypeSetAdd(lock_entry.intent_types); - - std::lock_guard txn_lock(txn.mutex); - auto& locks = locks_map == LocksMapType::kGranted ? 
txn.granted_locks : txn.waiting_locks; - auto& subtxn_locks = locks[subtxn_id]; - auto it = subtxn_locks.find(lock_entry.key); - if (it == subtxn_locks.end()) { - it = subtxn_locks.emplace(lock_entry.key, TrackedLockEntry(*lock_entry.locked)).first; - } - it->second.state += delta; - ++it->second.ref_count; -} +ObjectLockManager::ObjectLockManager(ThreadPool* thread_pool, server::RpcServerBase& server) + : impl_(std::make_unique(thread_pool, server)) {} -void ObjectLockManagerImpl::ReleasedLock( - const LockBatchEntry& lock_entry, TrackedTransactionLockEntry& txn, - SubTransactionId subtxn_id, const OwnerAsString& owner_as_string, - LocksMapType locks_map) { - TRACE_FUNC(); - VLOG_WITH_FUNC(1) << "lock_entry: " << lock_entry.ToString() - << ", owner: " << owner_as_string(); - auto delta = IntentTypeSetAdd(lock_entry.intent_types); +ObjectLockManager::~ObjectLockManager() = default; - std::lock_guard txn_lock(txn.mutex); - auto& locks = locks_map == LocksMapType::kGranted ? txn.granted_locks : txn.waiting_locks; - auto subtxn_itr = locks.find(subtxn_id); - if (subtxn_itr == locks.end()) { - LOG_WITH_FUNC(DFATAL) << "No locks found for " << owner_as_string() - << ", cannot release lock on " << AsString(lock_entry.key); - return; - } - auto& subtxn_locks = subtxn_itr->second; - auto it = subtxn_locks.find(lock_entry.key); - if (it == subtxn_locks.end()) { - LOG_WITH_FUNC(DFATAL) << "No lock found for " << owner_as_string() << " on " - << AsString(lock_entry.key) << ", cannot release"; - } - auto& entry = it->second; - entry.state -= delta; - --entry.ref_count; - if (entry.state == 0) { - DCHECK_EQ(entry.ref_count, 0) - << "TrackedLockEntry::ref_count for key " << AsString(lock_entry.key) << " expected to " - << "have been 0 here. 
This could lead to faulty tracking of acquired/waiting object locks " - << "and also issues with garbage collection of free lock entries in ObjectLockManager."; - subtxn_locks.erase(it); - } +void ObjectLockManager::Lock(LockData&& data) { + impl_->Lock(std::move(data)); } -ObjectLockManager::ObjectLockManager() : impl_(std::make_unique()) { } - -ObjectLockManager::~ObjectLockManager() = default; +void ObjectLockManager::Unlock( + const ObjectLockOwner& object_lock_owner, Status resume_with_status) { + impl_->Unlock(object_lock_owner, resume_with_status); +} -bool ObjectLockManager::Lock( - LockBatchEntries& key_to_intent_type, CoarseTimePoint deadline, - const ObjectLockOwner& object_lock_owner) { - return impl_->Lock(key_to_intent_type, deadline, object_lock_owner); +void ObjectLockManager::Poll() { + impl_->Poll(); } -void ObjectLockManager::Unlock( - const std::vector& lock_entry_keys) { - impl_->Unlock(lock_entry_keys); +void ObjectLockManager::Start( + docdb::LocalWaitingTxnRegistry* waiting_txn_registry) { + return impl_->Start(waiting_txn_registry); } -void ObjectLockManager::Unlock(const ObjectLockOwner& object_lock_owner) { - impl_->Unlock(object_lock_owner); +void ObjectLockManager::Shutdown() { + impl_->Shutdown(); } void ObjectLockManager::DumpStatusHtml(std::ostream& out) { @@ -638,4 +1181,9 @@ size_t ObjectLockManager::TEST_WaitingLocksSize() const { return impl_->TEST_WaitingLocksSize(); } +std::unordered_map + ObjectLockManager::TEST_GetLockStateMapForTxn(const TransactionId& txn) const { + return impl_->TEST_GetLockStateMapForTxn(txn); +} + } // namespace yb::docdb diff --git a/src/yb/docdb/object_lock_manager.h b/src/yb/docdb/object_lock_manager.h index 02267f03c01c..25da8c5f024e 100644 --- a/src/yb/docdb/object_lock_manager.h +++ b/src/yb/docdb/object_lock_manager.h @@ -13,18 +13,27 @@ #pragma once -#include -#include -#include - #include "yb/docdb/docdb_fwd.h" -#include "yb/docdb/object_lock_data.h" +#include "yb/docdb/lock_util.h" + +#include 
"yb/server/server_fwd.h" + +#include "yb/util/status_callback.h" + +namespace yb { + +class ThreadPool; -#include "yb/util/monotime.h" -#include "yb/util/ref_cnt_buffer.h" -#include "yb/util/tostring.h" +namespace docdb { -namespace yb::docdb { +struct LockData { + DetermineKeysToLockResult key_to_lock; + CoarseTimePoint deadline; + ObjectLockOwner object_lock_owner; + TabletId status_tablet; + MonoTime start_time; + StdStatusCallback callback; +}; // Helper struct used for keying table/object locks of a transaction. struct TrackedLockEntryKey { @@ -50,32 +59,32 @@ class ObjectLockManagerImpl; // server maintains an instance of the ObjectLockManager. class ObjectLockManager { public: - ObjectLockManager(); + ObjectLockManager(ThreadPool* thread_pool, server::RpcServerBase& server); ~ObjectLockManager(); - // Attempt to lock a batch of keys and track the lock against the given object_lock_owner key. The - // call may be blocked waiting for other conflicting locks to be released. If the entries don't - // exist, they are created. On success, the lock state is exists in-memory until an explicit - // release is called (or the process restarts). - // - // Returns false if was not able to acquire lock until deadline. - MUST_USE_RESULT bool Lock( - LockBatchEntries& key_to_intent_type, CoarseTimePoint deadline, - const ObjectLockOwner& object_lock_owner); - - // Release the batch of locks, if they were acquired at the first place. - void Unlock(const std::vector& lock_entry_keys); + // Attempt to lock a batch of keys and track the lock against data.object_lock_owner key. The + // callback is executed with failure if the locks aren't able to be acquired within the deadline. + void Lock(LockData&& data); // Release all locks held against the given object_lock_owner. 
- void Unlock(const ObjectLockOwner& object_lock_owner); + void Unlock(const ObjectLockOwner& object_lock_owner, Status resume_with_status); + + void Poll(); + + void Start(docdb::LocalWaitingTxnRegistry* waiting_txn_registry); + + void Shutdown(); void DumpStatusHtml(std::ostream& out); size_t TEST_GrantedLocksSize() const; size_t TEST_WaitingLocksSize() const; + std::unordered_map + TEST_GetLockStateMapForTxn(const TransactionId& txn) const; private: std::unique_ptr impl_; }; -} // namespace yb::docdb +} // namespace docdb +} // namespace yb diff --git a/src/yb/docdb/pgsql_operation.cc b/src/yb/docdb/pgsql_operation.cc index 633664f60849..1325c053b9aa 100644 --- a/src/yb/docdb/pgsql_operation.cc +++ b/src/yb/docdb/pgsql_operation.cc @@ -2221,6 +2221,8 @@ Result> PgsqlReadOperation::ExecuteSample() { // Current number of rows to skip before collecting next one for sample double rowstoskip = sampling_state.rowstoskip(); // Variables for the random numbers generator + SCHECK(sampling_state.has_rand_state(), InvalidArgument, + "Invalid sampling state, random state is missing"); YbgPrepareMemoryContext(); YbgReservoirState rstate = NULL; YbgSamplerCreate( diff --git a/src/yb/docdb/scan_choices.cc b/src/yb/docdb/scan_choices.cc index 1c11ad79c28e..bbd87ddb1c8a 100644 --- a/src/yb/docdb/scan_choices.cc +++ b/src/yb/docdb/scan_choices.cc @@ -933,7 +933,7 @@ ScanChoicesPtr ScanChoices::Create( // hash columns in a hash partitioned table. And the hash code column cannot be skip'ed without // skip'ing all hash columns as well. 
if (prefixlen != 0 && !valid_prefixlen) { - LOG(ERROR) + LOG(DFATAL) << "Prefix length: " << prefixlen << " is invalid for schema: " << "num_hash_cols: " << num_hash_cols << ", num_key_cols: " << num_key_cols; } diff --git a/src/yb/docdb/shared_lock_manager-test.cc b/src/yb/docdb/shared_lock_manager-test.cc index 18a704fd7efb..9e97a30a6bf3 100644 --- a/src/yb/docdb/shared_lock_manager-test.cc +++ b/src/yb/docdb/shared_lock_manager-test.cc @@ -202,8 +202,8 @@ TEST_F(SharedLockManagerTest, DumpKeys) { ASSERT_NOK(lb2.status()); ASSERT_STR_CONTAINS( lb2.status().ToString(), - "[{ key: 666F6F intent_types: [kStrongRead, kStrongWrite] existing_state: 0 }, " - "{ key: 626172 intent_types: [kStrongRead, kStrongWrite] existing_state: 0 }]"); + "[{ key: 666F6F intent_types: [kStrongRead, kStrongWrite] }, " + "{ key: 626172 intent_types: [kStrongRead, kStrongWrite] }]"); } } // namespace docdb diff --git a/src/yb/docdb/wait_queue.cc b/src/yb/docdb/wait_queue.cc index 3f172abecd24..99b1cfb4337e 100644 --- a/src/yb/docdb/wait_queue.cc +++ b/src/yb/docdb/wait_queue.cc @@ -2063,7 +2063,7 @@ class WaitQueue::Impl { } filter = [&res, &resume_status](const auto& waiter) { const auto& opt_serial_no = waiter->GetPgSessionRequestVersion(); - if (opt_serial_no && *opt_serial_no < res->pg_session_req_version) { + if (opt_serial_no && res.ok() && *opt_serial_no < res->pg_session_req_version) { resume_status = STATUS_EC_FORMAT( Expired, TransactionError(TransactionErrorCode::kDeadlock), "Couldn't acquire locks due to a potential deadlock"); diff --git a/src/yb/dockv/primitive_value.cc b/src/yb/dockv/primitive_value.cc index 48caabb079b9..ab59ad335933 100644 --- a/src/yb/dockv/primitive_value.cc +++ b/src/yb/dockv/primitive_value.cc @@ -174,7 +174,7 @@ std::string VarIntToString(const std::string& str_val) { VarInt varint; auto status = varint.DecodeFromComparable(str_val); if (!status.ok()) { - LOG(ERROR) << "Unable to decode varint: " << status.message().ToString(); + LOG(DFATAL) << 
"Unable to decode varint: " << status.message().ToString(); return ""; } return varint.ToString(); @@ -184,7 +184,7 @@ std::string DecimalToString(const std::string& str_val) { util::Decimal decimal; auto status = decimal.DecodeFromComparable(str_val); if (!status.ok()) { - LOG(ERROR) << "Unable to decode decimal"; + LOG(DFATAL) << "Unable to decode decimal"; return ""; } return decimal.ToString(); diff --git a/src/yb/dockv/schema_packing.cc b/src/yb/dockv/schema_packing.cc index 5c17d732d907..a7daab8325c6 100644 --- a/src/yb/dockv/schema_packing.cc +++ b/src/yb/dockv/schema_packing.cc @@ -33,7 +33,7 @@ #include "yb/util/fast_varint.h" #include "yb/util/flags/flag_tags.h" -DEFINE_test_flag(bool, dcheck_for_missing_schema_packing, false, +DEFINE_test_flag(bool, dcheck_for_missing_schema_packing, true, "Whether we use check failure for missing schema packing in debug builds"); namespace yb::dockv { diff --git a/src/yb/fs/fs_manager.cc b/src/yb/fs/fs_manager.cc index 30801bc9dba8..a02e3c98d2c6 100644 --- a/src/yb/fs/fs_manager.cc +++ b/src/yb/fs/fs_manager.cc @@ -921,7 +921,7 @@ void FsManager::DumpFileSystemTree(ostream& out) { std::vector objects; Status s = env_->GetChildren(root, &objects); if (!s.ok()) { - LOG(ERROR) << "Unable to list the fs-tree: " << s.ToString(); + LOG(DFATAL) << "Unable to list the fs-tree: " << s.ToString(); return; } diff --git a/src/yb/hnsw/hnsw-test.cc b/src/yb/hnsw/hnsw-test.cc index 37d82ba3a8b8..79b2a2cfed6f 100644 --- a/src/yb/hnsw/hnsw-test.cc +++ b/src/yb/hnsw/hnsw-test.cc @@ -14,14 +14,24 @@ #include "yb/hnsw/hnsw.h" #include "yb/hnsw/hnsw_block_cache.h" +#include "yb/rocksdb/cache.h" + +#include "yb/util/metrics.h" #include "yb/util/random_util.h" +#include "yb/util/size_literals.h" #include "yb/util/test_util.h" +#include "yb/util/thread_holder.h" #include "yb/util/tsan_util.h" #include "yb/vector_index/vector_index_fwd.h" #include "yb/vector_index/distance.h" #include "yb/vector_index/usearch_include_wrapper_internal.h" +using 
namespace std::chrono_literals; +using namespace yb::size_literals; + +METRIC_DEFINE_entity(table); + namespace yb::hnsw { using IndexImpl = unum::usearch::index_dense_gt; @@ -79,10 +89,14 @@ class YbHnswTest : public YBTest { } } - void VerifySearch(const Vector& query_vector, size_t max_results) { + void VerifySearch( + const Vector& query_vector, size_t max_results, YbHnswSearchContext* context = nullptr) { + if (!context) { + context = &context_; + } vector_index::VectorFilter filter = AcceptAllVectors(); auto usearch_results = index_.filtered_search(query_vector.data(), max_results, filter); - auto yb_hnsw_results = yb_hnsw_.Search(query_vector.data(), max_results, filter, context_); + auto yb_hnsw_results = yb_hnsw_.Search(query_vector.data(), max_results, filter, *context); ASSERT_EQ(usearch_results.count, yb_hnsw_results.size()); for (size_t j = 0; j != usearch_results.count; ++j) { std::decay_t expected( @@ -91,18 +105,26 @@ class YbHnswTest : public YBTest { } } - std::vector PrepareRandom(size_t num_vectors, size_t num_searches); + std::vector PrepareRandom(bool load, size_t num_vectors, size_t num_searches); Status InitYbHnsw(bool load); void TestPerf(); void TestSimple(bool load); + void TestRandom(bool load, size_t background_threads); size_t dimensions_ = 8; size_t max_vectors_ = 65536; std::mt19937_64 rng_{42}; unum::usearch::metric_punned_t metric_; IndexImpl index_; - BlockCachePtr block_cache_ = std::make_shared(*Env::Default()); + std::unique_ptr metric_registry_ = std::make_unique(); + MetricEntityPtr metric_entity_ = METRIC_ENTITY_table.Instantiate(metric_registry_.get(), "test"); + BlockCachePtr block_cache_ = std::make_shared( + *Env::Default(), + MemTracker::GetRootTracker()->FindOrCreateTracker(1_GB, "block_cache"), + metric_entity_, + 8_MB, + 4); YbHnsw yb_hnsw_; YbHnswSearchContext context_; }; @@ -144,10 +166,11 @@ TEST_F(YbHnswTest, Persistence) { TestSimple(/* load= */ true); } -std::vector YbHnswTest::PrepareRandom(size_t 
num_vectors, size_t num_searches) { +std::vector YbHnswTest::PrepareRandom( + bool load, size_t num_vectors, size_t num_searches) { EXPECT_LE(num_vectors, max_vectors_); InsertRandomVectors(num_vectors); - EXPECT_OK(InitYbHnsw(false)); + EXPECT_OK(InitYbHnsw(load)); std::vector query_vectors(num_searches); for (auto& vector : query_vectors) { @@ -156,16 +179,49 @@ std::vector YbHnswTest::PrepareRandom(size_t num_vectors, size_t num_sea return query_vectors; } -TEST_F(YbHnswTest, Random) { - constexpr size_t kNumVectors = 16384; +void YbHnswTest::TestRandom(bool load, size_t background_threads = 0) { + constexpr size_t kNumVectors = 65535; constexpr size_t kNumSearches = 1024; constexpr size_t kMaxResults = 20; - auto query_vectors = PrepareRandom(kNumVectors, kNumSearches); - - for (const auto& query_vector : query_vectors) { - ASSERT_NO_FATALS(VerifySearch(query_vector, kMaxResults)); + auto query_vectors = PrepareRandom(load, kNumVectors, kNumSearches); + + if (background_threads) { + ThreadHolder threads; + for (size_t i = 0; i < background_threads; ++i) { + threads.AddThread([this, &stop = threads.stop_flag(), &query_vectors] { + YbHnswSearchContext context; + while (!stop.load()) { + size_t index = RandomUniformInt(0, query_vectors.size() - 1); + ASSERT_NO_FATALS(VerifySearch(query_vectors[index], kMaxResults, &context)); + } + }); + } + threads.WaitAndStop(10s); + } else { + for (const auto& query_vector : query_vectors) { + ASSERT_NO_FATALS(VerifySearch(query_vector, kMaxResults)); + } } + + LOG(INFO) << "Hit: " << block_cache_->metrics().hit->value(); + LOG(INFO) << "Queries: " << block_cache_->metrics().query->value(); + LOG(INFO) << "Read bytes: " << block_cache_->metrics().read->value(); + LOG(INFO) << "Evicted bytes: " << block_cache_->metrics().evict->value(); + LOG(INFO) << "Added bytes: " << block_cache_->metrics().add->value(); + LOG(INFO) << "Removed bytes: " << block_cache_->metrics().remove->value(); +} + +TEST_F(YbHnswTest, Random) { + 
TestRandom(false); +} + +TEST_F(YbHnswTest, Cache) { + TestRandom(true); +} + +TEST_F(YbHnswTest, ConcurrentCache) { + TestRandom(true, 4); } void YbHnswTest::TestPerf() { @@ -176,7 +232,7 @@ void YbHnswTest::TestPerf() { max_vectors_ = num_vectors; - auto query_vectors = PrepareRandom(num_vectors, num_searches); + auto query_vectors = PrepareRandom(false, num_vectors, num_searches); YbHnswSearchContext context; vector_index::VectorFilter filter = AcceptAllVectors(); MonoTime start = MonoTime::Now(); diff --git a/src/yb/hnsw/hnsw.cc b/src/yb/hnsw/hnsw.cc index 744b504ef4c6..bbc571cbd724 100644 --- a/src/yb/hnsw/hnsw.cc +++ b/src/yb/hnsw/hnsw.cc @@ -21,6 +21,7 @@ #include "yb/util/cast.h" #include "yb/util/env.h" #include "yb/util/flags.h" +#include "yb/util/scope_exit.h" #include "yb/util/size_literals.h" using namespace yb::size_literals; @@ -90,15 +91,10 @@ class YbHnswBuilder { const std::string& path) : index_(index), block_cache_(block_cache), path_(path) {} - Result> Build() { + Result> Build() { header_.Init(index_); PrepareVectors(); - VLOG_WITH_FUNC(4) - << "Size: " << index_.size() << ", max level: " << header_.max_level << ", dimensions: " - << header_.dimensions << ", connectivity: " << header_.config.connectivity - << ", connectivity_base: " << header_.config.connectivity_base - << ", expansion_search: " << header_.config.expansion_search << ", layers: " - << AsString(header_.layers); + VLOG_WITH_FUNC(4) << "Size: " << index_.size() << ", header: " << header_.ToString(); auto tmp_path = path_ + ".tmp"; RETURN_NOT_OK(block_cache_.env().NewWritableFile(tmp_path, &out_)); @@ -113,10 +109,9 @@ class YbHnswBuilder { RETURN_NOT_OK(block_cache_.env().RenameFile(tmp_path, path_)); std::unique_ptr file; RETURN_NOT_OK(block_cache_.env().NewRandomAccessFile(path_, &file)); - auto file_block_cache = std::make_unique(std::move(file), &builder_); - auto result = file_block_cache.get(); - block_cache_.Register(std::move(file_block_cache)); - return std::pair(result, 
header_); + auto file_block_cache = std::make_unique( + block_cache_, std::move(file), &builder_); + return std::pair(std::move(file_block_cache), header_); } private: @@ -366,6 +361,11 @@ void Header::Init(const unum::usearch::index_dense_gt& i max_vectors_per_non_base_block = CalcMaxVectorsPerLayerBlock(max_block_size, config.connectivity); } +YbHnsw::YbHnsw(Metric& metric) : metric_(metric) { +} + +YbHnsw::~YbHnsw() = default; + Status YbHnsw::Import( const unum::usearch::index_dense_gt& index, const std::string& path, BlockCachePtr block_cache) { @@ -379,17 +379,18 @@ Status YbHnsw::Init(const std::string& path, BlockCachePtr block_cache) { block_cache_ = std::move(block_cache); std::unique_ptr file; RETURN_NOT_OK(block_cache_->env().NewRandomAccessFile(path, &file)); - auto file_block_cache = std::make_unique(std::move(file)); - header_ = VERIFY_RESULT(file_block_cache->Load()); - file_block_cache_ = file_block_cache.get(); - block_cache_->Register(std::move(file_block_cache)); + file_block_cache_ = std::make_unique(*block_cache_, std::move(file)); + header_ = VERIFY_RESULT(file_block_cache_->Load()); return Status::OK(); } YbHnsw::SearchResult YbHnsw::Search( const std::byte* query_vector, size_t max_results, const vector_index::VectorFilter& filter, YbHnswSearchContext& context) const { - auto [best_vector, best_dist] = SearchInNonBaseLayers(query_vector); + context.search_cache.Bind(header_, *file_block_cache_); + auto se = ScopeExit([&context] { context.search_cache.Release(); }); + auto [best_vector, best_dist] = SearchInNonBaseLayers( + query_vector, context.search_cache); SearchInBaseLayer(query_vector, best_vector, best_dist, max_results, filter, context); return MakeResult(max_results, context); } @@ -402,23 +403,23 @@ YbHnsw::SearchResult YbHnsw::MakeResult(size_t max_results, YbHnswSearchContext& SearchResult result; result.reserve(top.size()); for (auto [distance, vector] : top) { - result.emplace_back(GetVectorData(vector), distance); + 
result.emplace_back(context.search_cache.GetVectorData(vector), distance); } return result; } std::pair YbHnsw::SearchInNonBaseLayers( - const std::byte* query_vector) const { + const std::byte* query_vector, SearchCache& cache) const { auto best_vector = header_.entry; - auto best_dist = Distance(query_vector, best_vector); + auto best_dist = Distance(query_vector, best_vector, cache); VLOG_WITH_FUNC(4) << "best_vector: " << best_vector << ", best_dist: " << best_dist; for (auto level = header_.max_level; level > 0;) { auto updated = false; VLOG_WITH_FUNC(4) << "level: " << level << ", best_vector: " << best_vector << ", best_dist: " << best_dist; - for (auto neighbor : GetNeighborsInNonBaseLayer(level, best_vector)) { - auto neighbor_dist = Distance(query_vector, neighbor); + for (auto neighbor : cache.GetNeighborsInNonBaseLayer(level, best_vector)) { + auto neighbor_dist = Distance(query_vector, neighbor, cache); VLOG_WITH_FUNC(4) << "level: " << level << ", neighbor: " << neighbor << ", neighbor_dist: " << neighbor_dist; @@ -447,6 +448,7 @@ void YbHnsw::SearchInBaseLayer( visited.clear(); auto& next = context.next; next.clear(); + auto& cache = context.search_cache; // We will visit at least entry vector and its neighbors. // So could use the following as initial capacity for visited. 
@@ -456,7 +458,7 @@ void YbHnsw::SearchInBaseLayer( auto extra_top_limit = std::max( header_.config.expansion_search, max_results) - max_results; next.push({best_dist, best_vector}); - if (!filter || filter(GetVectorData(best_vector))) { + if (!filter || filter(cache.GetVectorData(best_vector))) { top.push({best_dist, best_vector}); } visited.set(best_vector); @@ -468,19 +470,19 @@ void YbHnsw::SearchInBaseLayer( break; } next.pop(); - auto neighbors = GetNeighborsInBaseLayer(vector); + auto neighbors = cache.GetNeighborsInBaseLayer(vector); visited.reserve(visited.size() + std::ranges::size(neighbors)); for (auto neighbor : neighbors) { if (visited.set(neighbor)) { continue; } - auto neighbor_dist = Distance(query_vector, neighbor); + auto neighbor_dist = Distance(query_vector, neighbor, cache); if (top.size() < top_limit || extra_top.size() < extra_top_limit || neighbor_dist < best_dist) { next.push({neighbor_dist, neighbor}); - if (!filter || filter(GetVectorData(neighbor))) { + if (!filter || filter(cache.GetVectorData(neighbor))) { if (top.size() == top_limit) { auto extra_push = top.top().first; if (neighbor_dist < extra_push) { @@ -503,11 +505,56 @@ void YbHnsw::SearchInBaseLayer( } } -boost::iterator_range> YbHnsw::GetNeighborsInBaseLayer( - size_t vector) const { +YbHnsw::DistanceType YbHnsw::Distance(const std::byte* lhs, const std::byte* rhs) const { + using unum::usearch::byte_t; + return metric_(pointer_cast(lhs), pointer_cast(rhs)); +} + +YbHnsw::DistanceType YbHnsw::Distance( + const std::byte* lhs, size_t vector, SearchCache& cache) const { + return Distance(lhs, cache.CoordinatesPtr(vector)); +} + +boost::iterator_range> YbHnsw::MakeCoordinates( + const std::byte* ptr) const { + auto start = MisalignedPtr(ptr); + return boost::make_iterator_range(start, start + header_.dimensions); +} + +boost::iterator_range> YbHnsw::Coordinates( + size_t vector, SearchCache& cache) const { + return MakeCoordinates(cache.CoordinatesPtr(vector)); +} + +const 
std::byte* SearchCache::Data(size_t index) { + auto& block = blocks_[index]; + if (block) { + return block; + } + auto data = CHECK_RESULT(file_block_cache_->Take(index)); + used_blocks_.push_back(index); + return block = data; +} + +void SearchCache::Bind(std::reference_wrapper header, FileBlockCache& cache) { + DCHECK(used_blocks_.empty()); + header_ = &header.get(); + file_block_cache_ = &cache; + blocks_.resize(cache.size()); +} + +void SearchCache::Release() { + for (auto block : used_blocks_) { + blocks_[block] = nullptr; + file_block_cache_->Release(block); + } + used_blocks_.clear(); +} + +boost::iterator_range> SearchCache::GetNeighborsInBaseLayer( + size_t vector) { auto vector_data = VectorHeader(vector); - auto base_ptr = file_block_cache_->Data(*YB_MISALIGNED_PTR( - vector_data, VectorData, base_layer_neighbors_block)); + auto base_ptr = Data(*YB_MISALIGNED_PTR(vector_data, VectorData, base_layer_neighbors_block)); auto begin = *YB_MISALIGNED_PTR(vector_data, VectorData, base_layer_neighbors_begin); auto end = *YB_MISALIGNED_PTR(vector_data, VectorData, base_layer_neighbors_end); return boost::make_iterator_range( @@ -515,13 +562,19 @@ boost::iterator_range> YbHnsw::GetNeighborsInBaseL MisalignedPtr(base_ptr + end * kNeighborSize)); } -boost::iterator_range> YbHnsw::GetNeighborsInNonBaseLayer( - size_t level, size_t vector) const { - auto max_vectors_per_block = header_.max_vectors_per_non_base_block; +MisalignedPtr SearchCache::VectorHeader(size_t vector) { + return MisalignedPtr(BlockPtr( + header_->vector_data_block, header_->vector_data_amount_per_block, vector, + header_->vector_data_size)); +} + +boost::iterator_range> SearchCache::GetNeighborsInNonBaseLayer( + size_t level, size_t vector) { + auto max_vectors_per_block = header_->max_vectors_per_non_base_block; auto block_index = vector / max_vectors_per_block; vector %= max_vectors_per_block; - auto& layer = header_.layers[level]; - auto base_ptr = file_block_cache_->Data(layer.block + 
block_index); + auto& layer = header_->layers[level]; + auto base_ptr = Data(layer.block + block_index); auto finish = Load(base_ptr + vector * kNeighborsRefSize); auto start = vector > 0 ? Load(base_ptr + (vector - 1) * kNeighborsRefSize) : 0; @@ -540,54 +593,28 @@ boost::iterator_range> YbHnsw::GetNeighborsInNonBa return result; } -const std::byte* YbHnsw::BlockPtr( - size_t block, size_t entries_per_block, size_t entry, size_t entry_size) const { +const std::byte* SearchCache::BlockPtr( + size_t block, size_t entries_per_block, size_t entry, size_t entry_size) { block += entry / entries_per_block; entry %= entries_per_block; - return file_block_cache_->Data(block) + entry * entry_size; + return Data(block) + entry * entry_size; } -Slice YbHnsw::GetVectorDataSlice(size_t vector) const { +Slice SearchCache::GetVectorDataSlice(size_t vector) { auto vector_data = VectorHeader(vector); - auto base_ptr = file_block_cache_->Data( + auto base_ptr = Data( *YB_MISALIGNED_PTR(vector_data, VectorData, aux_data_block)); auto begin = *YB_MISALIGNED_PTR(vector_data, VectorData, aux_data_begin); auto end = *YB_MISALIGNED_PTR(vector_data, VectorData, aux_data_end); return Slice(base_ptr + begin, base_ptr + end); } -vector_index::VectorId YbHnsw::GetVectorData(size_t vector) const { +vector_index::VectorId SearchCache::GetVectorData(size_t vector) { return vector_index::TryFullyDecodeVectorId(GetVectorDataSlice(vector)); } -YbHnsw::DistanceType YbHnsw::Distance(const std::byte* lhs, const std::byte* rhs) const { - using unum::usearch::byte_t; - return metric_(pointer_cast(lhs), pointer_cast(rhs)); -} - -YbHnsw::DistanceType YbHnsw::Distance(const std::byte* lhs, size_t vector) const { - return Distance(lhs, CoordinatesPtr(vector)); -} - -MisalignedPtr YbHnsw::VectorHeader(size_t vector) const { - return MisalignedPtr(BlockPtr( - header_.vector_data_block, header_.vector_data_amount_per_block, vector, - header_.vector_data_size)); -} - -const std::byte* 
YbHnsw::CoordinatesPtr(size_t vector) const { +const std::byte* SearchCache::CoordinatesPtr(size_t vector) { return VectorHeader(vector).raw() + offsetof(VectorData, coordinates); } -boost::iterator_range> YbHnsw::MakeCoordinates( - const std::byte* ptr) const { - auto start = MisalignedPtr(ptr); - return boost::make_iterator_range(start, start + header_.dimensions); -} - -boost::iterator_range> YbHnsw::Coordinates( - size_t vector) const { - return MakeCoordinates(CoordinatesPtr(vector)); -} - } // namespace yb::hnsw diff --git a/src/yb/hnsw/hnsw.h b/src/yb/hnsw/hnsw.h index a1e0408332e9..40f16d46b9ee 100644 --- a/src/yb/hnsw/hnsw.h +++ b/src/yb/hnsw/hnsw.h @@ -38,6 +38,34 @@ namespace yb::hnsw { struct YbHnswVectorData; +// Provides access to a raw bytes data for a single search. +// Could be reused between searches using Bind/Release method. +class SearchCache { + public: + const std::byte* Data(size_t index); + + void Bind(std::reference_wrapper header, FileBlockCache& cache); + void Release(); + + boost::iterator_range> GetNeighborsInNonBaseLayer( + size_t level, size_t vector); + MisalignedPtr VectorHeader(size_t vector); + boost::iterator_range> GetNeighborsInBaseLayer( + size_t vector); + vector_index::VectorId GetVectorData(size_t vector); + const std::byte* CoordinatesPtr(size_t vector); + + private: + Slice GetVectorDataSlice(size_t vector); + const std::byte* BlockPtr( + size_t block, size_t entries_per_block, size_t entry, size_t entry_size); + + const Header* header_ = nullptr; + FileBlockCache* file_block_cache_ = nullptr; + std::vector blocks_; + std::vector used_blocks_; +}; + struct YbHnswSearchContext { using HeapEntry = std::pair; @@ -56,6 +84,7 @@ struct YbHnswSearchContext { Top top; ExtraTop extra_top; NextQueue next; + SearchCache search_cache; }; class YbHnsw { @@ -65,7 +94,8 @@ class YbHnsw { using Metric = unum::usearch::metric_punned_t; using SearchResult = std::vector>; - explicit YbHnsw(Metric& metric) : metric_(metric) {} + explicit 
YbHnsw(Metric& metric); + ~YbHnsw(); // Imports specified index to YbHnsw structure, also storing this structure to disk. Status Import( @@ -86,36 +116,25 @@ class YbHnsw { } private: - std::pair SearchInNonBaseLayers(const std::byte* query_vector) const; + std::pair SearchInNonBaseLayers( + const std::byte* query_vector, SearchCache& cache) const; void SearchInBaseLayer( const std::byte* query_vector, VectorNo best_vector, DistanceType best_dist, size_t max_results, const vector_index::VectorFilter& filter, YbHnswSearchContext& context) const; SearchResult MakeResult(size_t max_results, YbHnswSearchContext& context) const; - boost::iterator_range> GetNeighborsInNonBaseLayer( - size_t level, size_t vector) const; - - boost::iterator_range> GetNeighborsInBaseLayer( - size_t vector) const; - - const std::byte* BlockPtr( - size_t block, size_t entries_per_block, size_t entry, size_t entry_size) const; - - Slice GetVectorDataSlice(size_t vector) const; - vector_index::VectorId GetVectorData(size_t vector) const; DistanceType Distance(const std::byte* lhs, const std::byte* rhs) const; - DistanceType Distance(const std::byte* lhs, size_t vector) const; - MisalignedPtr VectorHeader(size_t vector) const; - const std::byte* CoordinatesPtr(size_t vector) const; + DistanceType Distance(const std::byte* lhs, size_t vector, SearchCache& cache) const; boost::iterator_range> MakeCoordinates( const std::byte* ptr) const; - boost::iterator_range> Coordinates(size_t vector) const; + boost::iterator_range> Coordinates( + size_t vector, SearchCache& cache) const; Metric& metric_; Header header_; - std::shared_ptr block_cache_; - FileBlockCache* file_block_cache_ = nullptr; + BlockCachePtr block_cache_; + FileBlockCachePtr file_block_cache_; }; } // namespace yb::hnsw diff --git a/src/yb/hnsw/hnsw_block_cache.cc b/src/yb/hnsw/hnsw_block_cache.cc index 9f6c9ce0034d..cfa297a7ab7a 100644 --- a/src/yb/hnsw/hnsw_block_cache.cc +++ b/src/yb/hnsw/hnsw_block_cache.cc @@ -13,13 +13,42 @@ 
#include "yb/hnsw/hnsw_block_cache.h" +#include + #include "yb/hnsw/block_writer.h" #include "yb/util/crc.h" +#include "yb/util/metrics.h" #include "yb/util/size_literals.h" using namespace yb::size_literals; +METRIC_DEFINE_counter(table, vector_index_cache_hit, "Vector index block cache hits", + yb::MetricUnit::kCacheHits, + "Number of hits of vector index block cache"); + +METRIC_DEFINE_counter(table, vector_index_cache_query, "Vector index block cache query", + yb::MetricUnit::kCacheQueries, + "Number of queries of vector index block cache"); + +METRIC_DEFINE_counter(table, vector_index_read, "Vector index block read bytes", + yb::MetricUnit::kBytes, + "Number of bytes read by vector index block cache"); + +METRIC_DEFINE_counter(table, vector_index_cache_add, "Vector index block bytes added to cache", + yb::MetricUnit::kBytes, + "Number of bytes added to vector index block cache"); + +METRIC_DEFINE_counter(table, vector_index_cache_evict, + "Vector index block bytes evicted from cache", + yb::MetricUnit::kBytes, + "Number of bytes evicted from vector index block cache"); + +METRIC_DEFINE_counter(table, vector_index_cache_remove, + "Vector index block bytes removed from cache", + yb::MetricUnit::kBytes, + "Number of bytes removed from vector index block cache"); + namespace yb::hnsw { namespace { @@ -151,9 +180,168 @@ void Deserialize(size_t version, Out& out, Args&&... 
args) { } // namespace -void BlockCache::Register(FileBlockCachePtr&& file_block_cache) { - std::lock_guard guard(mutex_); - files_.push_back(std::move(file_block_cache)); +BlockCacheMetrics::BlockCacheMetrics(const MetricEntityPtr& entity) + : hit(METRIC_vector_index_cache_hit.Instantiate(entity)), + query(METRIC_vector_index_cache_query.Instantiate(entity)), + read(METRIC_vector_index_read.Instantiate(entity)), + add(METRIC_vector_index_cache_add.Instantiate(entity)), + evict(METRIC_vector_index_cache_evict.Instantiate(entity)), + remove(METRIC_vector_index_cache_remove.Instantiate(entity)) { +} + +struct CachedBlock : boost::intrusive::list_base_hook<> { + BlockCacheShard* shard; + size_t end; + size_t size; + // Guarded by BlockCacheShard mutex. + int64_t use_count; + std::atomic data{nullptr}; + std::mutex load_mutex; + DataBlock content GUARDED_BY(load_mutex); + + Result Load( + RandomAccessFile& file, MemTracker& mem_tracker, BlockCacheMetrics& metrics) { + std::lock_guard lock(load_mutex); + auto result = data.load(std::memory_order_acquire); + if (result) { + return result; + } + if (!content.empty()) { + // It could happen that block is in the process of eviction, in this case we could just + // restore data pointing to content instead of reloading it + data.store(result = content.data(), std::memory_order_release); + return result; + } + mem_tracker.Consume(size); + content.resize(size); + Slice read_result; + RETURN_NOT_OK(file.Read(end - size, size, &read_result, content.data())); + RSTATUS_DCHECK_EQ( + read_result.size(), content.size(), Corruption, + Format("Wrong number of read bytes in block $0 - $1", end - size, size)); + metrics.read->IncrementBy(read_result.size()); + data.store(result = content.data(), std::memory_order_release); + return result; + } + + void Unload(MemTracker& mem_tracker) { + std::lock_guard lock(load_mutex); + if (data.load(std::memory_order_acquire)) { + // The Load was called during eviction, no need to unload data. 
+ return; + } + content = {}; + mem_tracker.Release(size); + } +}; + +class alignas(CACHELINE_SIZE) BlockCacheShard { + public: + void Init(size_t capacity, BlockCache& block_cache) { + capacity_ = capacity; + block_cache_ = &block_cache; + } + + Result Take(CachedBlock& block, RandomAccessFile& file) { + block_cache_->metrics().query->Increment(); + { + std::lock_guard lock(mutex_); + ++block.use_count; + // Remove block from LRU while it is used. + if (block.is_linked()) { + DCHECK_EQ(block.use_count, 1); + RemoveBlockFromLRU(block); + } + + auto data = block.data.load(std::memory_order_acquire); + if (data) { + block_cache_->metrics().hit->Increment(); + return data; + } + } + return VERIFY_RESULT( + block.Load(file, *block_cache_->mem_tracker(), block_cache_->metrics())); + } + + void Release(CachedBlock& block) { + boost::container::small_vector evicted_blocks; + { + std::lock_guard lock(mutex_); + DCHECK(!block.is_linked()); + if (--block.use_count != 0) { + return; + } + Evict(block.size, evicted_blocks); + block_cache_->metrics().add->IncrementBy(block.size); + consumption_ += block.size; + lru_.push_back(block); + } + size_t evicted_bytes = 0; + for (auto* evicted_block : evicted_blocks) { + evicted_block->Unload(*block_cache_->mem_tracker()); + evicted_bytes += evicted_block->size; + } + if (evicted_bytes) { + block_cache_->metrics().evict->IncrementBy(evicted_bytes); + } + } + + void Remove(CachedBlock& block) { + std::lock_guard lock(mutex_); + DCHECK_EQ(block.use_count, 0); + if (block.is_linked()) { + RemoveBlockFromLRU(block); + } + } + + private: + using Blocks = boost::intrusive::list; + + void RemoveBlockFromLRU(CachedBlock& block) REQUIRES(mutex_) { + block_cache_->metrics().remove->IncrementBy(block.size); + lru_.erase(lru_.iterator_to(block)); + consumption_ -= block.size; + } + + void Evict( + size_t space_required, + boost::container::small_vector_base& evicted_blocks) REQUIRES(mutex_) { + space_required = std::min(space_required, capacity_); 
+ auto it = lru_.begin(); + while (consumption_ + space_required > capacity_ && it != lru_.end()) { + auto& block = *it; + ++it; + block.data.store(nullptr, std::memory_order_release); + lru_.erase(lru_.iterator_to(block)); + consumption_ -= block.size; + evicted_blocks.push_back(&block); + } + } + + size_t capacity_ = 0; + BlockCache* block_cache_ = nullptr; + std::mutex mutex_; + Blocks lru_ GUARDED_BY(mutex_); + size_t consumption_ GUARDED_BY(mutex_) = 0; +}; + +BlockCache::BlockCache( + Env& env, const MemTrackerPtr& mem_tracker, const MetricEntityPtr& metric_entity, + size_t capacity, size_t num_shard_bits) + : env_(env), + mem_tracker_(mem_tracker), + metrics_(std::make_unique(metric_entity)), + shards_mask_((1ULL << num_shard_bits) - 1), + shards_(std::make_unique(shards_mask_ + 1)) { + for (size_t i = 0; i <= shards_mask_; ++i) { + shards_[i].Init(capacity >> num_shard_bits, *this); + } +} + +BlockCache::~BlockCache() = default; + +BlockCacheShard& BlockCache::NextShard() { + return shards_[(next_shard_++) & shards_mask_]; } DataBlock FileBlockCacheBuilder::MakeFooter(const Header& header) const { @@ -177,28 +365,44 @@ DataBlock FileBlockCacheBuilder::MakeFooter(const Header& header) const { } FileBlockCache::FileBlockCache( - std::unique_ptr file, FileBlockCacheBuilder* builder) - : file_(std::move(file)) { + BlockCache& block_cache, std::unique_ptr file, + FileBlockCacheBuilder* builder) + : block_cache_(block_cache), file_(std::move(file)) { if (!builder) { return; } auto& blocks = builder->blocks(); - blocks_.reserve(blocks.size()); size_t total_size = 0; - for (auto& block : blocks) { - total_size += block.size(); - blocks_.emplace_back(BlockInfo { - .end = total_size, - .content = std::move(block), - }); + size_t index = 0; + AllocateBlocks(blocks.size()); + for (auto& data : blocks) { + total_size += data.size(); + auto& block = blocks_[index]; + block.shard = &block_cache_.NextShard(); + block.end = total_size; + block.size = data.size(); + 
block.content = std::move(data); + block_cache.mem_tracker()->Consume(block.size); + block.use_count = 1; + block.shard->Release(block); + ++index; } blocks.clear(); } -FileBlockCache::~FileBlockCache() = default; +FileBlockCache::~FileBlockCache() { + for (size_t i = 0; i != size_; ++i) { + blocks_[i].shard->Remove(blocks_[i]); + } +} + +void FileBlockCache::AllocateBlocks(size_t size) { + size_ = size; + blocks_.reset(new CachedBlock[size]); +} Result
FileBlockCache::Load() { - DCHECK(blocks_.empty()); + DCHECK_EQ(blocks_, nullptr); using FooterSizeType = uint64_t; auto file_size = VERIFY_RESULT(file_->Size()); @@ -226,20 +430,26 @@ Result
FileBlockCache::Load() { SliceReader reader(footer_data); size_t version = reader.Read(); Deserialize(version, header, reader); - blocks_.resize(reader.Left() / sizeof(uint64_t)); + AllocateBlocks(reader.Left() / sizeof(uint64_t)); size_t prev_end = 0; - for (size_t i = 0; i < blocks_.size(); i++) { + for (size_t i = 0; i < size_; i++) { + blocks_[i].shard = &block_cache_.NextShard(); blocks_[i].end = reader.Read(); - blocks_[i].content.resize(blocks_[i].end - prev_end); - Slice read_result; - RETURN_NOT_OK(file_->Read( - prev_end, blocks_[i].content.size(), &read_result, blocks_[i].content.data())); - RSTATUS_DCHECK_EQ( - read_result.size(), blocks_[i].content.size(), Corruption, - Format("Wrong number of read bytes in block $0", i)); + blocks_[i].size = blocks_[i].end - prev_end; + blocks_[i].use_count = 0; prev_end = blocks_[i].end; } return header; } +Result FileBlockCache::Take(size_t index) { + auto& block = blocks_[index]; + return block.shard->Take(block, *file_); +} + +void FileBlockCache::Release(size_t index) { + auto& block = blocks_[index]; + block.shard->Release(block); +} + } // namespace yb::hnsw diff --git a/src/yb/hnsw/hnsw_block_cache.h b/src/yb/hnsw/hnsw_block_cache.h index 4142aeb575be..c179cf9de264 100644 --- a/src/yb/hnsw/hnsw_block_cache.h +++ b/src/yb/hnsw/hnsw_block_cache.h @@ -16,6 +16,8 @@ #include "yb/hnsw/types.h" #include "yb/util/env.h" +#include "yb/util/metrics_fwd.h" +#include "yb/util/mem_tracker.h" namespace yb::hnsw { @@ -35,41 +37,73 @@ class FileBlockCacheBuilder { std::vector blocks_; }; +class BlockCacheShard; +struct CachedBlock; + +struct BlockCacheMetrics { + explicit BlockCacheMetrics(const MetricEntityPtr& entity); + + CounterPtr hit; + CounterPtr query; + CounterPtr read; + CounterPtr add; + CounterPtr evict; + CounterPtr remove; +}; + class FileBlockCache { public: - explicit FileBlockCache( - std::unique_ptr file, FileBlockCacheBuilder* builder = nullptr); + FileBlockCache( + BlockCache& block_cache, std::unique_ptr 
file, + FileBlockCacheBuilder* builder = nullptr); ~FileBlockCache(); Result
Load(); - const std::byte* Data(size_t index) { - return blocks_[index].content.data(); + size_t size() const { + return size_; } + Result Take(size_t index); + void Release(size_t index); + private: + void AllocateBlocks(size_t size); + + BlockCache& block_cache_; std::unique_ptr file_; - struct BlockInfo { - size_t end; - DataBlock content; - }; - std::vector blocks_; + std::unique_ptr blocks_; + size_t size_ = 0; }; class BlockCache { public: - explicit BlockCache(Env& env) : env_(env) {} + BlockCache( + Env& env, const MemTrackerPtr& mem_tracker, const MetricEntityPtr& metric_entity, + size_t capacity, size_t num_shard_bits); + ~BlockCache(); - void Register(FileBlockCachePtr&& file_block_cache); + BlockCacheShard& NextShard(); Env& env() const { return env_; } + const MemTrackerPtr& mem_tracker() const { + return mem_tracker_; + } + + BlockCacheMetrics& metrics() const { + return *metrics_; + } + private: Env& env_; - std::mutex mutex_; - std::vector files_ GUARDED_BY(mutex_); + const MemTrackerPtr mem_tracker_; + std::unique_ptr metrics_; + const size_t shards_mask_; + std::atomic next_shard_ = 0; + std::unique_ptr shards_; }; Status WriteFooter(); diff --git a/src/yb/hnsw/types.h b/src/yb/hnsw/types.h index b406835ec4c9..da88ac6171dc 100644 --- a/src/yb/hnsw/types.h +++ b/src/yb/hnsw/types.h @@ -35,6 +35,10 @@ struct Config { uint64_t connectivity_base = 0; uint64_t connectivity = 0; uint64_t expansion_search = 0; // TODO(vector_index) Don't need to store it. 
+ + std::string ToString() const { + return YB_STRUCT_TO_STRING(connectivity_base, connectivity, expansion_search); + } }; struct LayerInfo { @@ -62,6 +66,12 @@ struct Header { std::vector layers; void Init(const unum::usearch::index_dense_gt& index); + + std::string ToString() const { + return YB_STRUCT_TO_STRING( + dimensions, vector_data_size, entry, max_level, config, max_block_size, + max_vectors_per_non_base_block, vector_data_block, vector_data_amount_per_block, layers); + } }; } // namespace yb::hnsw diff --git a/src/yb/integration-tests/cassandra_cpp_driver-test.cc b/src/yb/integration-tests/cassandra_cpp_driver-test.cc index c78204bdd04f..e7f3fa4631ff 100644 --- a/src/yb/integration-tests/cassandra_cpp_driver-test.cc +++ b/src/yb/integration-tests/cassandra_cpp_driver-test.cc @@ -448,11 +448,9 @@ class Metrics { const auto result = ts.GetMetricFromHost( host_port, &METRIC_ENTITY_server, entity_id, CHECK_NOTNULL(metric_proto), "total_count"); - if (!result.ok()) { - LOG(ERROR) << "Failed to get metric " << metric_proto->name() << " from TS" - << ts_index << ": " << host_port << " with error " << result.status(); - } - ASSERT_OK(result); + ASSERT_TRUE(result.ok()) + << "Failed to get metric " << metric_proto->name() << " from TS" + << ts_index << ": " << host_port << " with error " << result.status(); *CHECK_NOTNULL(value) = *result; } @@ -1635,7 +1633,7 @@ TEST_F_EX(CppCassandraDriverTest, TestCreateUniqueIndexIntent, CppCassandraDrive "Overwrite failed"); SleepFor(MonoDelta::FromMilliseconds(kSleepTimeMs)); } else { - LOG(ERROR) << "Deleting & Inserting failed for " << i; + LOG(WARNING) << "Deleting & Inserting failed for " << i; } } @@ -1658,7 +1656,7 @@ TEST_F_EX(CppCassandraDriverTest, TestCreateUniqueIndexIntent, CppCassandraDrive "Overwrite failed"); SleepFor(MonoDelta::FromMilliseconds(kSleepTimeMs)); } else { - LOG(ERROR) << "Deleting & Inserting failed for " << i; + LOG(WARNING) << "Deleting & Inserting failed for " << i; } } @@ -1716,7 +1714,7 @@ 
TEST_F_EX( "Overwrite failed"); SleepFor(MonoDelta::FromMilliseconds(kSleepTimeMs)); } else { - LOG(ERROR) << "Deleting & Inserting failed for " << i; + LOG(WARNING) << "Deleting & Inserting failed for " << i; } } @@ -1740,7 +1738,7 @@ TEST_F_EX( "Overwrite failed"); SleepFor(MonoDelta::FromMilliseconds(kSleepTimeMs)); } else { - LOG(ERROR) << "Deleting & Inserting failed for " << i; + LOG(WARNING) << "Deleting & Inserting failed for " << i; } } @@ -2369,8 +2367,8 @@ void DoTestCreateUniqueIndexWithOnlineWrites(CppCassandraDriverTestIndex* test, if (!duplicate_insert_failed) { LOG(INFO) << "Successfully inserted the duplicate value"; } else { - LOG(ERROR) << "Giving up on inserting the duplicate value after " - << kMaxRetries << " tries."; + LOG(WARNING) << "Giving up on inserting the duplicate value after " + << kMaxRetries << " tries."; } LOG(INFO) << "Waited on the Create Index to finish. Status = " diff --git a/src/yb/integration-tests/cdcsdk_gflag-test.cc b/src/yb/integration-tests/cdcsdk_gflag-test.cc index a483d2bcfbe5..2fdc852d90cf 100644 --- a/src/yb/integration-tests/cdcsdk_gflag-test.cc +++ b/src/yb/integration-tests/cdcsdk_gflag-test.cc @@ -17,7 +17,7 @@ #include "yb/util/test_macros.h" DECLARE_int32(cdc_snapshot_batch_size); -DECLARE_int32(cdc_max_stream_intent_records); +DECLARE_uint64(cdc_max_stream_intent_records); namespace yb { namespace cdc { diff --git a/src/yb/integration-tests/cdcsdk_test_base.h b/src/yb/integration-tests/cdcsdk_test_base.h index 49dec8d965fa..826a6446c16b 100644 --- a/src/yb/integration-tests/cdcsdk_test_base.h +++ b/src/yb/integration-tests/cdcsdk_test_base.h @@ -58,6 +58,7 @@ DECLARE_bool(ysql_yb_allow_replication_slot_ordering_modes); DECLARE_bool(cdc_send_null_before_image_if_not_exists); DECLARE_bool(enable_tablet_split_of_replication_slot_streamed_tables); DECLARE_bool(TEST_simulate_load_txn_for_cdc); +DECLARE_bool(TEST_dcheck_for_missing_schema_packing); namespace yb { using client::YBClient; @@ -146,6 +147,7 @@ class 
CDCSDKTestBase : public YBTest { ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_enable_packed_row_for_colocated_table) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_dcheck_for_missing_schema_packing) = false; } void TearDown() override; diff --git a/src/yb/integration-tests/cdcsdk_ysql_test_base.cc b/src/yb/integration-tests/cdcsdk_ysql_test_base.cc index 332e97d1fe1b..8bc8a684f00b 100644 --- a/src/yb/integration-tests/cdcsdk_ysql_test_base.cc +++ b/src/yb/integration-tests/cdcsdk_ysql_test_base.cc @@ -1579,7 +1579,7 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { Status CDCSDKYsqlTest::UpdatePublicationTableList( const xrepl::StreamId& stream_id, const std::vector table_ids, - const uint64_t& session_id) { + uint64_t session_id) { UpdatePublicationTableListRequestPB req; UpdatePublicationTableListResponsePB resp; @@ -1920,9 +1920,9 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { if (get_changes_result.ok()) { change_resp = *get_changes_result; } else { - LOG(ERROR) << "Encountered error while calling GetChanges on tablet: " - << tablets[tablet_idx].tablet_id() - << ", status: " << get_changes_result.status(); + LOG(WARNING) << "Encountered error while calling GetChanges on tablet: " + << tablets[tablet_idx].tablet_id() + << ", status: " << get_changes_result.status(); break; } @@ -1994,7 +1994,7 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { if (init_virtual_wal) { Status s = InitVirtualWAL(stream_id, table_ids, session_id, std::move(slot_hash_range)); if (!s.ok()) { - LOG(ERROR) << "Error while trying to initialize virtual WAL: " << s; + LOG(WARNING) << "Error while trying to initialize virtual WAL: " << s; RETURN_NOT_OK(s); } } @@ -2013,8 +2013,8 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { if (get_changes_result.ok()) { change_resp = *get_changes_result; } else { - LOG(ERROR) << "Encountered error while calling GetConsistentChanges on stream: " - << stream_id << ", 
status: " << get_changes_result.status(); + LOG(WARNING) << "Encountered error while calling GetConsistentChanges on stream: " + << stream_id << ", status: " << get_changes_result.status(); RETURN_NOT_OK(get_changes_result); } @@ -2055,7 +2055,7 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { auto result = UpdateAndPersistLSN(stream_id, confirmed_flush_lsn, restart_lsn, session_id); if (!result.ok()) { - LOG(ERROR) << "UpdateRestartLSN failed: " << result; + LOG(WARNING) << "UpdateRestartLSN failed: " << result; RETURN_NOT_OK(result); } } @@ -2103,9 +2103,9 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { if (get_changes_result.ok()) { change_resp = *get_changes_result; } else { - LOG(ERROR) << "Encountered error while calling GetChanges on tablet: " - << tablets[tablet_idx].tablet_id() - << ", status: " << get_changes_result.status(); + LOG(WARNING) << "Encountered error while calling GetChanges on tablet: " + << tablets[tablet_idx].tablet_id() + << ", status: " << get_changes_result.status(); break; } @@ -2903,8 +2903,8 @@ Result CDCSDKYsqlTest::GetUniverseId(PostgresMiniCluster* cluster) { total_seen_records += change_resp.cdc_sdk_proto_records_size(); first_iter = false; } else { - LOG(ERROR) << "Encountered error while calling GetChanges on tablet: " - << tablets[0].tablet_id(); + LOG(WARNING) << "Encountered error while calling GetChanges on tablet: " + << tablets[0].tablet_id(); break; } } diff --git a/src/yb/integration-tests/cdcsdk_ysql_test_base.h b/src/yb/integration-tests/cdcsdk_ysql_test_base.h index 4a03855115df..ba7a9b8d88df 100644 --- a/src/yb/integration-tests/cdcsdk_ysql_test_base.h +++ b/src/yb/integration-tests/cdcsdk_ysql_test_base.h @@ -84,7 +84,7 @@ DECLARE_int32(rocksdb_level0_file_num_compaction_trigger); DECLARE_int32(timestamp_history_retention_interval_sec); DECLARE_bool(tablet_enable_ttl_file_filter); DECLARE_int32(timestamp_syscatalog_history_retention_interval_sec); 
-DECLARE_int32(cdc_max_stream_intent_records); +DECLARE_uint64(cdc_max_stream_intent_records); DECLARE_bool(enable_single_record_update); DECLARE_bool(enable_truncate_cdcsdk_table); DECLARE_bool(enable_load_balancing); @@ -573,7 +573,7 @@ class CDCSDKYsqlTest : public CDCSDKTestBase { Status UpdatePublicationTableList( const xrepl::StreamId& stream_id, const std::vector table_ids, - const uint64_t& session_id = kVWALSessionId1); + uint64_t session_id = kVWALSessionId1); void TestIntentGarbageCollectionFlag( const uint32_t num_tservers, diff --git a/src/yb/integration-tests/cluster_itest_util.cc b/src/yb/integration-tests/cluster_itest_util.cc index 82f9d9b9385e..52a1ab336387 100644 --- a/src/yb/integration-tests/cluster_itest_util.cc +++ b/src/yb/integration-tests/cluster_itest_util.cc @@ -519,13 +519,28 @@ Status WaitUntilNumberOfAliveTServersEqual(int n_tservers, n_tservers, timeout.ToMilliseconds())); } -Result CreateTabletServerMap(ExternalMiniCluster* cluster) { - auto proxy = cluster->num_masters() > 1 - ? cluster->GetLeaderMasterProxy() - : cluster->GetMasterProxy(); - return CreateTabletServerMap(proxy, &cluster->proxy_cache()); +// TODO: switch ExternalMiniCluster::GetLeaderMaster* to return error if leader is not elected to +// unify with MiniCluster. +Result GetLeaderMasterClusterProxy(MiniCluster* cluster) { + return cluster->num_masters() > 1 + ? VERIFY_RESULT(cluster->GetLeaderMasterProxy()) + : cluster->GetMasterProxy(); } +Result GetLeaderMasterClusterProxy(ExternalMiniCluster* cluster) { + return cluster->num_masters() > 1 ? 
cluster->GetLeaderMasterProxy() + : cluster->GetMasterProxy(); +} + +template +Result CreateTabletServerMap(MiniClusterType* cluster) { + return CreateTabletServerMap( + VERIFY_RESULT(GetLeaderMasterClusterProxy(cluster)), &cluster->proxy_cache()); +} + +template Result CreateTabletServerMap(MiniCluster* cluster); +template Result CreateTabletServerMap(ExternalMiniCluster* cluster); + Result CreateTabletServerMap( const master::MasterClusterProxy& proxy, rpc::ProxyCache* proxy_cache) { master::ListTabletServersRequestPB req; diff --git a/src/yb/integration-tests/cluster_itest_util.h b/src/yb/integration-tests/cluster_itest_util.h index 17f8de63d2a1..fe51ae526448 100644 --- a/src/yb/integration-tests/cluster_itest_util.h +++ b/src/yb/integration-tests/cluster_itest_util.h @@ -135,7 +135,9 @@ client::YBSchema SimpleIntKeyYBSchema(); // Create a populated TabletServerMap by interrogating the master. Result CreateTabletServerMap( const master::MasterClusterProxy& proxy, rpc::ProxyCache* cache); -Result CreateTabletServerMap(ExternalMiniCluster* cluster); + +template +Result CreateTabletServerMap(MiniClusterType* cluster); template auto GetForEachReplica(const std::vector& replicas, diff --git a/src/yb/integration-tests/cql-tablet-split-test.cc b/src/yb/integration-tests/cql-tablet-split-test.cc index affb25701dd6..30b2391c4137 100644 --- a/src/yb/integration-tests/cql-tablet-split-test.cc +++ b/src/yb/integration-tests/cql-tablet-split-test.cc @@ -306,20 +306,20 @@ load_generator::ReadStatus CqlSecondaryIndexReader::PerformRead( "for v: '$0', expected key: '$1', key_index: $2", expected_value, key_str, key_index); }; if (!iter.Next()) { - LOG(ERROR) << "No rows found " << values_formatter(); + LOG(WARNING) << "No rows found " << values_formatter(); return load_generator::ReadStatus::kNoRows; } auto row = iter.Row(); const auto k = row.Value(0).ToString(); if (k != key_str) { - LOG(ERROR) << "Invalid k " << values_formatter() << " got k: " << k; + LOG(WARNING) << 
"Invalid k " << values_formatter() << " got k: " << k; return load_generator::ReadStatus::kInvalidRead; } if (iter.Next()) { return load_generator::ReadStatus::kExtraRows; - LOG(ERROR) << "More than 1 row found " << values_formatter(); + LOG(WARNING) << "More than 1 row found " << values_formatter(); do { - LOG(ERROR) << "k: " << iter.Row().Value(0).ToString(); + LOG(WARNING) << "k: " << iter.Row().Value(0).ToString(); } while (iter.Next()); } return load_generator::ReadStatus::kOk; diff --git a/src/yb/integration-tests/external_daemon.cc b/src/yb/integration-tests/external_daemon.cc index 626952b48577..5fa19eef763f 100644 --- a/src/yb/integration-tests/external_daemon.cc +++ b/src/yb/integration-tests/external_daemon.cc @@ -268,8 +268,7 @@ Status ExternalDaemon::StartProcess(const vector& user_flags) { argv.push_back("--log_dir="); // Tell the server to dump its port information so we can pick it up. - const string info_path = GetServerInfoPath(); - argv.push_back("--server_dump_info_path=" + info_path); + argv.push_back("--server_dump_info_path=" + GetServerInfoPath()); argv.push_back("--server_dump_info_format=pb"); // We use ephemeral ports in many tests. They don't work for production, but are OK @@ -330,7 +329,7 @@ Status ExternalDaemon::StartProcess(const vector& user_flags) { auto p = std::make_unique(exe_, argv); p->PipeParentStdout(); p->PipeParentStderr(); - auto default_output_prefix = Format("[$0]", daemon_id_); + auto default_output_prefix = DefaultOutputPrefix(); LOG(INFO) << "Running " << default_output_prefix << ": " << exe_ << "\n" << JoinStrings(argv, "\n"); if (!FLAGS_external_daemon_heap_profile_prefix.empty()) { @@ -356,6 +355,12 @@ Status ExternalDaemon::StartProcess(const vector& user_flags) { stderr_tailer_thread_->SetListener(listener); } + process_.swap(p); + return Status::OK(); +} + +Status ExternalDaemon::WaitProcessReady() { + auto p = process_.get(); // The process is now starting -- wait for the bound port info to show up. 
Stopwatch sw; sw.start(); @@ -381,14 +386,13 @@ Status ExternalDaemon::StartProcess(const vector& user_flags) { return STATUS( TimedOut, Format( "Timed out after $0s waiting for process ($1) to write info file ($2)", - kProcessStartTimeoutSeconds, exe_, info_path)); + kProcessStartTimeoutSeconds, exe_, GetServerInfoPath())); } RETURN_NOT_OK(BuildServerStateFromInfoPath()); - LOG(INFO) << "Started " << default_output_prefix << " " << exe_ << " as pid " << p->pid(); + LOG(INFO) << "Started " << DefaultOutputPrefix() << " " << exe_ << " as pid " << p->pid(); VLOG(1) << exe_ << " instance information:\n" << status_->DebugString(); - process_.swap(p); return Status::OK(); } @@ -449,7 +453,7 @@ pid_t ExternalDaemon::pid() const { } void ExternalDaemon::Shutdown(SafeShutdown safe_shutdown, RequireExitCode0 require_exit_code_0) { - if (!process_) { + if (!process_ || !status_) { return; } @@ -543,6 +547,10 @@ std::string ExternalDaemon::ProcessNameAndPidStr() { return Format("$0 with pid $1", exe_, process_->pid()); } +std::string ExternalDaemon::DefaultOutputPrefix() { + return Format("[$0]", daemon_id_); +} + HostPort ExternalDaemon::bound_rpc_hostport() const { CHECK(status_); CHECK_GE(status_->bound_rpc_addresses_size(), 1); diff --git a/src/yb/integration-tests/external_daemon.h b/src/yb/integration-tests/external_daemon.h index 0731f939285a..534dd29b4315 100644 --- a/src/yb/integration-tests/external_daemon.h +++ b/src/yb/integration-tests/external_daemon.h @@ -259,6 +259,8 @@ class ExternalDaemon : public RefCountedThreadSafe { return std::make_unique(proxy_cache_, bound_rpc_addr()); } + Status WaitProcessReady(); + protected: friend class RefCountedThreadSafe; virtual ~ExternalDaemon(); @@ -282,6 +284,8 @@ class ExternalDaemon : public RefCountedThreadSafe { std::string ProcessNameAndPidStr(); + std::string DefaultOutputPrefix(); + const std::string daemon_id_; rpc::Messenger* messenger_; rpc::ProxyCache* proxy_cache_; diff --git 
a/src/yb/integration-tests/external_mini_cluster.cc b/src/yb/integration-tests/external_mini_cluster.cc index 5ece1f5e011a..6e1480fa5693 100644 --- a/src/yb/integration-tests/external_mini_cluster.cc +++ b/src/yb/integration-tests/external_mini_cluster.cc @@ -401,11 +401,13 @@ Status ExternalMiniCluster::Start(rpc::Messenger* messenger) { for (size_t i = 1; i <= opts_.num_tablet_servers; i++) { RETURN_NOT_OK_PREPEND( - AddTabletServer( - ExternalMiniClusterOptions::kDefaultStartCqlProxy, {}, -1, - /* wait_for_registration */ false), + LaunchTabletServer( + ExternalMiniClusterOptions::kDefaultStartCqlProxy, {}, -1), Format("Failed starting tablet server $0", i)); } + for (const auto& ts : tablet_servers_) { + RETURN_NOT_OK(ts->WaitProcessReady()); + } RETURN_NOT_OK(WaitForTabletServerCount( opts_.num_tablet_servers, kTabletServerRegistrationTimeout)); } else { @@ -1440,6 +1442,22 @@ string ExternalMiniCluster::GetBindIpForTabletServer(size_t index) const { Status ExternalMiniCluster::AddTabletServer( bool start_cql_proxy, const std::vector& extra_flags, int num_drives, bool wait_for_registration) { + auto idx = VERIFY_RESULT(LaunchTabletServer(start_cql_proxy, extra_flags, num_drives)); + auto ts = tablet_servers_[idx]; + RETURN_NOT_OK(ts->WaitProcessReady()); + if (!wait_for_registration) { + return Status::OK(); + } + RETURN_NOT_OK(WaitForTabletServerToRegister(ts->uuid(), kTabletServerRegistrationTimeout)); + if (opts_.enable_ysql && opts_.wait_for_tservers_to_accept_ysql_connections) { + RETURN_NOT_OK(WaitForTabletServersToAcceptYSQLConnection( + {idx}, MonoTime::Now() + kTabletServerRegistrationTimeout)); + } + return Status::OK(); +} + +Result ExternalMiniCluster::LaunchTabletServer( + bool start_cql_proxy, const std::vector& extra_flags, int num_drives) { CHECK(GetLeaderMaster() != nullptr) << "Must have started at least 1 master before adding tablet servers"; @@ -1507,13 +1525,13 @@ Status ExternalMiniCluster::AddTabletServer( num_drives = opts_.num_drives; 
} - scoped_refptr ts = new ExternalTabletServer( + auto ts = make_scoped_refptr( idx, messenger_, proxy_cache_.get(), exe, GetDataPath(Format("ts-$0", idx + 1)), num_drives, GetBindIpForTabletServer(idx), ts_rpc_port, ts_http_port, redis_rpc_port, redis_http_port, cql_rpc_port, cql_http_port, pgsql_rpc_port, ysql_conn_mgr_rpc_port, pgsql_http_port, master_hostports, SubstituteInFlags(flags, idx)); - RETURN_NOT_OK(ts->Start(start_cql_proxy)); + RETURN_NOT_OK(ts->Launch(start_cql_proxy)); tablet_servers_.push_back(ts); // Add yb controller for the new ts if we already have controllers for existing TSs. @@ -1522,15 +1540,7 @@ Status ExternalMiniCluster::AddTabletServer( RETURN_NOT_OK(AddYbControllerServer(ts)); } - if (wait_for_registration) { - RETURN_NOT_OK(WaitForTabletServerToRegister(ts->uuid(), kTabletServerRegistrationTimeout)); - if (opts_.enable_ysql && opts_.wait_for_tservers_to_accept_ysql_connections) { - RETURN_NOT_OK(WaitForTabletServersToAcceptYSQLConnection( - {idx}, MonoTime::Now() + kTabletServerRegistrationTimeout)); - } - } - - return Status::OK(); + return idx; } Status ExternalMiniCluster::RemoveTabletServer(const std::string& ts_uuid, MonoTime deadline) { @@ -1785,25 +1795,62 @@ Result> ExternalMiniClu return result; } -Result ExternalMiniCluster::GetTabletStatus( - const ExternalTabletServer& ts, const yb::TabletId& tablet_id) { +namespace { + +rpc::RpcController DefaultRpcController() { rpc::RpcController rpc; rpc.set_timeout(kDefaultTimeout); + return rpc; +} + +Status StatusFromError(const TabletServerErrorPB& error) { + return StatusFromPB(error.status()) + .CloneAndPrepend(Format("Code $0", TabletServerErrorPB::Code_Name(error.code()))); +} + +template +concept HasTabletServerError = requires(Response response) { + { response.error() } -> std::convertible_to; +}; + +template +Result CheckedResponse(const Response& response) { + if (response.has_error()) { + return StatusFromError(response.error()); + } + return response; +} + +} // 
namespace + +Result ExternalMiniCluster::GetTabletStatus( + const ExternalTabletServer& ts, const TabletId& tablet_id) { + auto rpc = DefaultRpcController(); tserver::GetTabletStatusRequestPB req; req.set_tablet_id(tablet_id); tserver::GetTabletStatusResponsePB resp; RETURN_NOT_OK(GetProxy(&ts).GetTabletStatus(req, &resp, &rpc)); - if (resp.has_error()) { - return StatusFromPB(resp.error().status()).CloneAndPrepend( - Format("Code $0", TabletServerErrorPB::Code_Name(resp.error().code()))); + return CheckedResponse(resp); } - return resp; + +Result ExternalMiniCluster::GetTabletPeerHealth( + const ExternalTabletServer& ts, const std::vector& tablet_ids) { + auto rpc = DefaultRpcController(); + + tserver::CheckTserverTabletHealthRequestPB req; + for (const auto& tablet_id : tablet_ids) { + *req.mutable_tablet_ids()->Add() = tablet_id; + } + + tserver::CheckTserverTabletHealthResponsePB resp; + RETURN_NOT_OK(GetProxy(&ts).CheckTserverTabletHealth(req, &resp, &rpc)); + return CheckedResponse(resp); } Result ExternalMiniCluster::GetSplitKey( - const yb::TabletId& tablet_id) { + const TabletId& tablet_id) { size_t attempts = 50; while (attempts > 0) { --attempts; @@ -1817,8 +1864,7 @@ Result ExternalMiniCluster::GetSplitKey( // There's a small chance that a leader is changed after GetTabletLeaderIndex() and before // GetSplitKey() is started, in this case we should re-attempt. 
if (response.error().code() != TabletServerErrorPB::NOT_THE_LEADER) { - return StatusFromPB(response.error().status()).CloneAndPrepend( - Format("Code $0", TabletServerErrorPB::Code_Name(response.error().code()))); + return StatusFromError(response.error()); } LOG(WARNING) << Format( @@ -1829,7 +1875,7 @@ Result ExternalMiniCluster::GetSplitKey( } Result ExternalMiniCluster::GetSplitKey( - const ExternalTabletServer& ts, const yb::TabletId& tablet_id, bool fail_on_response_error) { + const ExternalTabletServer& ts, const TabletId& tablet_id, bool fail_on_response_error) { rpc::RpcController rpc; rpc.set_timeout(kDefaultTimeout); @@ -1839,32 +1885,24 @@ Result ExternalMiniCluster::GetSplitKey( tserver::GetSplitKeyResponsePB resp; RETURN_NOT_OK(GetProxy(&ts).GetSplitKey(req, &resp, &rpc)); if (fail_on_response_error && resp.has_error()) { - return StatusFromPB(resp.error().status()).CloneAndPrepend( - Format("Code $0", TabletServerErrorPB::Code_Name(resp.error().code()))); + return StatusFromError(resp.error()); } return resp; } Status ExternalMiniCluster::FlushTabletsOnSingleTServer( - ExternalTabletServer* ts, const std::vector tablet_ids, - tserver::FlushTabletsRequestPB_Operation operation) { - tserver::FlushTabletsRequestPB req; - tserver::FlushTabletsResponsePB resp; - rpc::RpcController controller; - controller.set_timeout(10s * kTimeMultiplier); + size_t idx, const std::vector& tablet_ids) { + return tablet_servers_[idx]->FlushTablets(tablet_ids); +} - req.set_dest_uuid(ts->uuid()); - req.set_operation(operation); - for (const auto& tablet_id : tablet_ids) { - req.add_tablet_ids(tablet_id); - } - if (tablet_ids.empty()) { - req.set_all_tablets(true); - } +Status ExternalMiniCluster::CompactTabletsOnSingleTServer( + size_t idx, const std::vector& tablet_ids) { + return tablet_servers_[idx]->CompactTablets(tablet_ids); +} - auto ts_admin_service_proxy = std::make_unique( - proxy_cache_.get(), ts->bound_rpc_addr()); - return 
ts_admin_service_proxy->FlushTablets(req, &resp, &controller); +Status ExternalMiniCluster::LogGCOnSingleTServer( + size_t idx, const std::vector& tablet_ids, bool rollover) { + return tablet_servers_[idx]->LogGC(tablet_ids, rollover); } Result ExternalMiniCluster::ListTablets( @@ -2032,7 +2070,7 @@ ExternalMaster* ExternalMiniCluster::GetLeaderMaster() { } Result ExternalMiniCluster::GetTabletLeaderIndex( - const yb::TabletId& tablet_id, bool require_lease) { + const TabletId& tablet_id, bool require_lease) { for (size_t i = 0; i < num_tablet_servers(); ++i) { auto tserver = tablet_server(i); if (tserver->IsProcessAlive() && !tserver->IsProcessPaused()) { @@ -2225,9 +2263,7 @@ Status ExternalMiniCluster::StartElection(ExternalMaster* master) { rpc.set_timeout(opts_.timeout); RETURN_NOT_OK(master_proxy->RunLeaderElection(req, &resp, &rpc)); if (resp.has_error()) { - return StatusFromPB(resp.error().status()) - .CloneAndPrepend(Format("Code $0", - TabletServerErrorPB::Code_Name(resp.error().code()))); + return StatusFromError(resp.error()); } return Status::OK(); } @@ -2496,7 +2532,7 @@ Status ExternalMaster::Start(bool shell_mode) { flags.Add("master_addresses", master_addrs_); } RETURN_NOT_OK(StartProcess(flags.value())); - return Status::OK(); + return WaitProcessReady(); } Status ExternalMaster::Restart() { @@ -2544,7 +2580,14 @@ ExternalTabletServer::~ExternalTabletServer() { Status ExternalTabletServer::Start( bool start_cql_proxy, bool set_proxy_addrs, - std::vector> extra_flags) { + const std::vector>& extra_flags) { + RETURN_NOT_OK(Launch(start_cql_proxy, set_proxy_addrs, extra_flags)); + return WaitProcessReady(); +} + +Status ExternalTabletServer::Launch( + bool start_cql_proxy, bool set_proxy_addrs, + const std::vector>& extra_flags) { auto dirs = FsRootDirs(root_dir_, num_drives_); for (const auto& dir : dirs) { RETURN_NOT_OK(Env::Default()->CreateDirs(dir)); @@ -2573,9 +2616,7 @@ Status ExternalTabletServer::Start( flags.Add(flag_value.first, 
flag_value.second); } - RETURN_NOT_OK(StartProcess(flags.value())); - - return Status::OK(); + return StartProcess(flags.value()); } Status ExternalTabletServer::BuildServerStateFromInfoPath() { @@ -2679,6 +2720,44 @@ Result ExternalTabletServer::SignalPostmaster(int signal) { return kill(postmaster_pid, signal); } +Status ExternalTabletServer::FlushTablets(const std::vector& tablet_ids) { + return ExecuteFlushTablets(tablet_ids, tserver::FlushTabletsRequestPB::FLUSH, [](auto&){}); +} + +Status ExternalTabletServer::CompactTablets(const std::vector& tablet_ids) { + return ExecuteFlushTablets(tablet_ids, tserver::FlushTabletsRequestPB::COMPACT, [](auto&){}); +} + +Status ExternalTabletServer::LogGC(const std::vector& tablet_ids, bool rollover) { + return ExecuteFlushTablets( + tablet_ids, tserver::FlushTabletsRequestPB::LOG_GC, [rollover](auto& req) { + req.set_rollover(rollover); + }); +} + +template +Status ExternalTabletServer::ExecuteFlushTablets( + const std::vector& tablet_ids, tserver::FlushTabletsRequestPB::Operation operation, + const F& f) { + tserver::FlushTabletsRequestPB req; + tserver::FlushTabletsResponsePB resp; + rpc::RpcController controller; + controller.set_timeout(10s * kTimeMultiplier); + + req.set_dest_uuid(uuid()); + req.set_operation(operation); + for (const auto& tablet_id : tablet_ids) { + req.add_tablet_ids(tablet_id); + } + if (tablet_ids.empty()) { + req.set_all_tablets(true); + } + f(req); + + auto ts_admin_service_proxy = Proxy(); + return ts_admin_service_proxy->FlushTablets(req, &resp, &controller); +} + Status RestartAllMasters(ExternalMiniCluster* cluster) { for (size_t i = 0; i != cluster->num_masters(); ++i) { cluster->master(i)->Shutdown(); diff --git a/src/yb/integration-tests/external_mini_cluster.h b/src/yb/integration-tests/external_mini_cluster.h index e5747d381125..a6e395891e0f 100644 --- a/src/yb/integration-tests/external_mini_cluster.h +++ b/src/yb/integration-tests/external_mini_cluster.h @@ -429,9 +429,7 @@ class 
ExternalMiniCluster : public MiniClusterBase { // Return the client messenger used by the ExternalMiniCluster. rpc::Messenger* messenger(); - rpc::ProxyCache& proxy_cache() { - return *proxy_cache_; - } + rpc::ProxyCache& proxy_cache() override { return *proxy_cache_; } // Get the master leader consensus proxy. consensus::ConsensusServiceProxy GetLeaderConsensusProxy(); @@ -500,16 +498,23 @@ class ExternalMiniCluster : public MiniClusterBase { Result GetSegmentCounts(ExternalTabletServer* ts); Result GetTabletStatus( - const ExternalTabletServer& ts, const yb::TabletId& tablet_id); + const ExternalTabletServer& ts, const TabletId& tablet_id); + + Result GetTabletPeerHealth( + const ExternalTabletServer& ts, const std::vector& tablet_ids); - Result GetSplitKey(const yb::TabletId& tablet_id); - Result GetSplitKey(const ExternalTabletServer& ts, - const yb::TabletId& tablet_id, bool fail_on_response_error = true); + Result GetSplitKey(const TabletId& tablet_id); + Result GetSplitKey( + const ExternalTabletServer& ts, const TabletId& tablet_id, + bool fail_on_response_error = true); // Flushes all tablets if tablets_ids is empty. 
Status FlushTabletsOnSingleTServer( - ExternalTabletServer* ts, const std::vector tablet_ids, - tserver::FlushTabletsRequestPB_Operation operation); + size_t idx, const std::vector& tablet_ids); + Status CompactTabletsOnSingleTServer( + size_t idx, const std::vector& tablet_ids); + Status LogGCOnSingleTServer( + size_t idx, const std::vector& tablet_ids, bool rollover); Status WaitForTSToCrash(const ExternalTabletServer* ts, const MonoDelta& timeout = MonoDelta::FromSeconds(60)); @@ -621,6 +626,9 @@ class ExternalMiniCluster : public MiniClusterBase { friend class UpgradeTestBase; FRIEND_TEST(MasterFailoverTest, TestKillAnyMaster); + Result LaunchTabletServer( + bool start_cql_proxy, const std::vector& extra_flags, int num_drives); + void ConfigureClientBuilder(client::YBClientBuilder* builder) override; Result DoGetLeaderMasterBoundRpcAddr() override; @@ -778,7 +786,12 @@ class ExternalTabletServer : public ExternalDaemon { Status Start( bool start_cql_proxy = ExternalMiniClusterOptions::kDefaultStartCqlProxy, bool set_proxy_addrs = true, - std::vector> extra_flags = {}); + const std::vector>& extra_flags = {}); + + Status Launch( + bool start_cql_proxy = ExternalMiniClusterOptions::kDefaultStartCqlProxy, + bool set_proxy_addrs = true, + const std::vector>& extra_flags = {}); void UpdateMasterAddress(const std::vector& master_addrs); @@ -847,7 +860,16 @@ class ExternalTabletServer : public ExternalDaemon { const MetricPrototype* metric_proto, const char* value_field) const; + Status FlushTablets(const std::vector& tablet_ids); + Status CompactTablets(const std::vector& tablet_ids); + Status LogGC(const std::vector& tablet_ids, bool rollover); + protected: + template + Status ExecuteFlushTablets( + const std::vector& tablet_ids, tserver::FlushTabletsRequestPB::Operation operation, + const F& f); + Status DeleteServerInfoPaths() override; bool ServerInfoPathsExist() override; diff --git a/src/yb/integration-tests/external_mini_cluster_secure_test.cc 
b/src/yb/integration-tests/external_mini_cluster_secure_test.cc index 7d10419b1952..4a2719c6abb7 100644 --- a/src/yb/integration-tests/external_mini_cluster_secure_test.cc +++ b/src/yb/integration-tests/external_mini_cluster_secure_test.cc @@ -20,6 +20,7 @@ #include "yb/rpc/messenger.h" #include "yb/rpc/secure_stream.h" +#include "yb/util/backoff_waiter.h" #include "yb/util/file_util.h" #include "yb/util/env_util.h" #include "yb/util/string_util.h" @@ -294,10 +295,14 @@ class ExternalMiniClusterSecureWithInterCATest : public ExternalMiniClusterSecur "-p", cluster_->ysql_hostport(0).port(), sslparam, "-c", "select now();" ); - LOG(INFO) << "Running " << ToString(ysqlsh_command); - Subprocess proc(ysqlsh_command[0], ysqlsh_command); - proc.SetEnv("PGPASSWORD", "yugabyte"); - ASSERT_OK(proc.Run()); + ASSERT_OK(WaitFor([&ysqlsh_command] { + LOG(INFO) << "Running " << ToString(ysqlsh_command); + Subprocess proc(ysqlsh_command[0], ysqlsh_command); + proc.SetEnv("PGPASSWORD", "yugabyte"); + auto status = proc.Run(); + WARN_NOT_OK(status, "Failed executing ysqlsh"); + return status.ok(); + }, 10s * kTimeMultiplier, "Connected to ysqlsh")); } }; diff --git a/src/yb/integration-tests/metacache_refresh-itest.cc b/src/yb/integration-tests/metacache_refresh-itest.cc index 439b65bb6266..9614b41e8a9d 100644 --- a/src/yb/integration-tests/metacache_refresh-itest.cc +++ b/src/yb/integration-tests/metacache_refresh-itest.cc @@ -83,7 +83,6 @@ class MetacacheRefreshITest : public MiniClusterTestWithClientPrimaryKey()->Type(DataType::STRING)->NotNull(); schema_builder.AddColumn("value")->Type(DataType::INT64)->NotNull(); - schema_builder.SetSchemaName(kPgsqlSchemaName); EXPECT_OK(client_->CreateNamespaceIfNotExists( kPgsqlNamespaceName, YQLDatabase::YQL_DATABASE_PGSQL, "" /* creator_role_name */, kNamespaceId)); diff --git a/src/yb/integration-tests/mini_cluster.cc b/src/yb/integration-tests/mini_cluster.cc index c5a182ae29b2..565a3d452f95 100644 --- 
a/src/yb/integration-tests/mini_cluster.cc +++ b/src/yb/integration-tests/mini_cluster.cc @@ -110,6 +110,7 @@ DECLARE_int32(transaction_table_num_tablets); DECLARE_int64(rocksdb_compact_flush_rate_limit_bytes_per_sec); DECLARE_string(fs_data_dirs); DECLARE_string(use_private_ip); +DECLARE_bool(TEST_enable_ysql_operation_lease_expiry_check); namespace yb { @@ -217,6 +218,11 @@ Status MiniCluster::StartAsync( // We are testing public/private IPs using mini cluster. So set mode to 'cloud'. ANNOTATE_UNPROTECTED_WRITE(FLAGS_use_private_ip) = "cloud"; + // todo(zdrudi): There are currently use after free issues with how the minicluster handles the + // pg process. The background ysql lease checker can call a method on a null pointer. This is only + // an issue in the test harness so we disable the tserver's ysql op lease check for miniclusters. + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_enable_ysql_operation_lease_expiry_check) = false; + // This dictates the RF of newly created tables. SetAtomicFlag(options_.num_tablet_servers >= 3 ? 
3 : 1, &FLAGS_replication_factor); FLAGS_memstore_size_mb = 16; @@ -1650,7 +1656,7 @@ void ActivateCompactionTimeLogging(MiniCluster* cluster) { void DumpDocDB(MiniCluster* cluster, ListPeersFilter filter) { auto peers = ListTabletPeers(cluster, filter); for (const auto& peer : peers) { - peer->shared_tablet()->TEST_DocDBDumpToLog(tablet::IncludeIntents::kTrue); + peer->shared_tablet()->TEST_DocDBDumpToLog(docdb::IncludeIntents::kTrue); } } diff --git a/src/yb/integration-tests/mini_cluster.h b/src/yb/integration-tests/mini_cluster.h index c4e825d8fb33..f7bcf4dd3604 100644 --- a/src/yb/integration-tests/mini_cluster.h +++ b/src/yb/integration-tests/mini_cluster.h @@ -52,6 +52,7 @@ #include "yb/master/master_client.fwd.h" #include "yb/master/master_cluster.proxy.h" #include "yb/master/master_fwd.h" +#include "yb/master/mini_master.h" #include "yb/master/ts_descriptor.h" #include "yb/tablet/tablet_fwd.h" @@ -68,10 +69,6 @@ using namespace std::literals; namespace yb { -namespace master { -class MiniMaster; -} - namespace server { class SkewedClockDeltaChanger; } @@ -277,7 +274,14 @@ class MiniCluster : public MiniClusterBase { Status WaitForLoadBalancerToStabilize(MonoDelta timeout); template - Result GetLeaderMasterProxy(); + Result GetLeaderMasterProxy() { + return T(proxy_cache_.get(), VERIFY_RESULT(DoGetLeaderMasterBoundRpcAddr())); + } + + template + T GetMasterProxy() { + return T(proxy_cache_.get(), mini_master()->bound_rpc_addr()); + } std::string GetClusterId() { return options_.cluster_id; } @@ -292,6 +296,8 @@ class MiniCluster : public MiniClusterBase { std::string GetTabletServerHTTPAddresses() const override; + rpc::ProxyCache& proxy_cache() override { return *proxy_cache_; } + private: void ConfigureClientBuilder(client::YBClientBuilder* builder) override; @@ -501,11 +507,6 @@ void DumpDocDB(MiniCluster* cluster, ListPeersFilter filter = ListPeersFilter::k std::vector DumpDocDBToStrings( MiniCluster* cluster, ListPeersFilter filter = 
ListPeersFilter::kLeaders); -template -Result MiniCluster::GetLeaderMasterProxy() { - return T(proxy_cache_.get(), VERIFY_RESULT(DoGetLeaderMasterBoundRpcAddr())); -} - void DisableFlushOnShutdown(MiniCluster& cluster, bool disable); } // namespace yb diff --git a/src/yb/integration-tests/mini_cluster_base.h b/src/yb/integration-tests/mini_cluster_base.h index 17706f3b2948..e47f25de843e 100644 --- a/src/yb/integration-tests/mini_cluster_base.h +++ b/src/yb/integration-tests/mini_cluster_base.h @@ -65,6 +65,8 @@ class MiniClusterBase { virtual std::string GetTabletServerHTTPAddresses() const = 0; + virtual rpc::ProxyCache& proxy_cache() = 0; + protected: virtual ~MiniClusterBase() = default; diff --git a/src/yb/integration-tests/object_lock-test.cc b/src/yb/integration-tests/object_lock-test.cc index 60c067bb9d99..d700a8d8caf2 100644 --- a/src/yb/integration-tests/object_lock-test.cc +++ b/src/yb/integration-tests/object_lock-test.cc @@ -31,6 +31,7 @@ #include "yb/master/master.h" #include "yb/master/master_cluster_client.h" #include "yb/master/master_ddl.proxy.h" +#include "yb/master/master_ddl_client.h" #include "yb/master/mini_master.h" #include "yb/master/object_lock_info_manager.h" #include "yb/master/test_async_rpc_manager.h" @@ -64,6 +65,7 @@ DECLARE_uint64(ysql_lease_refresher_interval_ms); DECLARE_double(TEST_tserver_ysql_lease_refresh_failure_prob); DECLARE_bool(enable_load_balancing); DECLARE_uint64(object_lock_cleanup_interval_ms); +DECLARE_bool(TEST_olm_skip_sending_wait_for_probes); namespace yb { @@ -104,6 +106,7 @@ class ObjectLockTest : public MiniClusterTestWithClient { kDefaultYSQLLeaseRefreshIntervalMilli; ANNOTATE_UNPROTECTED_WRITE(FLAGS_enable_load_balancing) = false; ANNOTATE_UNPROTECTED_WRITE(FLAGS_object_lock_cleanup_interval_ms) = 500; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_olm_skip_sending_wait_for_probes) = true; MiniClusterTestWithClient::SetUp(); MiniClusterOptions opts; opts.num_tablet_servers = 3; @@ -170,7 +173,7 @@ class 
ObjectLockTest : public MiniClusterTestWithClient { [tablet_servers]() -> Result { for (const auto& ts : tablet_servers) { auto lease_info = VERIFY_RESULT(ts->server()->GetYSQLLeaseInfo()); - if (!lease_info.is_live()) { + if (!lease_info.is_live) { return false; } } @@ -379,7 +382,8 @@ std::future AcquireLockGloballyAsync( session_host_uuid, owner, database_id, object_id, TableLockType::ACCESS_EXCLUSIVE, lease_epoch, client->Clock(), opt_deadline); auto callback = [promise](const Status& s) { promise->set_value(s); }; - client->AcquireObjectLocksGlobalAsync(req, std::move(callback), rpc_timeout); + client->AcquireObjectLocksGlobalAsync( + req, std::move(callback), ToCoarse(MonoTime::Now() + rpc_timeout)); return future; } @@ -439,7 +443,8 @@ Status ReleaseLockGlobally( auto req = ReleaseRequestFor( session_host_uuid, owner, lease_epoch, client->Clock(), apply_after); Synchronizer sync; - client->ReleaseObjectLocksGlobalAsync(req, sync.AsStdStatusCallback(), rpc_timeout); + client->ReleaseObjectLocksGlobalAsync( + req, sync.AsStdStatusCallback(), ToCoarse(MonoTime::Now() + rpc_timeout)); return sync.Wait(); } @@ -921,39 +926,39 @@ TEST_F(ObjectLockTest, TServerLeaseExpiresBeforeExclusiveLockRequest) { ASSERT_OK(cluster_->mini_tablet_server(idx_to_take_down)->Start()); } -TEST_F(ObjectLockTest, TServerLeaseExpiresAfterExclusiveLockRequest) { - auto kBlockingRequestTimeout = MonoDelta::FromSeconds(20); - ASSERT_GT(kBlockingRequestTimeout.ToMilliseconds(), FLAGS_master_ysql_operation_lease_ttl_ms); - auto idx_to_take_down = 0; - auto uuid_to_take_down = TSUuid(idx_to_take_down); - { - auto* tserver0 = cluster_->mini_tablet_server(idx_to_take_down); - auto tserver0_proxy = TServerProxyFor(tserver0); - ASSERT_OK(AcquireLockAt( - &tserver0_proxy, uuid_to_take_down, kTxn1, kDatabaseID, kObjectId)); - } - auto master_proxy = ASSERT_RESULT(MasterLeaderProxy()); - auto future = AcquireLockGloballyAsync( - &master_proxy, TSUuid(1), kTxn2, kDatabaseID, kObjectId, kLeaseEpoch, 
nullptr, std::nullopt, - kBlockingRequestTimeout); - - ASSERT_OK(WaitFor( - [&]() -> bool { - return cluster_->mini_tablet_server(idx_to_take_down) - ->server() - ->ts_local_lock_manager() - ->TEST_WaitingLocksSize() > 0; - }, - kBlockingRequestTimeout, - "Timed out waiting for acquire lock request to block on the master")); - LOG(INFO) << "Shutting down tablet server " << uuid_to_take_down; - ASSERT_NOTNULL(cluster_->find_tablet_server(uuid_to_take_down))->Shutdown(); - // Now wait for the lease to expire. After the lease expires the lock acquisition should succeed. - LOG(INFO) << Format("Waiting for tablet server $0 to lose its lease", uuid_to_take_down); - ASSERT_OK(WaitForTServerLeaseToExpire(uuid_to_take_down, kBlockingRequestTimeout)); - ASSERT_OK(ResolveFutureStatus(future)); - ASSERT_OK(cluster_->mini_tablet_server(idx_to_take_down)->Start()); -} +// TODO: Enable this test once https://github.com/yugabyte/yugabyte-db/issues/27192 is addressed. +// TEST_F(ObjectLockTest, TServerLeaseExpiresAfterExclusiveLockRequest) { +// auto kBlockingRequestTimeout = MonoDelta::FromSeconds(20); +// ASSERT_GT(kBlockingRequestTimeout.ToMilliseconds(), FLAGS_master_ysql_operation_lease_ttl_ms); +// auto idx_to_take_down = 0; +// auto uuid_to_take_down = TSUuid(idx_to_take_down); +// { +// auto* tserver0 = cluster_->mini_tablet_server(idx_to_take_down); +// auto tserver0_proxy = TServerProxyFor(tserver0); +// ASSERT_OK(AcquireLockAt( +// &tserver0_proxy, uuid_to_take_down, kTxn1, kDatabaseID, kObjectId)); +// } +// auto master_proxy = ASSERT_RESULT(MasterLeaderProxy()); +// auto future = AcquireLockGloballyAsync( +// &master_proxy, TSUuid(1), kTxn2, kDatabaseID, kObjectId, kLeaseEpoch, nullptr, +// std::nullopt, kBlockingRequestTimeout); +// ASSERT_OK(WaitFor( +// [&]() -> bool { +// return cluster_->mini_tablet_server(idx_to_take_down) +// ->server() +// ->ts_local_lock_manager() +// ->TEST_WaitingLocksSize() > 0; +// }, +// kBlockingRequestTimeout, +// "Timed out waiting for 
acquire lock request to block on the master")); +// LOG(INFO) << "Shutting down tablet server " << uuid_to_take_down; +// ASSERT_NOTNULL(cluster_->find_tablet_server(uuid_to_take_down))->Shutdown(); +// // Now wait for the lease to expire. After that, the lock acquisition should succeed. +// LOG(INFO) << Format("Waiting for tablet server $0 to lose its lease", uuid_to_take_down); +// ASSERT_OK(WaitForTServerLeaseToExpire(uuid_to_take_down, kBlockingRequestTimeout)); +// ASSERT_OK(ResolveFutureStatus(future)); +// ASSERT_OK(cluster_->mini_tablet_server(idx_to_take_down)->Start()); +// } TEST_F(ObjectLockTest, TServerHeldExclusiveLocksReleasedAfterRestart) { // Bump up the lease lifetime to verify the lease is lost when a new tserver process registers. @@ -1092,6 +1097,47 @@ TEST_F(ExternalObjectLockTest, TServerCanAcquireLocksAfterLeaseExpiry) { EXPECT_THAT(status, EqualsStatus(BuildLeaseEpochMismatchErrorStatus(kLeaseEpoch, lease_epoch))); } +TEST_F(ExternalObjectLockTest, RefreshYsqlLease) { + auto ts = tablet_server(0); + auto master_proxy = cluster_->GetLeaderMasterProxy(); + + // Acquire a lock on behalf of another ts. + ASSERT_OK(AcquireLockGlobally( + &master_proxy, tablet_server(1)->uuid(), kTxn1, kDatabaseID, kObjectId, kLeaseEpoch, nullptr, + std::nullopt, kTimeout)); + + master::MasterDDLClient ddl_client{cluster_->GetLeaderMasterProxy()}; + + auto lease_refresh_time_ms = MonoTime::Now().GetDeltaSinceMin().ToMilliseconds(); + // Request a lease refresh on behalf of ts with no lease epoch in the request. + // Master should respond with our ts' current lease epoch, the acquired lock entries, and + // new_lease. 
+ auto info = ASSERT_RESULT(ddl_client.RefreshYsqlLease( + ts->uuid(), ts->instance_id().instance_seqno(), + lease_refresh_time_ms, {})); + ASSERT_TRUE(info.new_lease()); + ASSERT_EQ(info.lease_epoch(), kLeaseEpoch); + ASSERT_TRUE(info.has_ddl_lock_entries()); + ASSERT_GE(info.ddl_lock_entries().lock_entries_size(), 1); + + // Request a lease refresh on behalf of ts with the incorrect lease epoch in the request. + // Expect the master to respond with our ts' current lease epoch, the acquired lock entries, and + // new_lease. + info = ASSERT_RESULT(ddl_client.RefreshYsqlLease( + ts->uuid(), ts->instance_id().instance_seqno(), lease_refresh_time_ms, 0)); + ASSERT_TRUE(info.new_lease()); + ASSERT_EQ(info.lease_epoch(), kLeaseEpoch); + ASSERT_TRUE(info.has_ddl_lock_entries()); + ASSERT_GE(info.ddl_lock_entries().lock_entries_size(), 1); + + // Request a lease refresh on behalf of ts with the correct lease epoch in the request. + // Expect the master to omit most information and set new_lease to false. + info = ASSERT_RESULT(ddl_client.RefreshYsqlLease( + ts->uuid(), ts->instance_id().instance_seqno(), lease_refresh_time_ms, kLeaseEpoch)); + ASSERT_FALSE(info.new_lease()); + ASSERT_FALSE(info.has_ddl_lock_entries()); +} + class MultiMasterObjectLockTest : public ObjectLockTest { protected: int num_masters() override { @@ -1176,6 +1222,10 @@ TEST_F(ExternalObjectLockTestOneTS, TabletServerKillsSessionsWhenItAcquiresNewLe constexpr std::string_view kTableName = "test_table"; auto conn = ASSERT_RESULT(cluster_->ConnectToDB("yugabyte", kTSIdx)); ASSERT_OK(conn.Execute(Format("CREATE TABLE $0 (k INT PRIMARY KEY, v INT)", kTableName))); + // Disable the tserver's lease expiry check task so acquiring a new lease is what prompts the + // tserver to kill its pg sessions, not the tserver itself deciding its lease has expired. 
+ ASSERT_OK(cluster_->SetFlag( + tablet_server(kTSIdx), "TEST_enable_ysql_operation_lease_expiry_check", "false")); ASSERT_OK(cluster_->SetFlag(tablet_server(kTSIdx), kTServerYsqlLeaseRefreshFlagName, "false")); auto cluster_client = master::MasterClusterClient(cluster_->GetLeaderMasterProxy()); @@ -1203,6 +1253,44 @@ TEST_F(ExternalObjectLockTestOneTS, TabletServerKillsSessionsWhenItAcquiresNewLe timeout, "Wait for tserver to accept new pg sessions")); } +TEST_F(ExternalObjectLockTestOneTS, TabletServerKillsSessionsWhenItsLeaseExpires) { + constexpr size_t kTSIdx = 0; + MonoDelta timeout = MonoDelta::FromSeconds(10); + auto ts_uuid = tablet_server(kTSIdx)->uuid(); + constexpr std::string_view kTableName = "test_table"; + auto conn = ASSERT_RESULT(cluster_->ConnectToDB("yugabyte", kTSIdx)); + ASSERT_OK(conn.Execute(Format("CREATE TABLE $0 (k INT PRIMARY KEY, v INT)", kTableName))); + ASSERT_OK(cluster_->SetFlag(tablet_server(kTSIdx), kTServerYsqlLeaseRefreshFlagName, "false")); + auto cluster_client = + master::MasterClusterClient(cluster_->GetLeaderMasterProxy()); + ASSERT_OK(WaitFor( + [&conn, kTableName]() -> Result { + auto result = conn.FetchRow(Format("SELECT count(*) from $0", kTableName)); + if (result.ok()) { + return false; + } + if (PGSessionKilledStatus(result.status())) { + return true; + } + return result.status(); + }, + timeout, "Wait for pg session to be killed")); + // At this point we shouldn't be able to start a new session. + ExternalClusterPGConnectionOptions conn_options; + conn_options.timeout_secs = 2; + auto conn_result = cluster_->ConnectToDB(std::move(conn_options)); + ASSERT_FALSE(conn_result.ok()); + ASSERT_OK( + cluster_->SetFlag(tablet_server(kTSIdx), kTServerYsqlLeaseRefreshFlagName, "true")); + // Once re-acquiring the lease, the tserver should accept new sessions again. 
+ ASSERT_OK(WaitFor( + [this, kTSIdx]() -> Result { + auto result = cluster_->ConnectToDB("yugabyte", kTSIdx); + return result.ok(); + }, + timeout, "Wait for tserver to accept new pg sessions")); +} + class ExternalObjectLockTestOneTSWithoutLease : public ExternalObjectLockTestOneTS { public: ExternalMiniClusterOptions MakeExternalMiniClusterOptions() override; @@ -1455,8 +1543,10 @@ bool StatusContainsMessage(const Status& status, std::string_view s) { } bool PGSessionKilledStatus(const Status& status) { - constexpr std::string_view message = "server closed the connection unexpectedly"; - return status.IsNetworkError() && StatusContainsMessage(status, message); + constexpr std::string_view closed_message = "server closed the connection unexpectedly"; + constexpr std::string_view shutdown_message = "Object Lock Manager Shutdown"; + return status.IsNetworkError() && (StatusContainsMessage(status, closed_message) || + StatusContainsMessage(status, shutdown_message)); } bool SameCodeAndMessage(const Status& lhs, const Status& rhs) { @@ -1492,13 +1582,15 @@ ExternalMiniClusterOptions ExternalObjectLockTest::MakeExternalMiniClusterOption opts.replication_factor = ReplicationFactor(); opts.enable_ysql = true; opts.extra_master_flags = { - "--TEST_enable_object_locking_for_table_locks", - Format("--master_ysql_operation_lease_ttl_ms=$0", kDefaultMasterYSQLLeaseTTLMilli), - Format("--object_lock_cleanup_interval_ms=$0", kDefaultMasterObjectLockCleanupIntervalMilli), - "--enable_load_balancing=false"}; + "--TEST_enable_object_locking_for_table_locks", + Format("--master_ysql_operation_lease_ttl_ms=$0", kDefaultMasterYSQLLeaseTTLMilli), + Format("--object_lock_cleanup_interval_ms=$0", kDefaultMasterObjectLockCleanupIntervalMilli), + "--enable_load_balancing=false", + "--TEST_olm_skip_sending_wait_for_probes=false"}; opts.extra_tserver_flags = { "--TEST_enable_object_locking_for_table_locks", - Format("--ysql_lease_refresher_interval_ms=$0", 
kDefaultYSQLLeaseRefreshIntervalMilli)}; + Format("--ysql_lease_refresher_interval_ms=$0", kDefaultYSQLLeaseRefreshIntervalMilli), + "--TEST_olm_skip_sending_wait_for_probes=false"}; return opts; } diff --git a/src/yb/integration-tests/raft_consensus-itest.cc b/src/yb/integration-tests/raft_consensus-itest.cc index b432998ed1a4..9a58f82e3d06 100644 --- a/src/yb/integration-tests/raft_consensus-itest.cc +++ b/src/yb/integration-tests/raft_consensus-itest.cc @@ -3634,8 +3634,7 @@ TEST_F(RaftConsensusITest, CatchupAfterLeaderRestarted) { LOG(INFO)<< "Written data. Flush tablet and restart the rest of the replicas"; for (size_t ts_idx = 0; ts_idx < cluster_->num_tablet_servers(); ++ts_idx) { if (ts_idx != paused_ts_idx) { - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - cluster_->tablet_server(ts_idx), {tablet_id_}, FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(ts_idx, {tablet_id_})); cluster_->tablet_server(ts_idx)->Shutdown(); ASSERT_OK(cluster_->tablet_server(ts_idx)->Restart()); } diff --git a/src/yb/integration-tests/remote_bootstrap-itest.cc b/src/yb/integration-tests/remote_bootstrap-itest.cc index 26cae973ba7d..a0391dd5d9c2 100644 --- a/src/yb/integration-tests/remote_bootstrap-itest.cc +++ b/src/yb/integration-tests/remote_bootstrap-itest.cc @@ -1808,8 +1808,7 @@ TEST_F(RemoteBootstrapITest, TestRemoteBootstrapFromClosestPeer) { // Run Log GC on the leader peer and check that the follower is still able to serve as rbs source. // The follower would request to remotely anchor the log on the last received op id. 
auto leader_ts = cluster_->tablet_server(crash_test_leader_index_); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - leader_ts, {crash_test_tablet_id_}, tserver::FlushTabletsRequestPB::LOG_GC)); + ASSERT_OK(leader_ts->LogGC({crash_test_tablet_id_}, false)); ASSERT_NE(crash_test_leader_index_, 2); AddTServerInZone("z2"); @@ -2011,6 +2010,9 @@ RemoteBootstrapITest::FindTablet( } TEST_F(RemoteBootstrapITest, TestRBSWithLazySuperblockFlush) { + vector master_flags; + master_flags.push_back("--TEST_system_table_num_tablets=3"); + vector ts_flags; // Enable lazy superblock flush. ts_flags.push_back("--lazily_flush_superblock=true"); @@ -2033,7 +2035,7 @@ TEST_F(RemoteBootstrapITest, TestRBSWithLazySuperblockFlush) { ts_flags.push_back("--TEST_skip_force_superblock_flush=true"); ASSERT_NO_FATALS(StartCluster( - ts_flags, /* master_flags = */ {}, /* num_tablet_servers = */ 3, /* enable_ysql = */ true)); + ts_flags, master_flags, /* num_tablet_servers = */ 3, /* enable_ysql = */ true)); RBSWithLazySuperblockFlush(/* num_tables */ 20); } diff --git a/src/yb/integration-tests/snapshot-test.cc b/src/yb/integration-tests/snapshot-test.cc index 5d3970e16baf..1a620e253cdd 100644 --- a/src/yb/integration-tests/snapshot-test.cc +++ b/src/yb/integration-tests/snapshot-test.cc @@ -1085,8 +1085,7 @@ TEST_F_EX(SnapshotTest, CrashAfterFlushedFrontierSaved, SnapshotExternalMiniClus for (int iter = 0; iter < kNumIters; ++iter) { const auto log_prefix = Format("Iteration $0: ", iter); - ASSERT_OK( - cluster_->FlushTabletsOnSingleTServer(ts1, {}, tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(ts1->FlushTablets({})); const auto snapshot_id = ASSERT_RESULT(snapshot_util.CreateSnapshot(table)); auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap(master_proxy, &client->proxy_cache())); for (const auto& tablet_id : tablet_ids) { diff --git a/src/yb/integration-tests/tablet-split-itest-base.cc b/src/yb/integration-tests/tablet-split-itest-base.cc index 269448b122fc..af13faec48c0 
100644 --- a/src/yb/integration-tests/tablet-split-itest-base.cc +++ b/src/yb/integration-tests/tablet-split-itest-base.cc @@ -830,16 +830,6 @@ Status TabletSplitExternalMiniClusterITest::SplitTablet(const std::string& table return Status::OK(); } -Status TabletSplitExternalMiniClusterITest::FlushTabletsOnSingleTServer( - size_t tserver_idx, const std::vector tablet_ids, bool is_compaction) { - auto tserver = cluster_->tablet_server(tserver_idx); - auto flush_op_type = is_compaction ? - tserver::FlushTabletsRequestPB::COMPACT : - tserver::FlushTabletsRequestPB::FLUSH; - RETURN_NOT_OK(cluster_->FlushTabletsOnSingleTServer(tserver, tablet_ids, flush_op_type)); - return Status::OK(); -} - Result> TabletSplitExternalMiniClusterITest::GetTestTableTabletIds( size_t tserver_idx) { std::set tablet_ids; @@ -1032,7 +1022,7 @@ Status TabletSplitExternalMiniClusterITest::SplitTabletCrashMaster( if (change_split_boundary) { RETURN_NOT_OK(WriteRows(kNumRows * 2, kNumRows)); for (size_t i = 0; i < cluster_->num_tablet_servers(); i++) { - RETURN_NOT_OK(FlushTabletsOnSingleTServer(i, {tablet_id}, false)); + RETURN_NOT_OK(cluster_->FlushTabletsOnSingleTServer(i, {tablet_id})); } } diff --git a/src/yb/integration-tests/tablet-split-itest-base.h b/src/yb/integration-tests/tablet-split-itest-base.h index e603b9fa9c58..648f275b45eb 100644 --- a/src/yb/integration-tests/tablet-split-itest-base.h +++ b/src/yb/integration-tests/tablet-split-itest-base.h @@ -229,9 +229,6 @@ class TabletSplitExternalMiniClusterITest : public TabletSplitITestBase tablet_ids, bool is_compaction); - Result> GetTestTableTabletIds(size_t tserver_idx); Result> GetTestTableTabletIds(); diff --git a/src/yb/integration-tests/tablet-split-itest.cc b/src/yb/integration-tests/tablet-split-itest.cc index 4e686caa317d..eb28661443b3 100644 --- a/src/yb/integration-tests/tablet-split-itest.cc +++ b/src/yb/integration-tests/tablet-split-itest.cc @@ -100,6 +100,7 @@ #include "yb/util/status_format.h" #include 
"yb/util/status_log.h" #include "yb/util/sync_point.h" +#include "yb/util/test_thread_holder.h" #include "yb/util/tsan_util.h" using std::string; @@ -137,6 +138,7 @@ DECLARE_int32(retryable_request_timeout_secs); DECLARE_int32(rocksdb_base_background_compactions); DECLARE_int32(rocksdb_max_background_compactions); DECLARE_int32(rocksdb_level0_file_num_compaction_trigger); +DECLARE_int32(TEST_simulate_long_remote_bootstrap_sec); DECLARE_bool(enable_automatic_tablet_splitting); DECLARE_bool(TEST_pause_rbs_before_download_wal); DECLARE_int64(tablet_split_low_phase_shard_count_per_node); @@ -154,6 +156,7 @@ DECLARE_int32(TEST_nodes_per_cloud); DECLARE_int32(replication_factor); DECLARE_int32(txn_max_apply_batch_records); DECLARE_int32(TEST_pause_and_skip_apply_intents_task_loop_ms); +DECLARE_bool(TEST_pause_rbs_before_download_wal); DECLARE_bool(TEST_pause_tserver_get_split_key); DECLARE_bool(TEST_reject_delete_not_serving_tablet_rpc); DECLARE_int32(timestamp_history_retention_interval_sec); @@ -1962,9 +1965,7 @@ TEST_F(AutomaticTabletSplitExternalMiniClusterITest, CrashedSplitIsRestarted) { std::this_thread::sleep_for(2s); // Flush to ensure SST files are generated so splitting can occur. 
for (size_t i = 0; i < cluster_->num_tablet_servers(); ++i) { - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(i), - {tablet_id}, - tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(i, {tablet_id})); } const auto kCrashTime = 10s; @@ -2011,9 +2012,7 @@ class AutomaticTabletSplitAddServerITest: public AutomaticTabletSplitITest { } void BuildTServerMap() { - master::MasterClusterProxy master_proxy( - proxy_cache_.get(), cluster_->mini_master()->bound_rpc_addr()); - ts_map_ = ASSERT_RESULT(itest::CreateTabletServerMap(master_proxy, proxy_cache_.get())); + ts_map_ = ASSERT_RESULT(itest::CreateTabletServerMap(cluster_.get())); } void AddTabletToNewTServer(const TabletId& tablet_id, @@ -2574,7 +2573,7 @@ Status TabletSplitSingleServerITest::TestSplitBeforeParentDeletion(bool hide_onl } const auto split_hash_code = VERIFY_RESULT(WriteRowsAndGetMiddleHashCode(kNumRows)); - const TabletId parent_id = VERIFY_RESULT(SplitTabletAndValidate(split_hash_code, kNumRows)); + const auto parent_tablet_id = VERIFY_RESULT(SplitTabletAndValidate(split_hash_code, kNumRows)); auto child_ids = ListActiveTabletIdsForTable(cluster_.get(), table_->id()); auto resp = VERIFY_RESULT(SendMasterRpcSyncSplitTablet(*child_ids.begin())); @@ -2585,7 +2584,7 @@ Status TabletSplitSingleServerITest::TestSplitBeforeParentDeletion(bool hide_onl ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_skip_deleting_split_tablets) = false; auto catalog_mgr = VERIFY_RESULT(catalog_manager()); RETURN_NOT_OK(WaitFor([&]() -> Result { - auto parent = catalog_mgr->GetTabletInfo(parent_id); + auto parent = catalog_mgr->GetTabletInfo(parent_tablet_id); if (!parent.ok()) { if (parent.status().IsNotFound()) { return true; @@ -2857,8 +2856,7 @@ TEST_P(TabletSplitExternalMiniClusterCrashITest, CrashLeaderTest) { ASSERT_OK(WaitForTabletsExcept(2, leader_idx, tablet_id)); // Wait for both child tablets have leaders elected. 
- auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap( - cluster_->GetLeaderMasterProxy(), &cluster_->proxy_cache())); + auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap(cluster_.get())); auto tablet_ids = CHECK_RESULT(GetTestTableTabletIds()); for (const auto& id : tablet_ids) { if (id != tablet_id) { @@ -2991,8 +2989,7 @@ TEST_F_EX( const auto kWaitForTabletsRunningTimeout = 20s * kTimeMultiplier; const auto server_to_bootstrap_idx = 0; - auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap( - cluster_->GetLeaderMasterProxy(), &cluster_->proxy_cache())); + auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap(cluster_.get())); CreateSingleTablet(); const auto source_tablet_id = CHECK_RESULT(GetOnlyTestTabletId()); @@ -3039,8 +3036,7 @@ TEST_F_EX( auto* ts = cluster_->tablet_server(i); if (i != server_to_bootstrap_idx) { ASSERT_OK(cluster_->WaitForAllIntentsApplied(ts, 15s * kTimeMultiplier)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - ts, {source_tablet_id}, tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(ts->FlushTablets({source_tablet_id})); // Prevent leader changes. 
ASSERT_OK(cluster_->SetFlag(ts, "enable_leader_failure_detection", "false")); } @@ -3170,8 +3166,7 @@ TEST_F_EX( for (size_t ts_idx = 0; ts_idx < cluster_->num_tablet_servers(); ++ts_idx) { auto* ts = cluster_->tablet_server(ts_idx); if (ts->IsProcessAlive()) { - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - ts, {source_tablet_id}, tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(ts->FlushTablets({source_tablet_id})); ASSERT_OK(WaitForAnySstFiles(*ts, source_tablet_id)); } } @@ -3495,8 +3490,7 @@ TEST_F_EX(TabletSplitITest, SplitOpApplyAfterLeaderChange, TabletSplitExternalMi ASSERT_OK(cluster_->SetFlagOnMasters("enable_load_balancing", "false")); - auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap( - cluster_->GetLeaderMasterProxy(), &cluster_->proxy_cache())); + auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap(cluster_.get())); CreateSingleTablet(); ASSERT_OK(WriteRowsAndFlush(kNumRows)); @@ -3791,4 +3785,178 @@ INSTANTIATE_TEST_CASE_P( "TEST_crash_before_source_tablet_mark_split_done", "TEST_crash_after_tablet_split_completed")); +namespace { + +constexpr auto kMaxAcceptableFollowerLagMs = 2000; + +Status CheckFollowerLag( + ExternalMiniCluster& cluster, const itest::TabletServerMap& ts_map, const TabletId& tablet_id, + const std::string& prefix) { + constexpr auto kRpcTimeout = 60s * kTimeMultiplier; + LOG(INFO) << prefix << " " << tablet_id << ":"; + for (auto& tserver : cluster.tserver_daemons()) { + const auto tserver_id = tserver->uuid(); + + consensus::ConsensusStatePB cstate; + auto status = itest::GetConsensusState( + ts_map.find(tserver_id)->second.get(), tablet_id, consensus::CONSENSUS_CONFIG_COMMITTED, + kRpcTimeout, &cstate); + + const auto tablet_peer_str = Format("$0 T $1 P $2", prefix, tablet_id, tserver_id); + if (status.ok()) { + LOG(INFO) << "Committed config for " << tablet_peer_str << " has " + << cstate.config().peers().size() + << " peers: " << cstate.config().ShortDebugString(); + + auto peer_health_result = 
cluster.GetTabletPeerHealth(*tserver, {tablet_id}); + if (peer_health_result.ok()) { + SCHECK_EQ(peer_health_result->tablet_healths_size(), 1, InternalError, ""); + const auto follower_lag_ms = peer_health_result->tablet_healths(0).follower_lag_ms(); + LOG(INFO) << "Follower lag for " << tablet_peer_str << ": " << follower_lag_ms; + SCHECK_LT(follower_lag_ms, kMaxAcceptableFollowerLagMs, InternalError, ""); + } else if (peer_health_result.status().IsIllegalState()) { + LOG(INFO) << "Not getting follower lag for " << tablet_peer_str << " due to " + << peer_health_result.status(); + } else { + LOG(INFO) << "Error getting follower lag for " << tablet_peer_str << ": " << status; + return peer_health_result.status(); + } + } else if (status.IsNotFound()) { + LOG(INFO) << "Raft consensus for " << tablet_peer_str << " is not found"; + } else if (status.IsIllegalState()) { + LOG(INFO) << "No raft consensus for " << tablet_peer_str << ": " << status; + } else { + LOG(INFO) << "Error getting Raft consensus for " << tablet_peer_str << ": " << status; + return status; + } + } + return Status::OK(); +} + +} // namespace + +TEST_F_EX(TabletSplitITest, SplitWithParentTabletMove, TabletSplitExternalMiniClusterITest) { + constexpr auto kTimeout = 15s * kTimeMultiplier; + ASSERT_OK(cluster_->SetFlagOnMasters("enable_load_balancing", "false")); + ASSERT_OK(cluster_->SetFlagOnTServers("TEST_skip_deleting_split_tablets", "true")); + + CreateSingleTablet(); + ASSERT_OK(cluster_->AddTabletServer()); + ASSERT_OK(cluster_->WaitForTabletServerCount(4, kTimeout)); + auto ts_map = ASSERT_RESULT(itest::CreateTabletServerMap(cluster_.get())); + auto* added_tserver = cluster_->tablet_server(3); + const auto added_tserver_id = added_tserver->uuid(); + + ASSERT_OK(WriteRowsAndFlush()); + const auto parent_tablet_id = ASSERT_RESULT(GetOnlyTestTabletId()); + LOG(INFO) << "Parent tablet id: " << parent_tablet_id; + + // Delay committing operations on leader. 
We want SPLIT_OP to be added to Raft log but not + applied. And we can't block apply code path because it will hold ReplicaState mutex and block + Raft functioning for parent tablet, so we won't be able to reproduce the issue because RBS + won't be triggered. So instead we pause RaftConsensus::UpdateMajorityReplicated on parent + tablet leader that will pause both advancing committed op id and applying operations. + const auto parent_leader_idx = CHECK_RESULT(cluster_->GetTabletLeaderIndex(parent_tablet_id)); + auto* const parent_leader_tserver = cluster_->tablet_server(parent_leader_idx); + const auto parent_leader_tserver_id = parent_leader_tserver->uuid(); + auto* const parent_leader_tserver_details = ts_map[parent_leader_tserver_id].get(); + ASSERT_OK( + cluster_->SetFlag(parent_leader_tserver, "TEST_pause_update_majority_replicated", "true")); + + OpId last_logged_op_id; + ASSERT_OK(itest::WaitForServerToBeQuiet( + kTimeout, {parent_leader_tserver_details}, parent_tablet_id, &last_logged_op_id, + itest::MustBeCommitted::kFalse)); + + ASSERT_OK(SplitTablet(parent_tablet_id)); + + // Wait for SPLIT_OP to be added to leader Raft log. + ASSERT_OK(itest::WaitForServersToAgree( + kTimeout, {parent_leader_tserver_details}, parent_tablet_id, last_logged_op_id.index + 1, + /* actual_index = */ nullptr, itest::MustBeCommitted::kFalse)); + + ASSERT_OK(cluster_->SetFlag(added_tserver, "TEST_pause_rbs_before_download_wal", "true")); + + LOG(INFO) << "Adding server " << added_tserver_id << " for parent tablet " << parent_tablet_id; + // AddServer RPC only returns when CONFIG_CHANGE_OP is majority replicated, so we do it async to + avoid deadlock inside test.
+ TestThreadHolder thread_holder; + thread_holder.AddThreadFunctor( + [&, added_tserver_details = ts_map[added_tserver_id].get()]() { + auto status = itest::AddServer( + parent_leader_tserver_details, parent_tablet_id, added_tserver_details, + consensus::PeerMemberType::PRE_VOTER, boost::none, kTimeout); + ERROR_NOT_OK(status, "AddServer error: "); + ASSERT_OK(status); + }); + + // Give some time for RBS to start and reach downloading WAL files. We can't wait for this event + // explicitly because with the fix RBS won't happen. + SleepFor(5s * kTimeMultiplier); + + // Unpause RaftConsensus::UpdateMajorityReplicated on parent tablet leader. + ASSERT_OK( + cluster_->SetFlag(parent_leader_tserver, "TEST_pause_update_majority_replicated", "false")); + + thread_holder.JoinAll(); + NO_PENDING_FATALS(); + + ASSERT_OK(WaitFor([&] -> Result { + const auto parent_leader_idx = CHECK_RESULT(cluster_->GetTabletLeaderIndex(parent_tablet_id)); + const auto parent_leader_tserver_id = cluster_->tablet_server(parent_leader_idx)->uuid(); + + consensus::ConsensusStatePB cstate; + auto status = itest::GetConsensusState( + ts_map[parent_leader_tserver_id].get(), parent_tablet_id, + consensus::CONSENSUS_CONFIG_COMMITTED, kRpcTimeout, &cstate); + + if (!status.ok()) { + return false; + } + + return cstate.config().peers_size() == 4; + }, kTimeout, "Wait for parent tablet peer to have committed Raft config with 4 peers")); + + ASSERT_OK(cluster_->SetFlag(added_tserver, "TEST_pause_rbs_before_download_wal", "false")); + ASSERT_OK(WaitUntilTabletRunning(ts_map[added_tserver_id].get(), parent_tablet_id, kTimeout)); + LOG(INFO) << "Parent tablet peer on added tserver has completed bootstrap"; + + ASSERT_OK(CheckFollowerLag(*cluster_, ts_map, parent_tablet_id, "Parent tablet")); + + const auto test_table_id = ASSERT_RESULT(GetTestTableId()); + std::vector child_tablet_ids; + ASSERT_OK(WaitFor( + [&] -> Result { + auto tablets_resp = VERIFY_RESULT(cluster_->ListTablets(added_tserver)); + for 
(const auto& status_and_schema : tablets_resp.status_and_schema()) { + const auto& tablet_status = status_and_schema.tablet_status(); + if (tablet_status.table_id() != test_table_id) { + continue; + } + if (tablet_status.tablet_id() == parent_tablet_id) { + continue; + } + // Child tablet + return tablet_status.state() == tablet::RaftGroupStatePB::RUNNING; + } + return true; + }, + kTimeout, + "Wait for child tablets to become either running or deleted (won't be listed by ListTablets) " + "on added tserver")); + + SleepFor((kMaxAcceptableFollowerLagMs + 100) * 1ms); + + master::GetTableLocationsResponsePB resp; + ASSERT_OK(itest::GetTableLocations( + cluster_.get(), table_->name(), kTimeout, RequireTabletsRunning::kFalse, &resp)); + + for (const auto& tablet_loc : resp.tablet_locations()) { + const auto& tablet_id = tablet_loc.tablet_id(); + ASSERT_OK(CheckFollowerLag( + *cluster_, ts_map, tablet_id, + std::string(tablet_id == parent_tablet_id ? "Parent" : "Child") + " tablet")); + } +} + } // namespace yb diff --git a/src/yb/integration-tests/upgrade-tests/ysql_major_extension_upgrade-test.cc b/src/yb/integration-tests/upgrade-tests/ysql_major_extension_upgrade-test.cc index 48cc5c871420..54b1cf7c5564 100644 --- a/src/yb/integration-tests/upgrade-tests/ysql_major_extension_upgrade-test.cc +++ b/src/yb/integration-tests/upgrade-tests/ysql_major_extension_upgrade-test.cc @@ -43,6 +43,8 @@ TEST_F(YsqlMajorExtensionUpgradeTest, Simple) { ASSERT_OK(ExecuteStatement(Format("CREATE EXTENSION pg_partman"))); ASSERT_OK(ExecuteStatement(Format("CREATE EXTENSION pg_cron"))); ASSERT_OK(ExecuteStatement(Format("CREATE EXTENSION pgaudit"))); + ASSERT_OK(ExecuteStatement(Format("CREATE EXTENSION cube"))); + ASSERT_OK(ExecuteStatement(Format("CREATE EXTENSION earthdistance"))); ASSERT_OK(UpgradeClusterToCurrentVersion()); } diff --git a/src/yb/integration-tests/upgrade-tests/ysql_major_upgrade-test.cc b/src/yb/integration-tests/upgrade-tests/ysql_major_upgrade-test.cc index 
e59f7d6368f4..22f98851e21e 100644 --- a/src/yb/integration-tests/upgrade-tests/ysql_major_upgrade-test.cc +++ b/src/yb/integration-tests/upgrade-tests/ysql_major_upgrade-test.cc @@ -1651,4 +1651,76 @@ TEST_F(YsqlMajorUpgradeTest, YbSuperuserRole) { ASSERT_OK(UpgradeClusterToCurrentVersion(kNoDelayBetweenNodes)); } +TEST_F(YsqlMajorUpgradeTest, Analyze) { + constexpr std::string_view kStatsUpdateError = + "YSQL DDLs, and catalog modifications are not allowed during a major YSQL upgrade"; + constexpr std::string_view kNoRandStateError = "Invalid sampling state, random state is missing"; + using ExpectedErrors = std::optional>; + auto check_analyze = [this](std::optional server, ExpectedErrors expected_errors) { + auto conn = ASSERT_RESULT(CreateConnToTs(server)); + auto status = conn.ExecuteFormat("ANALYZE $0", kSimpleTableName); + if (!expected_errors) { + ASSERT_OK(status); + } else { + ASSERT_NOK(status); + for (const auto& err : *expected_errors) { + if (status.ToString().find(err) != std::string::npos) { + return; + } + } + FAIL() << "Unexpected error " << status.ToString(); + } + }; + ASSERT_OK(CreateSimpleTable()); + check_analyze(kAnyTserver, std::nullopt); + ASSERT_OK(RestartAllMastersInCurrentVersion(kNoDelayBetweenNodes)); + ASSERT_OK(PerformYsqlMajorCatalogUpgrade()); + check_analyze(kAnyTserver, {{kStatsUpdateError}}); + LOG(INFO) << "Restarting yb-tserver " << kMixedModeTserverPg15 << " in current version"; + auto mixed_mode_pg15_tserver = cluster_->tablet_server(kMixedModeTserverPg15); + ASSERT_OK(RestartTServerInCurrentVersion( + *mixed_mode_pg15_tserver, /*wait_for_cluster_to_stabilize=*/true)); + check_analyze(kMixedModeTserverPg11, {{kNoRandStateError, kStatsUpdateError}}); + check_analyze(kMixedModeTserverPg15, {{kNoRandStateError, kStatsUpdateError}}); + ASSERT_OK(UpgradeAllTserversFromMixedMode()); + check_analyze(kAnyTserver, {{kStatsUpdateError}}); + ASSERT_OK(FinalizeUpgrade()); + check_analyze(kAnyTserver, std::nullopt); +} + 
+TEST_F(YsqlMajorUpgradeTest, EnumTypes) { + ASSERT_OK(ExecuteStatements({ + "CREATE TYPE color AS ENUM ('red', 'green', 'blue', 'yellow')", + "CREATE TABLE paint_log (id serial, shade color) PARTITION BY HASH (shade)", + "CREATE TABLE paint_log_p0 PARTITION OF paint_log FOR VALUES WITH (MODULUS 2, REMAINDER 0)", + "CREATE TABLE paint_log_p1 PARTITION OF paint_log FOR VALUES WITH (MODULUS 2, REMAINDER 1)", + "INSERT INTO paint_log (shade) VALUES ('red'), ('green'), ('blue'), ('yellow')" + })); + auto conn = ASSERT_RESULT(cluster_->ConnectToDB()); + auto type_oid = ASSERT_RESULT(conn.FetchRow( + "SELECT oid FROM pg_type WHERE typname = 'color'")); + + const auto fetch_partition_data = [&](const std::string& partition) { + return conn.FetchRows( + Format("SELECT id, shade::text FROM $0 ORDER BY shade", partition)); + }; + + const auto fetch_enum_data = [&]() { + return conn.FetchRows(Format( + "SELECT oid, enumsortorder, enumlabel FROM pg_enum WHERE enumtypid = $0" + " ORDER BY enumsortorder", type_oid)); + }; + + auto paint_log_p0_res = ASSERT_RESULT(fetch_partition_data("paint_log_p0")); + auto paint_log_p1_res = ASSERT_RESULT(fetch_partition_data("paint_log_p1")); + auto enum_oids = ASSERT_RESULT(fetch_enum_data()); + + ASSERT_OK(UpgradeClusterToCurrentVersion(kNoDelayBetweenNodes)); + + conn = ASSERT_RESULT(cluster_->ConnectToDB()); + ASSERT_VECTORS_EQ(ASSERT_RESULT(fetch_partition_data("paint_log_p0")), paint_log_p0_res); + ASSERT_VECTORS_EQ(ASSERT_RESULT(fetch_partition_data("paint_log_p1")), paint_log_p1_res); + ASSERT_VECTORS_EQ(ASSERT_RESULT(fetch_enum_data()), enum_oids); +} + } // namespace yb diff --git a/src/yb/integration-tests/wait_states-itest.cc b/src/yb/integration-tests/wait_states-itest.cc index 8d6b187b8a85..174bda091b94 100644 --- a/src/yb/integration-tests/wait_states-itest.cc +++ b/src/yb/integration-tests/wait_states-itest.cc @@ -1035,6 +1035,11 @@ class AshTestVerifyPgOccurrence : public AshTestVerifyPgOccurrenceBase, public 
::testing::WithParamInterface { public: AshTestVerifyPgOccurrence() : AshTestVerifyPgOccurrenceBase(GetParam()) {} + + protected: + void OverrideMiniClusterOptions(MiniClusterOptions* options) override { + options->wait_for_pg = false; + } }; INSTANTIATE_TEST_SUITE_P( diff --git a/src/yb/integration-tests/xcluster/xcluster_ysql-test.cc b/src/yb/integration-tests/xcluster/xcluster_ysql-test.cc index efa64269178d..e105871eede6 100644 --- a/src/yb/integration-tests/xcluster/xcluster_ysql-test.cc +++ b/src/yb/integration-tests/xcluster/xcluster_ysql-test.cc @@ -111,6 +111,7 @@ DECLARE_uint64(snapshot_coordinator_poll_interval_ms); DECLARE_uint32(cdc_wal_retention_time_secs); DECLARE_int32(catalog_manager_bg_task_wait_ms); DECLARE_bool(TEST_enable_sync_points); +DECLARE_bool(TEST_dcheck_for_missing_schema_packing); namespace yb { @@ -2181,6 +2182,7 @@ void XClusterYsqlTest::ValidateRecordsXClusterWithCDCSDK( ANNOTATE_UNPROTECTED_WRITE(FLAGS_update_min_cdc_indices_interval_secs) = 1; } std::vector tables_vector = {kNTabletsPerTable, kNTabletsPerTable}; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_dcheck_for_missing_schema_packing) = false; ASSERT_OK(SetUpWithParams(tables_vector, tables_vector, 1)); // 2. Setup replication. diff --git a/src/yb/master/backfill_index.cc b/src/yb/master/backfill_index.cc index b6d591bf0e13..75c325345a4f 100644 --- a/src/yb/master/backfill_index.cc +++ b/src/yb/master/backfill_index.cc @@ -841,9 +841,9 @@ Status BackfillTable::UpdateRowsProcessedForIndexTable(const uint64_t number_row Status BackfillTable::UpdateSafeTime(const Status& s, HybridTime ht) { if (!s.ok()) { // Move on to ABORTED permission. - LOG_WITH_PREFIX(ERROR) + LOG_WITH_PREFIX(DFATAL) << "Failed backfill. 
Could not compute safe time for " - << yb::ToString(indexed_table_) << " " << s; + << AsString(indexed_table_) << " " << s; if (!timestamp_chosen_.exchange(true)) { RETURN_NOT_OK(Abort()); } @@ -958,8 +958,8 @@ Status BackfillTable::DoBackfill() { Status BackfillTable::Done(const Status& s, const std::unordered_set& failed_indexes) { if (!s.ok()) { - LOG_WITH_PREFIX(ERROR) << "failed to backfill the index: " << yb::ToString(failed_indexes) - << " due to " << s; + LOG_WITH_PREFIX(WARNING) << "failed to backfill the index: " << AsString(failed_indexes) + << " due to " << s; RETURN_NOT_OK_PREPEND( MarkIndexesAsFailed(failed_indexes, s.message().ToBuffer()), "Couldn't mark indexes as failed"); @@ -1168,8 +1168,8 @@ Status BackfillTable::AllowCompactionsToGCDeleteMarkers( DVLOG(3) << __PRETTY_FUNCTION__; auto res = master_->catalog_manager()->FindTableById(index_table_id); if (!res && res.status().IsNotFound()) { - LOG(ERROR) << "Index " << index_table_id << " was not found." - << " This is ok in case somebody issued a delete index. : " << res.ToString(); + LOG(WARNING) << "Index " << index_table_id << " was not found." + << " This is ok in case somebody issued a delete index. : " << res.ToString(); return Status::OK(); } scoped_refptr index_table_info = VERIFY_RESULT_PREPEND(std::move(res), @@ -1192,8 +1192,8 @@ Status BackfillTable::AllowCompactionsToGCDeleteMarkers( auto index_table_rlock = index_table_info->LockForRead(); auto state = index_table_rlock->pb.state(); if (!index_table_rlock->is_running() || FLAGS_TEST_simulate_cannot_enable_compactions) { - LOG(ERROR) << "Index " << index_table_id << " is in state " - << SysTablesEntryPB_State_Name(state) << " : cannot enable compactions on it"; + LOG(WARNING) << "Index " << index_table_id << " is in state " + << SysTablesEntryPB_State_Name(state) << " : cannot enable compactions on it"; // Treating it as success so that we can proceed with updating other indexes. 
return Status::OK(); } @@ -1421,7 +1421,9 @@ void GetSafeTimeForTablet::UnregisterAsyncTaskCallback() { } else { safe_time = HybridTime(resp_.safe_time()); if (safe_time.is_special()) { - LOG(ERROR) << "GetSafeTime for " << tablet_->ToString() << " got " << safe_time; + status = STATUS_FORMAT( + InternalError, "GetSafeTime for $0 got $1", tablet_->ToString(), safe_time); + LOG(DFATAL) << status; } else { VLOG(3) << "GetSafeTime for " << tablet_->ToString() << " got " << safe_time; } diff --git a/src/yb/master/catalog_entity_info.cc b/src/yb/master/catalog_entity_info.cc index 971abec0bc81..f64d82754b6c 100644 --- a/src/yb/master/catalog_entity_info.cc +++ b/src/yb/master/catalog_entity_info.cc @@ -511,11 +511,11 @@ Result TableInfo::GetSchema() const { } bool TableInfo::has_pgschema_name() const { - return LockForRead()->schema().has_pgschema_name(); + return LockForRead()->schema().has_depricated_pgschema_name(); } const string TableInfo::pgschema_name() const { - return LockForRead()->schema().pgschema_name(); + return LockForRead()->schema().depricated_pgschema_name(); } bool TableInfo::has_pg_type_oid() const { @@ -1211,13 +1211,13 @@ Result PersistentTableInfo::GetCurrentDdlTransactionId() const { bool PersistentTableInfo::IsXClusterDDLReplicationDDLQueueTable() const { return pb.table_type() == PGSQL_TABLE_TYPE && - schema().pgschema_name() == xcluster::kDDLQueuePgSchemaName && + schema().depricated_pgschema_name() == xcluster::kDDLQueuePgSchemaName && name() == xcluster::kDDLQueueTableName; } bool PersistentTableInfo::IsXClusterDDLReplicationReplicatedDDLsTable() const { return pb.table_type() == PGSQL_TABLE_TYPE && - schema().pgschema_name() == xcluster::kDDLQueuePgSchemaName && + schema().depricated_pgschema_name() == xcluster::kDDLQueuePgSchemaName && name() == xcluster::kDDLReplicatedTableName; } @@ -1351,8 +1351,8 @@ std::string DdlLogEntry::id() const { // ObjectLockInfo // 
================================================================================================ -std::optional ObjectLockInfo::RefreshYsqlOperationLease( - const NodeInstancePB& instance) { +std::variant +ObjectLockInfo::RefreshYsqlOperationLease(const NodeInstancePB& instance) { auto l = LockForWrite(); { std::lock_guard l(mutex_); @@ -1360,7 +1360,7 @@ std::optional ObjectLockInfo::RefreshYsqlOperationLea } if (l->pb.lease_info().live_lease() && l->pb.lease_info().instance_seqno() == instance.instance_seqno()) { - return std::nullopt; + return l->pb.lease_info(); } auto& lease_info = *l.mutable_data()->pb.mutable_lease_info(); lease_info.set_live_lease(true); diff --git a/src/yb/master/catalog_entity_info.h b/src/yb/master/catalog_entity_info.h index 91f29ff9eaaf..cdaeb412599e 100644 --- a/src/yb/master/catalog_entity_info.h +++ b/src/yb/master/catalog_entity_info.h @@ -1067,8 +1067,8 @@ class ObjectLockInfo : public MetadataCowWrapper { // Return the user defined type's ID. Does not require synchronization. 
virtual const std::string& id() const override { return ts_uuid_; } - std::optional RefreshYsqlOperationLease(const NodeInstancePB& instance) - EXCLUDES(mutex_); + std::variant + RefreshYsqlOperationLease(const NodeInstancePB& instance) EXCLUDES(mutex_); virtual void Load(const SysObjectLockEntryPB& metadata) override; diff --git a/src/yb/master/catalog_manager.cc b/src/yb/master/catalog_manager.cc index c8dba5454585..923f7334a988 100644 --- a/src/yb/master/catalog_manager.cc +++ b/src/yb/master/catalog_manager.cc @@ -869,8 +869,25 @@ IndexStatusPB::BackfillStatus GetBackfillStatus(const IndexInfoPB& index) { } bool IsPgCronJobTable(const CreateTableRequestPB& req) { - return req.has_schema() && req.schema().has_pgschema_name() && - req.schema().pgschema_name() == "cron" && req.name() == "job"; + if (req.has_internal_table_type()) { + return req.internal_table_type() == InternalTableType::PG_CRON_JOB_TABLE; + } + + // (DEPRECATE_EOL 2.27) In upgrade mode - process request from old TS. + return req.has_schema() && + req.schema().depricated_pgschema_name() == "cron" && req.name() == "job"; +} + +bool IsXClusterDDLReplicationTable(const CreateTableRequestPB& req) { + if (req.has_internal_table_type()) { + return req.internal_table_type() == InternalTableType::XCLUSTER_DDL_REPLICATION_TABLE; + } + + // (DEPRECATE_EOL 2.27) In upgrade mode - process request from old TS. 
+ return req.has_schema() && + req.schema().depricated_pgschema_name() == xcluster::kDDLQueuePgSchemaName && + (req.name() == xcluster::kDDLQueueTableName || + req.name() == xcluster::kDDLReplicatedTableName); } Result ToQLStmtType( @@ -1277,8 +1294,8 @@ void CatalogManager::ValidateIndexTablesPostLoad( IndexStatusPB::BackfillStatus backfill_status) { DCHECK(status.ok()); if (!status.ok()) { - LOG(ERROR) << "ValidateIndexTablesPostLoad: Failed to get backfill status for " - << "index table " << index_id << ": " << status; + LOG(WARNING) << "ValidateIndexTablesPostLoad: Failed to get backfill status for " + << "index table " << index_id << ": " << status; return; } @@ -2168,7 +2185,7 @@ void CatalogManager::CompleteShutdown() { if (async_task_pool_) { async_task_pool_->Shutdown(); } - + object_lock_info_manager_->Shutdown(); // It's OK if the visitor adds more entries even after we finish; it won't start any new tasks for // those entries. AbortAndWaitForAllTasks(); @@ -3955,7 +3972,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req, const bool colocated = (is_colocated_via_database || req.has_tablegroup_id() || is_vector_index) && // Any tables created in the xCluster DDL replication extension should not be colocated. - schema.SchemaName() != xcluster::kDDLQueuePgSchemaName; + !IsXClusterDDLReplicationTable(req); SCHECK(!colocated || req.has_table_id(), InvalidArgument, "Colocated table should specify a table ID"); @@ -5527,7 +5544,7 @@ scoped_refptr CatalogManager::CreateTableInfo(const CreateTableReques SchemaToPB(schema, metadata->mutable_schema()); if (FLAGS_TEST_create_table_with_empty_pgschema_name) { // Use empty string (default proto val) so that this passes has_pgschema_name() checks. 
- metadata->mutable_schema()->set_pgschema_name(""); + metadata->mutable_schema()->set_depricated_pgschema_name(""); } partition_schema.ToPB(metadata->mutable_partition_schema()); // For index table, set index details (indexed table id and whether the index is local). @@ -7815,7 +7832,8 @@ Status CatalogManager::GetTableSchemaInternal(const GetTableSchemaRequestPB* req // Due to pgschema_name being added after 2.13, older YSQL tables may not have this field. // So backfill pgschema_name for older YSQL tables. Skip for some special cases. - if (l->table_type() == TableType::PGSQL_TABLE_TYPE && resp->schema().pgschema_name().empty() && + if (l->table_type() == TableType::PGSQL_TABLE_TYPE && + resp->schema().depricated_pgschema_name().empty() && !table->is_system() && !table->IsSequencesSystemTable() && !table->IsColocationParentTable()) { TRACE("Acquired catalog manager lock for schema name lookup"); @@ -7826,7 +7844,7 @@ Status CatalogManager::GetTableSchemaInternal(const GetTableSchemaRequestPB* req "Unable to find schema name for YSQL table $0.$1 due to error: $2", table->namespace_name(), table->name(), pgschema_name.ToString()); } else { - resp->mutable_schema()->set_pgschema_name(*pgschema_name); + resp->mutable_schema()->set_depricated_pgschema_name(*pgschema_name); } } @@ -7945,7 +7963,7 @@ Status CatalogManager::GetTablegroupSchema(const GetTablegroupSchemaRequestPB* r schema_req.mutable_table()->set_table_id(table_id); Status s = GetTableSchema(&schema_req, &schema_resp); if (!s.ok() || schema_resp.has_error()) { - LOG(ERROR) << "Error while getting table schema: " << s; + LOG(WARNING) << "Error while getting table schema: " << s; return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s); } resp->add_get_table_schema_response_pbs()->Swap(&schema_resp); @@ -7981,7 +7999,7 @@ Status CatalogManager::GetColocatedTabletSchema(const GetColocatedTabletSchemaRe listTablesReq.set_exclude_system_tables(true); Status status = 
ListTables(&listTablesReq, &ListTablesResp); if (!status.ok() || ListTablesResp.has_error()) { - LOG(ERROR) << "Error while listing tables: " << status; + LOG(WARNING) << "Error while listing tables: " << status; return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, status); } @@ -7998,7 +8016,7 @@ Status CatalogManager::GetColocatedTabletSchema(const GetColocatedTabletSchemaRe schemaReq.mutable_table()->set_table_id(t.id()); status = GetTableSchema(&schemaReq, &schemaResp); if (!status.ok() || schemaResp.has_error()) { - LOG(ERROR) << "Error while getting table schema: " << status; + LOG(WARNING) << "Error while getting table schema: " << status; return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, status); } resp->add_get_table_schema_response_pbs()->Swap(&schemaResp); @@ -8120,7 +8138,7 @@ Status CatalogManager::ListTables(const ListTablesRequestPB* req, table->set_indexed_table_id(table_info->indexed_table_id()); } table->set_state(ltm->pb.state()); - table->set_pgschema_name(ltm->schema().pgschema_name()); + table->set_pgschema_name(ltm->schema().depricated_pgschema_name()); if (table_info->colocated()) { table->mutable_colocated_info()->set_colocated(true); if (!table_info->IsColocationParentTable() && ltm->pb.has_parent_table_id()) { @@ -8174,7 +8192,7 @@ scoped_refptr CatalogManager::GetTableInfoFromNamespaceNameAndTableNa auto& table_pb = l->pb; if (!l->started_deleting() && table_pb.namespace_id() == ns->id() && - boost::iequals(table_pb.schema().pgschema_name(), pg_schema_name) && + boost::iequals(table_pb.schema().depricated_pgschema_name(), pg_schema_name) && boost::iequals(table_pb.name(), table_name)) { return table; } @@ -9020,9 +9038,9 @@ Status CatalogManager::CheckIfDatabaseHasReplication(const scoped_refptrIsTableReplicated(table->id())) { - LOG(ERROR) << "Error deleting database: " << database->id() << ", table: " << table->id() - << " is under replication" - << ". 
Cannot delete a database that contains tables under replication."; + LOG(WARNING) << "Error deleting database: " << database->id() << ", table: " << table->id() + << " is under replication" + << ". Cannot delete a database that contains tables under replication."; return STATUS_FORMAT( InvalidCommand, Format( "Table: $0 is under replication. Cannot delete a database that " @@ -11713,9 +11731,7 @@ Result CatalogManager::CalculateNumTabletsForTableCreation( << " primary servers"; } - if (schema.SchemaName() == xcluster::kDDLQueuePgSchemaName && - (request.name() == xcluster::kDDLQueueTableName || - request.name() == xcluster::kDDLReplicatedTableName)) { + if (IsXClusterDDLReplicationTable(request)) { // xCluster DDL queue tables need to be single tablet tables - This ensures that we have a // singular stream of DDLs which simplifies ordering guarantees. num_tablets = 1; @@ -12626,10 +12642,10 @@ void CatalogManager::RebuildYQLSystemPartitions() { if (system_partitions_tablet_ != nullptr) { Status s = ResultToStatus(GetYqlPartitionsVtable().GenerateAndCacheData()); if (!s.ok()) { - LOG(ERROR) << "Error rebuilding system.partitions: " << s.ToString(); + LOG(WARNING) << "Error rebuilding system.partitions: " << s.ToString(); } } else { - LOG(ERROR) << "Error finding system.partitions vtable."; + LOG(WARNING) << "Error finding system.partitions vtable."; } } } diff --git a/src/yb/master/catalog_manager_ext.cc b/src/yb/master/catalog_manager_ext.cc index 7cd91ae945e2..c6c393748801 100644 --- a/src/yb/master/catalog_manager_ext.cc +++ b/src/yb/master/catalog_manager_ext.cc @@ -194,8 +194,8 @@ struct TableWithTabletsEntries { table_entry.AppendToString(&output); *table_backup_entry->mutable_entry() = ToSysRowEntry(table_id, SysRowEntryType::TABLE, std::move(output)); - if (table_entry.schema().has_pgschema_name() && table_entry.schema().pgschema_name() != "") { - table_backup_entry->set_pg_schema_name(table_entry.schema().pgschema_name()); + if 
(!table_entry.schema().depricated_pgschema_name().empty()) { + table_backup_entry->set_pg_schema_name(table_entry.schema().depricated_pgschema_name()); } for (const auto& tablet_entry : tablets_entries) { std::string output; diff --git a/src/yb/master/cluster_balance.cc b/src/yb/master/cluster_balance.cc index 2e8fe934ec8f..97c10d49cccd 100644 --- a/src/yb/master/cluster_balance.cc +++ b/src/yb/master/cluster_balance.cc @@ -1853,8 +1853,8 @@ const PlacementInfoPB& ClusterLoadBalancer::GetReadOnlyPlacementFromUuid( } } // Should never get here. - LOG(ERROR) << "Could not find read only cluster with placement uuid: " - << state_->options_->placement_uuid; + LOG(DFATAL) << "Could not find read only cluster with placement uuid: " + << state_->options_->placement_uuid; return replication_info.read_replicas(0); } diff --git a/src/yb/master/master-path-handlers.cc b/src/yb/master/master-path-handlers.cc index f06552e03d6f..3406006d29d5 100644 --- a/src/yb/master/master-path-handlers.cc +++ b/src/yb/master/master-path-handlers.cc @@ -1208,7 +1208,7 @@ void MasterPathHandlers::HandleAllTables( if (result.ok()) { table_row[kYsqlOid] = std::to_string(*result); } else { - LOG(ERROR) << "Failed to get OID of '" << table_uuid << "' ysql table"; + LOG(WARNING) << "Failed to get OID of '" << table_uuid << "' ysql table"; } const auto& schema = table_locked->schema(); @@ -1400,7 +1400,7 @@ void MasterPathHandlers::HandleAllTablesJSON( if (result.ok()) { table_row.ysql_oid = std::to_string(*result); } else { - LOG(ERROR) << "Failed to get OID of '" << table_uuid << "' ysql table"; + LOG(WARNING) << "Failed to get OID of '" << table_uuid << "' ysql table"; } const auto& schema = table_locked->schema(); diff --git a/src/yb/master/master-test.cc b/src/yb/master/master-test.cc index 79b5ab2e8e91..85f38daa502c 100644 --- a/src/yb/master/master-test.cc +++ b/src/yb/master/master-test.cc @@ -120,31 +120,13 @@ class MasterTest : public MasterTestBase { Result> FindNamespaceByName( 
YQLDatabase db_type, const std::string& name); -}; -Result MasterTest::SendHeartbeat( - TSToMasterCommonPB common, std::optional registration, - std::optional report) { - SysClusterConfigEntryPB config = - VERIFY_RESULT(mini_master_->catalog_manager().GetClusterConfig()); - auto universe_uuid = config.universe_uuid(); + Result SendNewTSRegistrationHeartbeat(const std::string& uuid); - TSHeartbeatRequestPB req; - TSHeartbeatResponsePB resp; - req.mutable_common()->Swap(&common); - if (registration) { - req.mutable_registration()->Swap(®istration.value()); - } - if (report) { - req.mutable_tablet_report()->Swap(&report.value()); - } - req.set_universe_uuid(universe_uuid); - RETURN_NOT_OK(proxy_heartbeat_->TSHeartbeat(req, &resp, ResetAndGetController())); - if (resp.has_error()) { - return StatusFromPB(resp.error().status()); - } - return resp; -} + private: + // Used by SendNewTSRegistrationHeartbeat to avoid host port collisions. + uint32_t registered_ts_count_ = 0; +}; TEST_F(MasterTest, TestPingServer) { // Ping the server. 
@@ -2859,58 +2841,98 @@ TEST_F(MasterTest, TestGetClosestLiveTserver) { } TEST_F(MasterTest, RefreshYsqlLeaseWithoutRegistration) { - ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_enable_object_locking_for_table_locks) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_enable_ysql_operation_lease) = true; const char* kTsUUID = "my-ts-uuid"; auto ddl_client = MasterDDLClient{std::move(*proxy_ddl_)}; - auto result = ddl_client.RefreshYsqlLease(kTsUUID, 1); + auto result = ddl_client.RefreshYsqlLease( + kTsUUID, 1, MonoTime::Now().GetDeltaSinceMin().ToMilliseconds(), {}); ASSERT_NOK(result); ASSERT_TRUE(result.status().IsNotFound()); } TEST_F(MasterTest, RefreshYsqlLease) { - ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_enable_object_locking_for_table_locks) = true; - const char *kTsUUID = "my-ts-uuid"; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_enable_ysql_operation_lease) = true; + const std::string kTsUUID1 = "ts-uuid1"; + const std::string kTsUUID2 = "ts-uuid2"; + + auto reg_resp1 = ASSERT_RESULT(SendNewTSRegistrationHeartbeat(kTsUUID1)); + ASSERT_FALSE(reg_resp1.needs_reregister()); + + auto ddl_client = MasterDDLClient{std::move(*proxy_ddl_)}; + auto lease_refresh_send_time_ms = MonoTime::Now().GetDeltaSinceMin().ToMilliseconds(); + auto info = ASSERT_RESULT(ddl_client.RefreshYsqlLease( + kTsUUID1, /* instance_seqno */ 1, lease_refresh_send_time_ms, {})); + ASSERT_TRUE(info.new_lease()); + ASSERT_EQ(info.lease_epoch(), 1); + ASSERT_GT( + info.lease_expiry_time_ms(), + lease_refresh_send_time_ms); + + // todo(zdrudi): but we need to do this and check the bootstrap entries... + // Refresh lease again. Since we omitted current lease epoch, master leader should still say this + // is a new lease. + info = ASSERT_RESULT(ddl_client.RefreshYsqlLease( + kTsUUID1, /* instance_seqno */ 1, lease_refresh_send_time_ms, {})); + ASSERT_TRUE(info.new_lease()); + ASSERT_EQ(info.lease_epoch(), 1); + ASSERT_GT(info.lease_expiry_time_ms(), lease_refresh_send_time_ms); + + // Refresh lease again. 
We included current lease epoch but it's incorrect. + info = ASSERT_RESULT( + ddl_client.RefreshYsqlLease(kTsUUID1, /* instance_seqno */ 1, lease_refresh_send_time_ms, 0)); + ASSERT_TRUE(info.new_lease()); + ASSERT_EQ(info.lease_epoch(), 1); + ASSERT_GT(info.lease_expiry_time_ms(), lease_refresh_send_time_ms); + + // Refresh lease again. Current lease epoch is correct so master leader should not set new lease + // bit. + info = ASSERT_RESULT( + ddl_client.RefreshYsqlLease(kTsUUID1, /* instance_seqno */ 1, lease_refresh_send_time_ms, 1)); + ASSERT_FALSE(info.new_lease()); + ASSERT_GT(info.lease_expiry_time_ms(), lease_refresh_send_time_ms); +} +Result MasterTest::SendHeartbeat( + TSToMasterCommonPB common, std::optional registration, + std::optional report) { SysClusterConfigEntryPB config = - ASSERT_RESULT(mini_master_->catalog_manager().GetClusterConfig()); + VERIFY_RESULT(mini_master_->catalog_manager().GetClusterConfig()); auto universe_uuid = config.universe_uuid(); - // Register the fake TS, without sending any tablet report. 
- TSRegistrationPB fake_reg; - *fake_reg.mutable_common()->add_private_rpc_addresses() = MakeHostPortPB("localhost", 1000); - *fake_reg.mutable_common()->add_http_addresses() = MakeHostPortPB("localhost", 2000); - *fake_reg.mutable_resources() = master::ResourcesPB(); + TSHeartbeatRequestPB req; + TSHeartbeatResponsePB resp; + req.mutable_common()->Swap(&common); + if (registration) { + req.mutable_registration()->Swap(®istration.value()); + } + if (report) { + req.mutable_tablet_report()->Swap(&report.value()); + } + req.set_universe_uuid(universe_uuid); + RETURN_NOT_OK(proxy_heartbeat_->TSHeartbeat(req, &resp, ResetAndGetController())); + if (resp.has_error()) { + return StatusFromPB(resp.error().status()); + } + return resp; +} + +Result MasterTest::SendNewTSRegistrationHeartbeat(const std::string& uuid) { + TSRegistrationPB reg; + *reg.mutable_common()->add_private_rpc_addresses() = + MakeHostPortPB("localhost", 1000 + registered_ts_count_); + *reg.mutable_common()->add_http_addresses() = + MakeHostPortPB("localhost", 2000 + registered_ts_count_); + *reg.mutable_resources() = master::ResourcesPB(); TSToMasterCommonPB common; - common.mutable_ts_instance()->set_permanent_uuid(kTsUUID); + common.mutable_ts_instance()->set_permanent_uuid(uuid); common.mutable_ts_instance()->set_instance_seqno(1); - { - TSHeartbeatRequestPB req; - TSHeartbeatResponsePB resp; - req.mutable_common()->CopyFrom(common); - req.mutable_registration()->CopyFrom(fake_reg); - req.set_universe_uuid(universe_uuid); - ASSERT_OK(proxy_heartbeat_->TSHeartbeat(req, &resp, ResetAndGetController())); - - ASSERT_FALSE(resp.needs_reregister()); - ASSERT_TRUE(resp.needs_full_tablet_report()); - ASSERT_TRUE(resp.has_tablet_report_limit()); + auto result = SendHeartbeat(common, reg); + if (result.ok()) { + registered_ts_count_++; } - - auto descs = mini_master_->master()->ts_manager()->GetAllDescriptors(); - ASSERT_EQ(1, descs.size()) << "Should have registered the TS"; - auto reg = 
descs[0]->GetTSRegistrationPB(); - ASSERT_EQ(fake_reg.DebugString(), reg.DebugString()) - << "Master got different registration"; - - auto ts_desc = ASSERT_RESULT(mini_master_->master()->ts_manager()->LookupTSByUUID(kTsUUID)); - ASSERT_EQ(ts_desc, descs[0]); - - auto ddl_client = MasterDDLClient{std::move(*proxy_ddl_)}; - auto info = ASSERT_RESULT(ddl_client.RefreshYsqlLease(kTsUUID, /* instance_seqno */1)); - ASSERT_TRUE(info.new_lease()); - ASSERT_EQ(info.lease_epoch(), 1); + return result; } } // namespace master diff --git a/src/yb/master/master-test_base.cc b/src/yb/master/master-test_base.cc index 3e2aa2c70115..ed2405de78f8 100644 --- a/src/yb/master/master-test_base.cc +++ b/src/yb/master/master-test_base.cc @@ -164,7 +164,10 @@ Status MasterTestBase::CreatePgsqlTable( } request->mutable_partition_schema()->set_hash_schema(PartitionSchemaPB::PGSQL_HASH_SCHEMA); request->mutable_schema()->mutable_table_properties()->set_num_tablets(8); - request->mutable_schema()->set_pgschema_name("public"); + + // SchemaPB::pgschema_name is depricated. See GHI: #12770. + // YB_TODO(#12770): THE LINE MUST BE DELETED AFTER REWORKING PG-SCHEMA-NAME USAGE IN CDC + request->mutable_schema()->set_depricated_pgschema_name("public"); // Dereferencing as the RPCs require const ref for request. Keeping request param as pointer // though, as that helps with readability and standardization. 
diff --git a/src/yb/master/master.cc b/src/yb/master/master.cc index 332e6769914e..8d4a3bc4dbf9 100644 --- a/src/yb/master/master.cc +++ b/src/yb/master/master.cc @@ -158,6 +158,7 @@ Master::Master(const MasterOptions& opts) state_(kStopped), metric_entity_cluster_( METRIC_ENTITY_cluster.Instantiate(metric_registry_.get(), "yb.cluster")), + master_tablet_server_(new MasterTabletServer(this, metric_entity())), sys_catalog_(new SysCatalogTable(this, metric_registry_.get())), ts_manager_(new TSManager(*sys_catalog_)), catalog_manager_(new CatalogManager(this, sys_catalog_.get())), @@ -175,8 +176,7 @@ Master::Master(const MasterOptions& opts) test_async_rpc_manager_(new TestAsyncRpcManager(this, catalog_manager())), init_future_(init_status_.get_future()), opts_(opts), - maintenance_manager_(new MaintenanceManager(MaintenanceManager::DEFAULT_OPTIONS)), - master_tablet_server_(new MasterTabletServer(this, metric_entity())) { + maintenance_manager_(new MaintenanceManager(MaintenanceManager::DEFAULT_OPTIONS)) { SetConnectionContextFactory(rpc::CreateConnectionContextFactory( GetAtomicFlag(&FLAGS_inbound_rpc_memory_limit), mem_tracker())); @@ -361,7 +361,7 @@ Status Master::StartAsync() { void Master::InitCatalogManagerTask() { Status s = InitCatalogManager(); if (!s.ok()) { - LOG(ERROR) << ToString() << ": Unable to init master catalog manager: " << s.ToString(); + LOG(WARNING) << ToString() << ": Unable to init master catalog manager: " << s; } init_status_.set_value(s); } diff --git a/src/yb/master/master.h b/src/yb/master/master.h index 6e547a3d7be1..22d2ecdc5ede 100644 --- a/src/yb/master/master.h +++ b/src/yb/master/master.h @@ -238,6 +238,7 @@ class Master : public tserver::DbServerBase { void RegisterCertificateReloader(tserver::CertificateReloader reloader) override {} void RegisterPgProcessRestarter(std::function restarter) override {} + void RegisterPgProcessKiller(std::function killer) override {} protected: Status RegisterServices(); @@ -277,6 +278,9 @@ class 
Master : public tserver::DbServerBase { // The metric entity for the cluster. scoped_refptr metric_entity_cluster_; + // Master's tablet server implementation used to host virtual tables like system.peers. + std::unique_ptr master_tablet_server_; + std::unique_ptr sys_catalog_; std::unique_ptr ts_manager_; std::unique_ptr catalog_manager_; @@ -307,9 +311,6 @@ class Master : public tserver::DbServerBase { // The maintenance manager for this master. std::shared_ptr maintenance_manager_; - // Master's tablet server implementation used to host virtual tables like system.peers. - std::unique_ptr master_tablet_server_; - std::unique_ptr cdc_state_client_init_; std::mutex master_metrics_mutex_; std::map> master_metrics_ GUARDED_BY(master_metrics_mutex_); diff --git a/src/yb/master/master_ddl.proto b/src/yb/master/master_ddl.proto index 34a58d29226d..9731735288cd 100644 --- a/src/yb/master/master_ddl.proto +++ b/src/yb/master/master_ddl.proto @@ -56,6 +56,15 @@ message UDTypeInfoPB { optional NamespaceIdentifierPB namespace = 5; } +// Additional internal table type for different special cases when the special object +// is being created via CreateTableRequestPB. +enum InternalTableType { + UNKNOWN_INTERNAL_TABLE_TYPE = 0; + GENERIC_TABLE = 1; + PG_CRON_JOB_TABLE = 2; + XCLUSTER_DDL_REPLICATION_TABLE = 3; +} + // Create table request (including index table) message CreateTableRequestPB { reserved 4, 32; @@ -136,6 +145,9 @@ message CreateTableRequestPB { } optional XClusterTableInfo xcluster_table_info = 33; + + // Additional internal table type for different special cases. 
+ optional InternalTableType internal_table_type = 34 [ default = GENERIC_TABLE ]; } message CreateTableResponsePB { @@ -774,6 +786,7 @@ message AcquireObjectLocksGlobalRequestPB { optional fixed64 ignore_after_hybrid_time = 6; optional fixed64 propagated_hybrid_time = 7; optional AshMetadataPB ash_metadata = 8; + optional bytes status_tablet = 9; } message AcquireObjectLocksGlobalResponsePB { @@ -803,7 +816,10 @@ message ReleaseObjectLocksGlobalResponsePB { message RefreshYsqlLeaseRequestPB { optional NodeInstancePB instance = 1; - optional bool needs_bootstrap = 2; + // The current lease epoch of the tserver making this request. + // Unset if the tserver doesn't think it has a live lease. + optional uint64 current_lease_epoch = 2; + optional uint64 local_request_send_time_ms = 3; } message RefreshYsqlLeaseInfoPB { @@ -812,6 +828,7 @@ message RefreshYsqlLeaseInfoPB { // TODO: If this ends up being too big, consider adding a way to break this up // into multiple messages. optional tserver.DdlLockEntriesPB ddl_lock_entries = 3; + optional uint64 lease_expiry_time_ms = 4; } message RefreshYsqlLeaseResponsePB { diff --git a/src/yb/master/master_ddl_client.cc b/src/yb/master/master_ddl_client.cc index 2afd29f023cb..9b5c2d5fad41 100644 --- a/src/yb/master/master_ddl_client.cc +++ b/src/yb/master/master_ddl_client.cc @@ -70,10 +70,15 @@ Status MasterDDLClient::WaitForCreateNamespaceDone(const NamespaceId& id, MonoDe } Result MasterDDLClient::RefreshYsqlLease( - const std::string& permanent_uuid, int64_t instance_seqno) { + const std::string& permanent_uuid, int64_t instance_seqno, uint64_t time_ms, + std::optional current_lease_epoch) { RefreshYsqlLeaseRequestPB req; req.mutable_instance()->set_permanent_uuid(permanent_uuid); req.mutable_instance()->set_instance_seqno(instance_seqno); + req.set_local_request_send_time_ms(time_ms); + if (current_lease_epoch) { + req.set_current_lease_epoch(*current_lease_epoch); + } RefreshYsqlLeaseResponsePB resp; rpc::RpcController rpc; 
RETURN_NOT_OK(proxy_.RefreshYsqlLease(req, &resp, &rpc)); diff --git a/src/yb/master/master_ddl_client.h b/src/yb/master/master_ddl_client.h index 1802a8e67066..fb25a10e694a 100644 --- a/src/yb/master/master_ddl_client.h +++ b/src/yb/master/master_ddl_client.h @@ -34,7 +34,8 @@ class MasterDDLClient { Status WaitForCreateNamespaceDone(const NamespaceId& id, MonoDelta timeout); Result RefreshYsqlLease( - const std::string& permanent_uuid, int64_t instance_seqno); + const std::string& permanent_uuid, int64_t instance_seqno, uint64_t time_ms, + std::optional current_lease_epoch); private: MasterDdlProxy proxy_; diff --git a/src/yb/master/master_heartbeat_service.cc b/src/yb/master/master_heartbeat_service.cc index f922c859df62..7b759a68940f 100644 --- a/src/yb/master/master_heartbeat_service.cc +++ b/src/yb/master/master_heartbeat_service.cc @@ -585,7 +585,7 @@ void MasterHeartbeatServiceImpl::DeleteOrphanedTabletReplica( !catalog_manager_->IsDeletedTabletLoadedFromSysCatalog(tablet_id)) { // See the comment in deleted_tablets_loaded_from_sys_catalog_ declaration for an // explanation of this logic. - LOG(ERROR) << Format( + LOG(WARNING) << Format( "Skipping deletion of orphaned tablet $0, since master has never registered this " "tablet.", tablet_id); return; @@ -1149,12 +1149,12 @@ bool MasterHeartbeatServiceImpl::ProcessCommittedConsensusState( if (report.has_schema_version() && report.schema_version() != table_lock->pb.version()) { if (report.schema_version() > table_lock->pb.version()) { - LOG(ERROR) << "TS " << ts_desc->permanent_uuid() - << " has reported a schema version greater than the current one " - << " for tablet " << tablet->ToString() - << ". Expected version " << table_lock->pb.version() - << " got " << report.schema_version() - << " (corruption)"; + LOG(WARNING) << "TS " << ts_desc->permanent_uuid() + << " has reported a schema version greater than the current one " + << " for tablet " << tablet->ToString() + << ". 
Expected version " << table_lock->pb.version() + << " got " << report.schema_version() + << " (corruption)"; } else { // TODO: For Alter (rolling apply to tablets), this is an expected transitory state. LOG(INFO) << "TS " << ts_desc->permanent_uuid() @@ -1205,12 +1205,12 @@ bool MasterHeartbeatServiceImpl::ProcessCommittedConsensusState( continue; } if (id_to_version.second > table_lock->pb.version()) { - LOG(ERROR) << "TS " << ts_desc->permanent_uuid() - << " has reported a schema version greater than the current one " - << " for table " << id_to_version.first - << ". Expected version " << table_lock->pb.version() - << " got " << id_to_version.second - << " (corruption)"; + LOG(WARNING) << "TS " << ts_desc->permanent_uuid() + << " has reported a schema version greater than the current one " + << " for table " << id_to_version.first + << ". Expected version " << table_lock->pb.version() + << " got " << id_to_version.second + << " (corruption)"; } else { LOG(INFO) << "TS " << ts_desc->permanent_uuid() << " does not have the latest schema for table " << id_to_version.first diff --git a/src/yb/master/master_snapshot_coordinator.cc b/src/yb/master/master_snapshot_coordinator.cc index 9d3bc50d0c94..809e41cc5e47 100644 --- a/src/yb/master/master_snapshot_coordinator.cc +++ b/src/yb/master/master_snapshot_coordinator.cc @@ -336,7 +336,7 @@ class MasterSnapshotCoordinator::Impl { RETURN_NOT_OK(tablet->snapshots().Create(*sys_catalog_snapshot_data)); } - ScheduleOperations(operations, leader_term); + PostScheduleOperations(std::move(operations), leader_term); if (leader_term >= 0 && snapshot_empty) { // There could be snapshot for 0 tables, so they should be marked as complete right after @@ -542,7 +542,7 @@ class MasterSnapshotCoordinator::Impl { RETURN_NOT_OK(tablet->ApplyOperation( operation, /* batch_idx= */ -1, *rpc::CopySharedMessage(write_batch))); - ScheduleOperations(operations, leader_term); + PostScheduleOperations(std::move(operations), leader_term); return 
Status::OK(); } @@ -991,7 +991,7 @@ class MasterSnapshotCoordinator::Impl { if (FLAGS_TEST_fatal_on_snapshot_verify) { LOG(DFATAL) << error_msg; } else { - LOG(ERROR) << error_msg; + LOG(WARNING) << error_msg; } } @@ -1379,6 +1379,14 @@ class MasterSnapshotCoordinator::Impl { MasterError(MasterErrorPB::SNAPSHOT_NOT_FOUND)); } + template + void PostScheduleOperations(Operations&& operations, int64_t leader_term) { + context_.Scheduler().io_service().post( + [this, operations = std::move(operations), leader_term] { + ScheduleOperations(operations, leader_term); + }); + } + template void ScheduleOperation(const Operation& operation, const TabletInfoPtr& tablet_info, int64_t leader_term); @@ -1436,6 +1444,7 @@ class MasterSnapshotCoordinator::Impl { if (!l.IsInitializedAndIsLeader()) { return; } + LongOperationTracker long_operation_tracker("Poll", 1s); VLOG(4) << __func__ << "()"; std::vector cleanup_snapshots; TabletSnapshotOperations operations; diff --git a/src/yb/master/master_tablet_service.cc b/src/yb/master/master_tablet_service.cc index f578dccc03ce..44714b48a7f5 100644 --- a/src/yb/master/master_tablet_service.cc +++ b/src/yb/master/master_tablet_service.cc @@ -179,8 +179,8 @@ void MasterTabletServiceImpl::Write(const tserver::WriteRequestPB* req, for (const auto db_oid : db_oids) { if (!master_->catalog_manager()->GetYsqlDBCatalogVersion(db_oid, &catalog_version, &last_breaking_version).ok()) { - LOG_WITH_FUNC(ERROR) << "failed to get db catalog version for " - << db_oid << ", ignoring"; + LOG_WITH_FUNC(DFATAL) << "failed to get db catalog version for " + << db_oid << ", ignoring"; } else { LOG_WITH_FUNC(INFO) << "db catalog version for " << db_oid << ": " << catalog_version << ", breaking version: " @@ -190,7 +190,7 @@ void MasterTabletServiceImpl::Write(const tserver::WriteRequestPB* req, } else { if (!master_->catalog_manager()->GetYsqlCatalogVersion(&catalog_version, &last_breaking_version).ok()) { - LOG_WITH_FUNC(ERROR) << "failed to get catalog 
version, ignoring"; + LOG_WITH_FUNC(DFATAL) << "failed to get catalog version, ignoring"; } else { LOG_WITH_FUNC(INFO) << "catalog version: " << catalog_version << ", breaking version: " << last_breaking_version; diff --git a/src/yb/master/master_tserver.cc b/src/yb/master/master_tserver.cc index 6363f3c99cd4..1cc75179bcc5 100644 --- a/src/yb/master/master_tserver.cc +++ b/src/yb/master/master_tserver.cc @@ -148,8 +148,7 @@ void MasterTabletServer::get_ysql_db_catalog_version(uint32_t db_oid, master_->catalog_manager()->GetYsqlDBCatalogVersion( db_oid, current_version, last_breaking_version); if (!s.ok()) { - LOG(ERROR) << "Could not get YSQL catalog version for master's tserver API: " - << s.ToUserMessage(); + LOG(WARNING) << "Could not get YSQL catalog version for master's tserver API: " << s; fill_vers(); } } @@ -255,7 +254,7 @@ bool MasterTabletServer::SkipCatalogVersionChecks() { return master_->catalog_manager()->SkipCatalogVersionChecks(); } -Result MasterTabletServer::GetYSQLLeaseInfo() const { +Result MasterTabletServer::GetYSQLLeaseInfo() const { return STATUS(InternalError, "Unexpected call of GetYSQLLeaseInfo"); } diff --git a/src/yb/master/master_tserver.h b/src/yb/master/master_tserver.h index 1c7ef70be9c8..7439a4e5a652 100644 --- a/src/yb/master/master_tserver.h +++ b/src/yb/master/master_tserver.h @@ -125,11 +125,13 @@ class MasterTabletServer : public tserver::TabletServerIf, void SetYsqlDBCatalogVersions( const tserver::DBCatalogVersionDataPB& db_catalog_version_data) override {} - Result GetYSQLLeaseInfo() const override; + Result GetYSQLLeaseInfo() const override; Status RestartPG() const override { return STATUS(NotSupported, "RestartPG not implemented for masters"); } - + Status KillPg() const override { + return STATUS(NotSupported, "KillPg not implemented for masters"); + } const std::string& permanent_uuid() const override; Result GetLocalPgTxnSnapshot( diff --git a/src/yb/master/mini_master.cc b/src/yb/master/mini_master.cc index 
a2e94289d816..03c958a5597a 100644 --- a/src/yb/master/mini_master.cc +++ b/src/yb/master/mini_master.cc @@ -128,13 +128,11 @@ Status MiniMaster::StartOnPorts(uint16_t rpc_port, uint16_t web_port) { } MasterOptions opts(master_addresses); - Status start_status = StartOnPorts(rpc_port, web_port, &opts); - if (!start_status.ok()) { - LOG(ERROR) << "MiniMaster failed to start on RPC port " << rpc_port - << ", web port " << web_port << ": " << start_status; - // Don't crash here. Handle the error in the caller (e.g. could retry there). - } - return start_status; + // Don't crash here. Handle the error in the caller (e.g. could retry there). + RETURN_NOT_OK_WITH_WARNING( + StartOnPorts(rpc_port, web_port, &opts), + Format("MiniMaster failed to start on RPC port $0, web port $1", rpc_port, web_port)); + return Status::OK(); } Status MiniMaster::StartOnPorts(uint16_t rpc_port, uint16_t web_port, diff --git a/src/yb/master/object_lock_info_manager.cc b/src/yb/master/object_lock_info_manager.cc index da76f5801f2a..2c1f58531d2a 100644 --- a/src/yb/master/object_lock_info_manager.cc +++ b/src/yb/master/object_lock_info_manager.cc @@ -57,6 +57,13 @@ DEFINE_RUNTIME_uint64(master_ysql_operation_lease_ttl_ms, 30 * 1000, "through the YSQL API."); TAG_FLAG(master_ysql_operation_lease_ttl_ms, advanced); +DEFINE_RUNTIME_uint64(ysql_operation_lease_ttl_client_buffer_ms, 2 * 1000, + "The difference between the duration masters and tservers use for ysql " + "operation lease TTLs. 
This is non-zero to account for clock skew and give " + "tservers time to clean up their existing pg sessions before the master " + "leader ignores them for exclusive table lock requests."); +TAG_FLAG(ysql_operation_lease_ttl_client_buffer_ms, advanced); + DEFINE_NON_RUNTIME_uint64(object_lock_cleanup_interval_ms, 5000, "The interval between runs of the background cleanup task for " "table-level locks held by unresponsive TServers."); @@ -75,6 +82,7 @@ namespace yb { namespace master { using namespace std::literals; +using namespace std::placeholders; using server::MonitoredTaskState; using strings::Substitute; using tserver::AcquireObjectLockRequestPB; @@ -102,6 +110,8 @@ Status ValidateLockRequest( return Status::OK(); } +constexpr auto kTserverRpcsTimeoutDefaultSecs = 60s; + } // namespace class ObjectLockInfoManager::Impl { @@ -110,9 +120,11 @@ class ObjectLockInfoManager::Impl { : master_(master), catalog_manager_(catalog_manager), clock_(master.clock()), - local_lock_manager_( - std::make_shared(clock_, master_.tablet_server())), - poller_(std::bind(&Impl::CleanupExpiredLeaseEpochs, this)) {} + poller_(std::bind(&Impl::CleanupExpiredLeaseEpochs, this)) { + CHECK_OK(ThreadPoolBuilder("object_lock_info_manager").Build(&lock_manager_thread_pool_)); + local_lock_manager_ = std::make_shared( + clock_, master_.tablet_server(), master_, lock_manager_thread_pool_.get()); + } void Start() { poller_.Start( @@ -120,13 +132,28 @@ class ObjectLockInfoManager::Impl { MonoDelta::FromMilliseconds(FLAGS_object_lock_cleanup_interval_ms)); } + void Shutdown() { + poller_.Shutdown(); + tserver::TSLocalLockManager* lock_manager = nullptr; + { + LockGuard lock(mutex_); + object_lock_infos_map_.clear(); + if (local_lock_manager_) { + lock_manager = local_lock_manager_.get(); + } + } + if (lock_manager) { + lock_manager->Shutdown(); + } + } + void LockObject( AcquireObjectLockRequestPB&& req, CoarseTimePoint deadline, StdStatusCallback&& callback); void 
PopulateDbCatalogVersionCache(ReleaseObjectLockRequestPB& req); Status UnlockObject( - ReleaseObjectLockRequestPB&& req, std::optional leader_epoch = std::nullopt, - std::optional callback = std::nullopt); + ReleaseObjectLockRequestPB&& req, std::optional&& callback = std::nullopt, + std::optional leader_epoch = std::nullopt); Status UnlockObjectSync( const ReleaseObjectLocksGlobalRequestPB& master_request, tserver::ReleaseObjectLockRequestPB&& tserver_request, CoarseTimePoint deadline); @@ -235,11 +262,12 @@ class ObjectLockInfoManager::Impl { }; std::unordered_map txn_host_info_map_ GUARDED_BY(mutex_); + rpc::Poller poller_; + std::unique_ptr lock_manager_thread_pool_; std::shared_ptr local_lock_manager_ GUARDED_BY(mutex_); // Only accessed from a single thread for now, so no need for synchronization. std::unordered_map> expired_lease_epoch_cleanup_tasks_; - rpc::Poller poller_; }; template @@ -267,7 +295,7 @@ class UpdateAllTServers : public std::enable_shared_from_this leader_epoch, std::optional callback); + std::optional&& callback, std::optional leader_epoch); Status Launch(); const Req& request() const override { @@ -291,16 +319,19 @@ class UpdateAllTServers : public std::enable_shared_from_this TServerTaskFor( const TabletServerId& ts_uuid, StdStatusCallback&& callback); @@ -340,6 +371,8 @@ class UpdateTServer : public RetrySpecificTSRpcTask { return Format("$0 for TServer: $1 ", shared_all_tservers_->LogPrefix(), permanent_uuid()); } + MonoTime ComputeDeadline() const override; + protected: void Finished(const Status& status) override; @@ -396,6 +429,7 @@ AcquireObjectLockRequestPB TserverRequestFor( if (master_request.has_ash_metadata()) { req.mutable_ash_metadata()->CopyFrom(master_request.ash_metadata()); } + req.set_status_tablet(master_request.status_tablet()); return req; } @@ -457,6 +491,10 @@ void ObjectLockInfoManager::Start() { impl_->Start(); } +void ObjectLockInfoManager::Shutdown() { + impl_->Shutdown(); +} + void 
ObjectLockInfoManager::LockObject( const AcquireObjectLocksGlobalRequestPB& req, AcquireObjectLocksGlobalResponsePB& resp, rpc::RpcContext rpc) { @@ -793,8 +831,7 @@ Status ObjectLockInfoManager::Impl::UnlockObjectSync( auto promise = std::make_shared>(); WARN_NOT_OK( UnlockObject( - std::move(tserver_req), std::nullopt, - [promise](const Status& s) { promise->set_value(s); }), + std::move(tserver_req), [promise](const Status& s) { promise->set_value(s); }), "Failed to unlock object"); auto future = promise->get_future(); return ( @@ -804,8 +841,8 @@ Status ObjectLockInfoManager::Impl::UnlockObjectSync( } Status ObjectLockInfoManager::Impl::UnlockObject( - ReleaseObjectLockRequestPB&& req, std::optional leader_epoch, - std::optional callback) { + ReleaseObjectLockRequestPB&& req, std::optional&& callback, + std::optional leader_epoch) { VLOG(1) << __PRETTY_FUNCTION__ << req.ShortDebugString(); if (req.session_host_uuid().empty()) { // session_host_uuid would be unset for release requests that are manually @@ -817,8 +854,8 @@ Status ObjectLockInfoManager::Impl::UnlockObject( RETURN_NOT_OK(s); } auto unlock_objects = std::make_shared>( - master_, catalog_manager_, *this, std::move(req), std::move(leader_epoch), - std::move(callback)); + master_, catalog_manager_, *this, std::move(req), std::move(callback), + std::move(leader_epoch)); return unlock_objects->Launch(); } @@ -860,26 +897,49 @@ void ObjectLockInfoManager::Impl::UnlockObject(const TransactionId& txn_id) { Status ObjectLockInfoManager::Impl::RefreshYsqlLease( const RefreshYsqlLeaseRequestPB& req, RefreshYsqlLeaseResponsePB& resp, rpc::RpcContext& rpc, const LeaderEpoch& epoch) { - if (!FLAGS_enable_ysql_operation_lease && - !FLAGS_TEST_enable_object_locking_for_table_locks) { - return STATUS(NotSupported, "The ysql lease is currently a test feature."); + if (!FLAGS_enable_ysql_operation_lease && !FLAGS_TEST_enable_object_locking_for_table_locks) { + return STATUS(NotSupported, "The ysql lease is currently 
disabled."); + } + if (!req.has_local_request_send_time_ms()) { + return STATUS(InvalidArgument, "Missing required local_request_send_time_ms"); } + auto master_ttl = GetAtomicFlag(&FLAGS_master_ysql_operation_lease_ttl_ms); + auto buffer = GetAtomicFlag(&FLAGS_ysql_operation_lease_ttl_client_buffer_ms); + CHECK_GT(master_ttl, buffer); + resp.mutable_info()->set_lease_expiry_time_ms( + req.local_request_send_time_ms() + master_ttl - buffer); // Sanity check that the tserver has already registered with the same instance_seqno. RETURN_NOT_OK(master_.ts_manager()->LookupTS(req.instance())); auto object_lock_info = GetOrCreateObjectLockInfo(req.instance().permanent_uuid()); - auto lock_opt = object_lock_info->RefreshYsqlOperationLease(req.instance()); - if (!lock_opt) { - resp.mutable_info()->set_new_lease(false); - if (req.needs_bootstrap()) { + auto lock_variant = object_lock_info->RefreshYsqlOperationLease(req.instance()); + if (auto* lease_info = std::get_if(&lock_variant)) { + resp.mutable_info()->set_lease_epoch(lease_info->lease_epoch()); + if (!req.has_current_lease_epoch() || lease_info->lease_epoch() != req.current_lease_epoch()) { *resp.mutable_info()->mutable_ddl_lock_entries() = ExportObjectLockInfo(); + // From the master leader's perspective this is not a new lease. But the tserver may not be + // aware it has received a new lease because it has not supplied its correct lease epoch. + LOG(INFO) << Format( + "TS $0 ($1) has provided $3 instead of its actual lease epoch $4 in its ysql op lease " + "refresh request. Marking its ysql lease as new", + req.instance().permanent_uuid(), req.instance().instance_seqno(), + req.has_current_lease_epoch() ? 
std::to_string(req.current_lease_epoch()) : "", + lease_info->lease_epoch()); + resp.mutable_info()->set_new_lease(true); + } else { + resp.mutable_info()->set_new_lease(false); } return Status::OK(); } + auto* lockp = std::get_if(&lock_variant); + CHECK_NOTNULL(lockp); RETURN_NOT_OK(catalog_manager_.sys_catalog()->Upsert(epoch, object_lock_info)); resp.mutable_info()->set_new_lease(true); - resp.mutable_info()->set_lease_epoch(lock_opt->mutable_data()->pb.lease_info().lease_epoch()); - lock_opt->Commit(); + resp.mutable_info()->set_lease_epoch(lockp->mutable_data()->pb.lease_info().lease_epoch()); + lockp->Commit(); *resp.mutable_info()->mutable_ddl_lock_entries() = ExportObjectLockInfo(); + LOG(INFO) << Format( + "Granting a new ysql op lease to TS $0 ($1). Lease epoch $2", req.instance().permanent_uuid(), + req.instance().instance_seqno(), resp.info().lease_epoch()); return Status::OK(); } @@ -921,7 +981,7 @@ std::shared_ptr ObjectLockInfoManager::Impl::ReleaseLocksHeldByE auto session_host_uuid = request.session_host_uuid(); WARN_NOT_OK( UnlockObject( - std::move(request), leader_epoch, [latch](const Status& s) { latch->CountDown(); }), + std::move(request), [latch](const Status& s) { latch->CountDown(); }, leader_epoch), yb::Format("Failed to enqueue request for unlock object $0 $1", session_host_uuid, txn_id)); } return latch; @@ -983,7 +1043,7 @@ void ObjectLockInfoManager::Impl::RelaunchInProgressRequests( VLOG(1) << __func__ << " for " << tserver_uuid << " " << requests.size() << " requests"; for (auto& request : requests) { WARN_NOT_OK( - UnlockObject(std::move(request), leader_epoch), + UnlockObject(std::move(request), std::nullopt /* callback */, leader_epoch), "Failed to enqueue request for unlock object"); } } @@ -992,7 +1052,11 @@ void ObjectLockInfoManager::Impl::Clear() { catalog_manager_.AssertLeaderLockAcquiredForWriting(); LockGuard lock(mutex_); object_lock_infos_map_.clear(); - local_lock_manager_.reset(new tserver::TSLocalLockManager(clock_, 
master_.tablet_server())); + if (local_lock_manager_) { + local_lock_manager_->Shutdown(); + } + local_lock_manager_.reset(new tserver::TSLocalLockManager( + clock_, master_.tablet_server(), master_, lock_manager_thread_pool_.get())); } std::optional ObjectLockInfoManager::Impl::GetLeaseEpoch(const std::string& ts_uuid) { @@ -1048,6 +1112,10 @@ void ObjectLockInfoManager::Impl::CleanupExpiredLeaseEpochs() { if (object_info_lock->pb.lease_info().live_lease() && current_time.GetDeltaSince(object_info->last_ysql_lease_refresh()) > MonoDelta::FromMilliseconds(GetAtomicFlag(&FLAGS_master_ysql_operation_lease_ttl_ms))) { + LOG(INFO) << Format( + "Tserver $0, instance seqno $1 with ysql lease epoch $2 has just lost its lease", + object_info->id(), object_info_lock->pb.lease_info().instance_seqno(), + object_info_lock->pb.lease_info().lease_epoch()); object_info_lock.mutable_data()->pb.mutable_lease_info()->set_live_lease(false); object_infos_to_write.push_back(object_info.get()); if (object_info_lock->pb.lease_epochs_size() > 0) { @@ -1119,7 +1187,7 @@ UpdateAllTServers::UpdateAllTServers( template UpdateAllTServers::UpdateAllTServers( Master& master, CatalogManager& catalog_manager, ObjectLockInfoManager::Impl& olm, Req&& req, - std::optional leader_epoch, std::optional callback) + std::optional&& callback, std::optional leader_epoch) : master_(master), catalog_manager_(catalog_manager), object_lock_info_manager_(olm), @@ -1127,6 +1195,7 @@ UpdateAllTServers::UpdateAllTServers( txn_id_(FullyDecodeTransactionId(req_.txn_id())), epoch_(std::move(leader_epoch)), callback_(std::move(callback)), + deadline_(CoarseMonoClock::Now() + kTserverRpcsTimeoutDefaultSecs), trace_(Trace::CurrentTrace()) { VLOG(3) << __PRETTY_FUNCTION__; } @@ -1178,6 +1247,15 @@ bool UpdateAllTServers::IsReleaseRequest() const { return true; } +template +Status UpdateAllTServers::VerifyTxnId() { + if (!txn_id_) { + return STATUS_FORMAT( + InvalidArgument, "Could not parse txn_id for the request. 
$0", txn_id_.status()); + } + return Status::OK(); +} + template std::string UpdateAllTServers::LogPrefix() const { return Format( @@ -1187,45 +1265,82 @@ std::string UpdateAllTServers::LogPrefix() const { } template -Status UpdateAllTServers::Launch() { - auto s = DoLaunch(); - if (!launched_) { - DoCallbackAndRespond(s); +void UpdateAllTServers::LaunchRpcs() { + // todo(zdrudi): special case for 0 tservers with a live lease. This doesn't work. + ts_descriptors_ = object_lock_info_manager_.GetAllTSDescriptorsWithALiveLease(); + statuses_ = std::vector{ts_descriptors_.size(), STATUS(Uninitialized, "")}; + LaunchRpcsFrom(0); +} + +template <> +Status UpdateAllTServers::BeforeRpcs() { + TRACE_FUNC(); + RETURN_NOT_OK(VerifyTxnId()); + RETURN_NOT_OK(ValidateLockRequest(req_, requestor_latest_lease_epoch_)); + std::shared_ptr local_lock_manager; + DCHECK(!epoch_.has_value()) << "Epoch should not yet be set for AcquireObjectLockRequestPB"; + { + SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); + RETURN_NOT_OK(CheckLeaderLockStatus(l, std::nullopt)); + epoch_ = l.epoch(); + local_lock_manager = object_lock_info_manager_.ts_local_lock_manager(); } - return s; + // Update Local State. + launched_ = true; + local_lock_manager->AcquireObjectLocksAsync( + req_, GetClientDeadline(), + [shared_this = shared_from_this()](Status s) { + if (s.ok()) { + s = shared_this->DoPersistRequest(); + } + if (!s.ok()) { + LOG(WARNING) << "Failed to acquire object locks locally at the master " << s; + shared_this->DoCallbackAndRespond(s.CloneAndReplaceCode(Status::kRemoteError)); + return; + } + shared_this->LaunchRpcs(); + }, tserver::WaitForBootstrap::kFalse); + return Status::OK(); } -template -Status UpdateAllTServers::DoLaunch() { - if (!txn_id_) { - return STATUS( - InvalidArgument, "Could not parse txn_id for the request. 
$0", txn_id_.status().ToString()); - } else if ( - IsReleaseRequest() && object_lock_info_manager_.IsDdlVerificationInProgress(*txn_id_)) { +template <> +Status UpdateAllTServers::BeforeRpcs() { + TRACE_FUNC(); + RETURN_NOT_OK(VerifyTxnId()); + if (object_lock_info_manager_.IsDdlVerificationInProgress(*txn_id_)) { VLOG_WITH_PREFIX(1) << " is already scheduled for ddl verification. " << "Ignoring release request, as it will be released by the ddl verifier."; return Status::OK(); } VLOG_WITH_PREFIX(2) << " processing request: " << req_.ShortDebugString(); - RETURN_NOT_OK(BeforeRpcs()); + if (!epoch_.has_value()) { + SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); + RETURN_NOT_OK(CheckLeaderLockStatus(l, std::nullopt)); + epoch_ = l.epoch(); + } + RETURN_NOT_OK(object_lock_info_manager_.AddToInProgress(*epoch_, req_)); - // Do this check after the BeforeRpcs() call, to ensure that the request was added to - // in progress requests. - if (PREDICT_FALSE(FLAGS_TEST_skip_launch_release_request) && IsReleaseRequest()) { + // Do this check after adding the request to in progress requests. + if (PREDICT_FALSE(FLAGS_TEST_skip_launch_release_request)) { return Status::OK(); } - - // todo(zdrudi): special case for 0 tservers with a live lease. This doesn't work. 
- ts_descriptors_ = object_lock_info_manager_.GetAllTSDescriptorsWithALiveLease(); - statuses_ = std::vector{ts_descriptors_.size(), STATUS(Uninitialized, "")}; - LaunchFrom(0); launched_ = true; + LaunchRpcs(); return Status::OK(); } template -void UpdateAllTServers::LaunchFrom(size_t start_idx) { +Status UpdateAllTServers::Launch() { + auto s = BeforeRpcs(); + if (!launched_) { + DoCallbackAndRespond(s); + } + return s; +} + +template +void UpdateAllTServers::LaunchRpcsFrom(size_t start_idx) { TRACE("Launching for $0 TServers from $1", ts_descriptors_.size(), start_idx); ts_pending_ = ts_descriptors_.size() - start_idx; VLOG(1) << __func__ << " launching for " << ts_pending_ << " tservers."; @@ -1234,20 +1349,21 @@ void UpdateAllTServers::LaunchFrom(size_t start_idx) { VLOG(1) << "Launching for " << ts_uuid; auto task = TServerTaskFor( ts_uuid, - std::bind( - &UpdateAllTServers::Done, this->shared_from_this(), i, std::placeholders::_1)); - WARN_NOT_OK( - catalog_manager_.ScheduleTask(task), - yb::Format( - "Failed to schedule request to UpdateTServer to $0 for $1", ts_uuid, - request().DebugString())); + std::bind(&UpdateAllTServers::Done, this->shared_from_this(), i, _1)); + auto s = catalog_manager_.ScheduleTask(task); + if (!s.ok()) { + Done(i, + s.CloneAndPrepend(Format( + "Failed to schedule request to UpdateTServer to $0 for $1", ts_uuid, + request().DebugString()))); + } } } template void UpdateAllTServers::DoCallbackAndRespond(const Status& s) { - TRACE("$0: $1", __func__, s.ToString()); - VLOG_WITH_FUNC(2) << s.ToString(); + TRACE("$0: $1 $2", __func__, (IsReleaseRequest() ? "Release" : "Acquire"), s.ToString()); + VLOG_WITH_FUNC(2) << (IsReleaseRequest() ? 
"Release" : "Acquire") << " " << s.ToString(); WARN_NOT_OK( s, yb::Format( "$0Failed.$1", LogPrefix(), @@ -1274,49 +1390,22 @@ void UpdateAllTServers::CheckForDone() { DoneAll(); } -template <> -Status UpdateAllTServers::BeforeRpcs() { - TRACE_FUNC(); - RETURN_NOT_OK(ValidateLockRequest(req_, requestor_latest_lease_epoch_)); - std::shared_ptr local_lock_manager; - DCHECK(!epoch_.has_value()) << "Epoch should not yet be set for AcquireObjectLockRequestPB"; - { - SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); - RETURN_NOT_OK(CheckLeaderLockStatus(l, std::nullopt)); - epoch_ = l.epoch(); - local_lock_manager = object_lock_info_manager_.ts_local_lock_manager(); - } - // Update Local State. - // TODO: Use RETURN_NOT_OK_PREPEND - auto s = local_lock_manager->AcquireObjectLocks( - req_, GetClientDeadline(), tserver::WaitForBootstrap::kFalse); +template +Status UpdateAllTServers::DoPersistRequestUnlocked(const ScopedLeaderSharedLock& l) { + // Persist the request. + RETURN_NOT_OK(CheckLeaderLockStatus(l, epoch_)); + auto s = object_lock_info_manager_.PersistRequest(*epoch_, req_, *txn_id_); if (!s.ok()) { - LOG(WARNING) << "Failed to acquire object locks locally at the master " << s; + LOG(WARNING) << "Failed to update object lock " << s; return s.CloneAndReplaceCode(Status::kRemoteError); } - // todo(zdrudi): Do we want to verify the requestor has a valid lease here before persisting? - // Persist the request. 
- { - SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); - RETURN_NOT_OK(CheckLeaderLockStatus(l, epoch_)); - auto s = object_lock_info_manager_.PersistRequest(*epoch_, req_, *txn_id_); - if (!s.ok()) { - LOG(WARNING) << "Failed to update object lock " << s; - return s.CloneAndReplaceCode(Status::kRemoteError); - } - } return Status::OK(); } -template <> -Status UpdateAllTServers::BeforeRpcs() { - TRACE_FUNC(); - if (!epoch_.has_value()) { - SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); - RETURN_NOT_OK(CheckLeaderLockStatus(l, std::nullopt)); - epoch_ = l.epoch(); - } - return object_lock_info_manager_.AddToInProgress(*epoch_, req_); +template +Status UpdateAllTServers::DoPersistRequest() { + SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); + return DoPersistRequestUnlocked(l); } template <> @@ -1330,16 +1419,10 @@ Status UpdateAllTServers::AfterRpcs() { TRACE_FUNC(); VLOG_WITH_FUNC(2); SCOPED_LEADER_SHARED_LOCK(l, &catalog_manager_); - RETURN_NOT_OK(CheckLeaderLockStatus(l, epoch_)); - // Persist the request. - auto s = object_lock_info_manager_.PersistRequest(*epoch_, req_, *txn_id_); - if (!s.ok()) { - LOG(WARNING) << "Failed to update object lock " << s; - return s.CloneAndReplaceCode(Status::kRemoteError); - } + RETURN_NOT_OK(DoPersistRequestUnlocked(l)); // Update Local State. auto local_lock_manager = object_lock_info_manager_.ts_local_lock_manager(); - s = local_lock_manager->ReleaseObjectLocks(req_, GetClientDeadline()); + auto s = local_lock_manager->ReleaseObjectLocks(req_, GetClientDeadline()); if (!s.ok()) { LOG(WARNING) << "Failed to release object lock locally." << s; return s.CloneAndReplaceCode(Status::kRemoteError); @@ -1376,7 +1459,7 @@ bool UpdateAllTServers::RelaunchIfNecessary() { } VLOG(1) << "New TServers were added. 
Relaunching."; - LaunchFrom(old_size); + LaunchRpcsFrom(old_size); return true; } @@ -1406,6 +1489,11 @@ bool UpdateTServer::Sen return true; } +template +MonoTime UpdateTServer::ComputeDeadline() const { + return MonoTime(ToSteady(shared_all_tservers_->GetClientDeadline())); +} + template void UpdateTServer::HandleResponse(int attempt) { VLOG_WITH_PREFIX(3) << __func__ << " response is " << yb::ToString(resp_); diff --git a/src/yb/master/object_lock_info_manager.h b/src/yb/master/object_lock_info_manager.h index 259e542e62f4..700b9a2f981d 100644 --- a/src/yb/master/object_lock_info_manager.h +++ b/src/yb/master/object_lock_info_manager.h @@ -60,6 +60,8 @@ class ObjectLockInfoManager { void Start(); + void Shutdown(); + void LockObject( const AcquireObjectLocksGlobalRequestPB& req, AcquireObjectLocksGlobalResponsePB& resp, rpc::RpcContext rpc); diff --git a/src/yb/master/permissions_manager.cc b/src/yb/master/permissions_manager.cc index aca2965abd5d..1d2f08bc29c1 100644 --- a/src/yb/master/permissions_manager.cc +++ b/src/yb/master/permissions_manager.cc @@ -452,7 +452,7 @@ Status PermissionsManager::AlterRole( s = catalog_manager_->sys_catalog_->Upsert(catalog_manager_->leader_ready_term(), role); if (!s.ok()) { - LOG(ERROR) << "Unable to alter role " << req->name() << ": " << s; + LOG(WARNING) << "Unable to alter role " << req->name() << ": " << s; return s; } l.Commit(); @@ -518,8 +518,8 @@ Status PermissionsManager::DeleteRole( // Update sys-catalog with the new member_of list for this role. 
s = catalog_manager_->sys_catalog_->Upsert(catalog_manager_->leader_ready_term(), role); if (!s.ok()) { - LOG(ERROR) << "Unable to remove role " << req->name() - << " from member_of list for role " << role_name; + LOG(WARNING) << "Unable to remove role " << req->name() + << " from member_of list for role " << role_name; role->mutable_metadata()->AbortMutation(); } else { role->mutable_metadata()->CommitMutation(); diff --git a/src/yb/master/sys_catalog.cc b/src/yb/master/sys_catalog.cc index af94c550227b..f970aaf53719 100644 --- a/src/yb/master/sys_catalog.cc +++ b/src/yb/master/sys_catalog.cc @@ -745,8 +745,8 @@ Status SysCatalogTable::SyncWrite(SysCatalogWriter* writer) { << "complete. Continuing to wait."; time = CoarseMonoClock::now(); if (time >= deadline) { - LOG(ERROR) << "Already waited for a total of " << ::yb::ToString(waited_so_far) << ". " - << "Returning a timeout from SyncWrite."; + LOG(WARNING) << "Already waited for a total of " << ::yb::ToString(waited_so_far) << ". " + << "Returning a timeout from SyncWrite."; return STATUS_FORMAT(TimedOut, "SyncWrite timed out after $0", waited_so_far); } } @@ -1406,7 +1406,7 @@ Result SysCatalogTable::ReadPgClassColumnWithOidValue(const uint32_t d } oid = result_oid_col->uint32_value(); - VLOG(1) << "Table oid: " << table_oid << column_name << " oid: " << oid; + VLOG(1) << "Table oid: " << table_oid << " Column " << column_name << " oid: " << oid; } return oid; @@ -1501,7 +1501,7 @@ Result> SysCatalogTable::ReadPgAttNameTypid if (attnum_col->int16_value() < 0) { // Ignore system columns. VLOG(1) << "Ignoring system column (attnum = " << attnum_col->int16_value() - << ") for attrelid $0:" << table_oid; + << ") for attrelid: " << table_oid; continue; } @@ -1521,7 +1521,7 @@ Result> SysCatalogTable::ReadPgAttNameTypid if (atttypid == kPgInvalidOid) { // Ignore dropped columns. 
VLOG(1) << "Ignoring dropped column " << attname << " (atttypid = 0)" - << " for attrelid $0:" << table_oid; + << " for attrelid: " << table_oid; continue; } @@ -2050,7 +2050,7 @@ Result SysCatalogTable::ReadPgAttributeInfo( if (attnum < 0) { // Ignore system columns. VLOG(1) << "Ignoring system column (attnum = " << attnum_col->int16_value() - << ") for attrelid $0:" << attrelid; + << ") for attrelid: " << attrelid; continue; } @@ -2059,7 +2059,7 @@ Result SysCatalogTable::ReadPgAttributeInfo( if (atttypid == kPgInvalidOid) { // Ignore dropped columns. VLOG(1) << "Ignoring dropped column " << attname << " (atttypid = 0)" - << " for attrelid $0:" << attrelid; + << " for attrelid: " << attrelid; continue; } diff --git a/src/yb/master/table_index.cc b/src/yb/master/table_index.cc index bb9c911d1948..1cfb2a020c55 100644 --- a/src/yb/master/table_index.cc +++ b/src/yb/master/table_index.cc @@ -37,7 +37,7 @@ void TableIndex::AddOrReplace(const TableInfoPtr& table) { std::string first_id = (*pos)->id(); auto replace_successful = tables_.replace(pos, table); if (!replace_successful) { - LOG(ERROR) << Format( + LOG(WARNING) << Format( "Multiple tables prevented inserting a new table with id $0. 
First id was $1", table->id(), first_id); } diff --git a/src/yb/master/util/yql_vtable_helpers.cc b/src/yb/master/util/yql_vtable_helpers.cc index 90ce1e721560..cb8e01ba3385 100644 --- a/src/yb/master/util/yql_vtable_helpers.cc +++ b/src/yb/master/util/yql_vtable_helpers.cc @@ -133,7 +133,7 @@ QLValuePB GetValueHelper::Apply(const std::string& strval, const Da value_pb.set_binary_value(strval); break; default: - LOG(ERROR) << "unexpected string type " << data_type; + LOG(DFATAL) << "unexpected string type " << data_type; break; } return value_pb; @@ -150,7 +150,7 @@ QLValuePB GetValueHelper::Apply( value_pb.set_binary_value(strval, len); break; default: - LOG(ERROR) << "unexpected string type " << data_type; + LOG(DFATAL) << "unexpected string type " << data_type; break; } return value_pb; @@ -172,7 +172,7 @@ QLValuePB GetValueHelper::Apply(const int32_t intval, const DataType da value_pb.set_int8_value(intval); break; default: - LOG(ERROR) << "unexpected int type " << data_type; + LOG(DFATAL) << "unexpected int type " << data_type; break; } return value_pb; diff --git a/src/yb/master/xcluster/xcluster_bootstrap_helper.cc b/src/yb/master/xcluster/xcluster_bootstrap_helper.cc index 26192cb508ad..cd50e8974c70 100644 --- a/src/yb/master/xcluster/xcluster_bootstrap_helper.cc +++ b/src/yb/master/xcluster/xcluster_bootstrap_helper.cc @@ -239,7 +239,7 @@ void SetupUniverseReplicationWithBootstrapHelper::DoReplicationBootstrap( // First get the universe. 
auto bootstrap_info = catalog_manager_.GetUniverseReplicationBootstrap(replication_id); if (bootstrap_info == nullptr) { - LOG(ERROR) << "UniverseReplicationBootstrap not found: " << replication_id; + LOG(DFATAL) << "UniverseReplicationBootstrap not found: " << replication_id; return; } diff --git a/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc b/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc index 5dd3fa65e9da..a5862415e6c4 100644 --- a/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc +++ b/src/yb/master/xcluster/xcluster_inbound_replication_group_setup_task.cc @@ -932,7 +932,7 @@ Result XClusterTableSetupTask::ValidateSourceSchemaAnd // Double-check schema name here if the previous check was skipped. if (is_ysql_table && !has_valid_pgschema_name) { - std::string target_schema_name = table_schema_resp.schema().pgschema_name(); + std::string target_schema_name = table_schema_resp.schema().depricated_pgschema_name(); if (target_schema_name != source_schema.SchemaName()) { table->clear_table_id(); continue; diff --git a/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc b/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc index 45fba8b37b2b..30fdb34f16fb 100644 --- a/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc +++ b/src/yb/master/xcluster/xcluster_outbound_replication_group-test.cc @@ -246,7 +246,7 @@ class XClusterOutboundReplicationGroupMockedTest : public YBTest { pb.set_state(master::SysTablesEntryPB::PREPARING); pb.set_name(table_name); pb.set_namespace_id(namespace_id); - pb.mutable_schema()->set_pgschema_name(pg_schema_name); + pb.mutable_schema()->set_depricated_pgschema_name(pg_schema_name); pb.set_table_type(PGSQL_TABLE_TYPE); l.Commit(); } diff --git a/src/yb/master/xrepl_catalog_manager.cc b/src/yb/master/xrepl_catalog_manager.cc index b11900e38f98..70b38d53ad69 100644 --- a/src/yb/master/xrepl_catalog_manager.cc +++ 
b/src/yb/master/xrepl_catalog_manager.cc @@ -238,7 +238,7 @@ class CDCStreamLoader : public Visitor { table = catalog_manager_->tables_->FindTableOrNull( xcluster::StripSequencesDataAliasIfPresent(metadata.table_id(0))); if (!table) { - LOG(ERROR) << "Invalid table ID " << metadata.table_id(0) << " for stream " << stream_id; + LOG(DFATAL) << "Invalid table ID " << metadata.table_id(0) << " for stream " << stream_id; // TODO (#2059): Potentially signals a race condition that table got deleted while stream // was being created. // Log error and continue without loading the stream. @@ -722,7 +722,7 @@ Status CatalogManager::BackfillMetadataForXRepl( // is not present without backfilling it to master's disk or tservers. // Skip this check for colocated parent tables as they do not have pgschema names. if (!IsColocationParentTableId(table_id) && - (backfill_required || table_lock->schema().pgschema_name().empty())) { + (backfill_required || table_lock->schema().depricated_pgschema_name().empty())) { LOG_WITH_FUNC(INFO) << "backfilling pgschema_name for table " << table_id; string pgschema_name = VERIFY_RESULT(GetPgSchemaName(table_id, table_lock.data())); VLOG(1) << "For table: " << table_lock->name() << " found pgschema_name: " << pgschema_name; @@ -2730,7 +2730,9 @@ Status CatalogManager::CleanUpCDCSDKStreamsMetadata(const LeaderEpoch& epoch) { // itself is not found, we can safely delete the cdc_state entry. 
auto tablet_info_result = GetTabletInfo(entry.tablet_id); if (!tablet_info_result.ok()) { - keys_to_delete.emplace_back(entry.tablet_id, entry.stream_id); + LOG_WITH_FUNC(WARNING) << "Did not find tablet info for tablet_id: " << entry.tablet_id + << " , will not delete its cdc_state entry for stream id:" + << entry.stream_id << "in this iteration"; continue; } @@ -4752,7 +4754,7 @@ Status CatalogManager::ClearFailedReplicationBootstrap() { if (bootstrap_info == nullptr) { auto error_msg = Format("UniverseReplicationBootstrap not found: $0", replication_id.ToString()); - LOG(ERROR) << error_msg; + LOG(WARNING) << error_msg; return STATUS(NotFound, error_msg); } } @@ -4990,7 +4992,7 @@ Status CatalogManager::DoProcessCDCSDKTabletDeletion() { auto s = cdc_state_table_->DeleteEntries(entries_to_delete); if (!s.ok()) { - LOG(ERROR) << "Unable to flush operations to delete cdc streams: " << s; + LOG(WARNING) << "Unable to flush operations to delete cdc streams: " << s; return s.CloneAndPrepend("Error deleting cdc stream rows from cdc_state table"); } diff --git a/src/yb/master/yql_peers_vtable.cc b/src/yb/master/yql_peers_vtable.cc index fb29d5e28b14..cbad3e1e0c7b 100644 --- a/src/yb/master/yql_peers_vtable.cc +++ b/src/yb/master/yql_peers_vtable.cc @@ -114,15 +114,15 @@ Result PeersVTable::RetrieveData( // result, skip 'remote_endpoint' in the results. 
auto private_ip = entry.ts_ips.private_ip_future.get(); if (!private_ip.ok()) { - LOG(ERROR) << "Failed to get private ip from " << entry.ts_info.ShortDebugString() - << ": " << private_ip.status(); + LOG(WARNING) << "Failed to get private ip from " << entry.ts_info.ShortDebugString() + << ": " << private_ip.status(); continue; } auto public_ip = entry.ts_ips.public_ip_future.get(); if (!public_ip.ok()) { - LOG(ERROR) << "Failed to get public ip from " << entry.ts_info.ShortDebugString() - << ": " << public_ip.status(); + LOG(WARNING) << "Failed to get public ip from " << entry.ts_info.ShortDebugString() + << ": " << public_ip.status(); continue; } diff --git a/src/yb/master/ysql/ysql_catalog_config.cc b/src/yb/master/ysql/ysql_catalog_config.cc index e41737636694..64efc0f619b1 100644 --- a/src/yb/master/ysql/ysql_catalog_config.cc +++ b/src/yb/master/ysql/ysql_catalog_config.cc @@ -111,7 +111,7 @@ Status YsqlCatalogConfig::SetInitDbDone(const Status& initdb_status, const Leade if (initdb_status.ok()) { LOG(INFO) << "Global initdb completed successfully"; } else { - LOG(ERROR) << "Global initdb failed: " << initdb_status; + LOG(FATAL) << "Global initdb failed: " << initdb_status; } auto [l, pb] = LockForWrite(epoch); diff --git a/src/yb/master/ysql/ysql_initdb_major_upgrade_handler.cc b/src/yb/master/ysql/ysql_initdb_major_upgrade_handler.cc index 715180a6e029..b702bb599cdb 100644 --- a/src/yb/master/ysql/ysql_initdb_major_upgrade_handler.cc +++ b/src/yb/master/ysql/ysql_initdb_major_upgrade_handler.cc @@ -66,6 +66,9 @@ DEFINE_RUNTIME_bool(ysql_upgrade_import_stats, false, DEFINE_test_flag(bool, ysql_fail_cleanup_previous_version_catalog, false, "Fail the cleanup of the previous version ysql catalog"); +DEFINE_test_flag(bool, ysql_block_writes_to_catalog, false, + "Block writes to the catalog tables like we would during a ysql major upgrade"); + using yb::pgwrapper::PgWrapper; #define SCHECK_YSQL_ENABLED SCHECK(FLAGS_enable_ysql, IllegalState, "YSQL is not 
enabled") @@ -289,6 +292,10 @@ bool YsqlInitDBAndMajorUpgradeHandler::IsWriteToCatalogTableAllowed( return is_forced_update; } + if (FLAGS_TEST_ysql_block_writes_to_catalog) { + return is_forced_update; + } + // If we are not in the middle of a major upgrade then only allow updates to the current // version. return IsCurrentVersionYsqlCatalogTable(table_id); @@ -352,12 +359,12 @@ void YsqlInitDBAndMajorUpgradeHandler::RunMajorVersionUpgrade(const LeaderEpoch& if (update_state_status.ok()) { LOG(INFO) << "Ysql major catalog upgrade completed successfully"; } else { - LOG(ERROR) << "Failed to set major version upgrade state: " << update_state_status; + LOG(DFATAL) << "Failed to set major version upgrade state: " << update_state_status; } return; } - LOG(ERROR) << "Ysql major catalog upgrade failed: " << status; + LOG(WARNING) << "Ysql major catalog upgrade failed: " << status; ERROR_NOT_OK( TransitionMajorCatalogUpgradeState(YsqlMajorCatalogUpgradeInfoPB::FAILED, epoch, status), "Failed to set major version upgrade state"); diff --git a/src/yb/qlexpr/ql_expr.cc b/src/yb/qlexpr/ql_expr.cc index 8705d0968bf3..ebfeee20bc41 100644 --- a/src/yb/qlexpr/ql_expr.cc +++ b/src/yb/qlexpr/ql_expr.cc @@ -490,7 +490,7 @@ Status QLExprExecutor::EvalCondition(const QLConditionPB& condition, case QL_OP_LIKE: FALLTHROUGH_INTENDED; case QL_OP_NOT_LIKE: - LOG(ERROR) << "Internal error: illegal or unknown operator " << condition.op(); + LOG(DFATAL) << "Internal error: illegal or unknown operator " << condition.op(); break; case QL_OP_NOOP: @@ -733,7 +733,7 @@ Status QLExprExecutor::EvalCondition( case QL_OP_LIKE: FALLTHROUGH_INTENDED; case QL_OP_NOT_LIKE: - LOG(ERROR) << "Internal error: illegal or unknown operator " << condition.op(); + LOG(DFATAL) << "Internal error: illegal or unknown operator " << condition.op(); break; case QL_OP_NOOP: diff --git a/src/yb/qlexpr/ql_serialization.cc b/src/yb/qlexpr/ql_serialization.cc index 8f239e426209..128e838eaafa 100644 --- 
a/src/yb/qlexpr/ql_serialization.cc +++ b/src/yb/qlexpr/ql_serialization.cc @@ -58,8 +58,8 @@ void SerializeValue( bool is_out_of_range = false; CQLEncodeBytes(decimal.EncodeToSerializedBigDecimal(&is_out_of_range), buffer); if(is_out_of_range) { - LOG(ERROR) << "Out of range: Unable to encode decimal " << decimal.ToString() - << " into a BigDecimal serialized representation"; + LOG(DFATAL) << "Out of range: Unable to encode decimal " << decimal.ToString() + << " into a BigDecimal serialized representation"; } return; } diff --git a/src/yb/rocksdb/db/memtable_list.cc b/src/yb/rocksdb/db/memtable_list.cc index 892564084b8c..2a143c28a487 100644 --- a/src/yb/rocksdb/db/memtable_list.cc +++ b/src/yb/rocksdb/db/memtable_list.cc @@ -326,9 +326,9 @@ void MemTableList::PickMemtablesToFlush( } all_memtables_logged = true; } - LOG(ERROR) << "Failed when checking if memtable can be flushed (will still flush it): " - << filter_result.status() << ". Memtable: " << m->ToString() - << ss.str(); + LOG(DFATAL) << "Failed when checking if memtable can be flushed (will still flush it): " + << filter_result.status() << ". Memtable: " << m->ToString() + << ss.str(); // Still flush the memtable so that this error does not keep occurring. 
} } diff --git a/src/yb/rocksdb/db/version_set.cc b/src/yb/rocksdb/db/version_set.cc index 4e2afbc81d3c..ff7ce86b345a 100644 --- a/src/yb/rocksdb/db/version_set.cc +++ b/src/yb/rocksdb/db/version_set.cc @@ -3132,8 +3132,7 @@ Status VersionSet::Import(const std::string& source_dir, if (!status.ok()) { for (const auto& file : revert_list) { - auto delete_status = env_->DeleteFile(file); - LOG(ERROR) << "Failed to delete file: " << file << ", status: " << delete_status.ToString(); + ERROR_NOT_OK(env_->DeleteFile(file), yb::Format("Failed to delete file $0", file)); } return status; } diff --git a/src/yb/rocksdb/db/write_batch.cc b/src/yb/rocksdb/db/write_batch.cc index e6e003676ead..2f2a95f2959e 100644 --- a/src/yb/rocksdb/db/write_batch.cc +++ b/src/yb/rocksdb/db/write_batch.cc @@ -251,7 +251,7 @@ uint32_t WriteBatch::ComputeContentFlags() const { if ((rv & ContentFlags::DEFERRED) != 0) { BatchContentClassifier classifier; auto status = Iterate(&classifier); - LOG_IF(ERROR, !status.ok()) << "Iterate failed during ComputeContentFlags: " << status; + LOG_IF(WARNING, !status.ok()) << "Iterate failed during ComputeContentFlags: " << status; rv = classifier.content_flags; // this method is conceptually const, because it is performing a lazy diff --git a/src/yb/rocksdb/db/write_thread.cc b/src/yb/rocksdb/db/write_thread.cc index d54fb8455c19..7f961ed92c80 100644 --- a/src/yb/rocksdb/db/write_thread.cc +++ b/src/yb/rocksdb/db/write_thread.cc @@ -29,6 +29,7 @@ #include "yb/rocksdb/util/random.h" #include "yb/util/callsite_profiling.h" +#include "yb/util/monotime.h" #include "yb/util/sync_point.h" namespace rocksdb { @@ -242,9 +243,11 @@ void WriteThread::JoinBatchGroup(Writer* w) { DEBUG_ONLY_TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Wait", w); if (!linked_as_leader) { + auto start = yb::MonoTime::Now(); AwaitState(w, STATE_GROUP_LEADER | STATE_PARALLEL_FOLLOWER | STATE_COMPLETED, &ctx); + w->batch->SetWriteGroupJoinDuration(yb::MonoTime::Now() - start); 
DEBUG_ONLY_TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w); } } diff --git a/src/yb/rocksdb/table/format.cc b/src/yb/rocksdb/table/format.cc index c3cbe9e1e627..0a386ce8ce15 100644 --- a/src/yb/rocksdb/table/format.cc +++ b/src/yb/rocksdb/table/format.cc @@ -463,7 +463,7 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, status = ReadBlock(file, footer, options, handle, &slice, used_buf); if (!status.ok()) { - LOG(ERROR) << __func__ << ": " << status << "\n" << yb::GetStackTrace(); + LOG_WITH_FUNC(WARNING) << status; return status; } diff --git a/src/yb/rocksdb/table/index_reader.cc b/src/yb/rocksdb/table/index_reader.cc index 8602fd0cc007..54a6891321cc 100644 --- a/src/yb/rocksdb/table/index_reader.cc +++ b/src/yb/rocksdb/table/index_reader.cc @@ -143,7 +143,7 @@ Result> HashIndexReader::Create( s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock, &prefixes_handle); if (!s.ok()) { - LOG(ERROR) << "Failed to find hash index prefixes block: " << s; + LOG(DFATAL) << "Failed to find hash index prefixes block: " << s; return index_reader; } @@ -152,7 +152,7 @@ Result> HashIndexReader::Create( s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock, &prefixes_meta_handle); if (!s.ok()) { - LOG(ERROR) << "Failed to find hash index prefixes metadata block: " << s; + LOG(DFATAL) << "Failed to find hash index prefixes metadata block: " << s; return index_reader; } @@ -168,7 +168,7 @@ Result> HashIndexReader::Create( &prefixes_meta_contents, env, mem_tracker, true /* do decompression */); if (!s.ok()) { - LOG(ERROR) << "Failed to read hash index prefixes metadata block: " << s; + LOG(DFATAL) << "Failed to read hash index prefixes metadata block: " << s; return index_reader; } @@ -183,7 +183,7 @@ Result> HashIndexReader::Create( index_reader->index_block_->SetBlockHashIndex(hash_index); index_reader->OwnPrefixesContents(std::move(prefixes_contents)); } else { - LOG(ERROR) << "Failed to create block hash 
index: " << s; + LOG(DFATAL) << "Failed to create block hash index: " << s; } } else { BlockPrefixIndex* prefix_index = nullptr; @@ -194,7 +194,7 @@ Result> HashIndexReader::Create( if (s.ok()) { index_reader->index_block_->SetBlockPrefixIndex(prefix_index); } else { - LOG(ERROR) << "Failed to create block prefix index: " << s; + LOG(DFATAL) << "Failed to create block prefix index: " << s; } } diff --git a/src/yb/rocksdb/tools/sst_dump_tool.cc b/src/yb/rocksdb/tools/sst_dump_tool.cc index 3ac6ef2a7a4e..1e9751ea1461 100644 --- a/src/yb/rocksdb/tools/sst_dump_tool.cc +++ b/src/yb/rocksdb/tools/sst_dump_tool.cc @@ -53,6 +53,7 @@ #include "yb/docdb/docdb_debug.h" +#include "yb/util/format.h" #include "yb/util/status_log.h" using yb::docdb::StorageDbType; @@ -60,8 +61,6 @@ using yb::docdb::StorageDbType; namespace rocksdb { using std::dynamic_pointer_cast; -using std::unique_ptr; -using std::shared_ptr; std::string DocDBKVFormatter::Format( const yb::Slice&, const yb::Slice&, yb::docdb::StorageDbType) const { @@ -97,9 +96,7 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { uint64_t magic_number; // read table magic number - Footer footer; - - unique_ptr file; + std::unique_ptr file; uint64_t file_size; Status s = options_.env->NewRandomAccessFile(file_path, &file, soptions_); if (s.ok()) { @@ -109,10 +106,10 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { file_.reset(new RandomAccessFileReader(std::move(file))); if (s.ok()) { - s = ReadFooterFromFile(file_.get(), file_size, &footer); + s = ReadFooterFromFile(file_.get(), file_size, &footer_); } if (s.ok()) { - magic_number = footer.table_magic_number(); + magic_number = footer_.table_magic_number(); } if (s.ok()) { @@ -135,10 +132,10 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { s = NewTableReader(ioptions_, soptions_, *internal_comparator_, file_size, &table_reader_); if (s.ok() && table_reader_->IsSplitSst()) { - unique_ptr data_file; + 
std::unique_ptr data_file; RETURN_NOT_OK(options_.env->NewRandomAccessFile( TableBaseToDataFileName(file_path), &data_file, soptions_)); - unique_ptr data_file_reader( + std::unique_ptr data_file_reader( new RandomAccessFileReader(std::move(data_file))); table_reader_->SetDataFileReader(std::move(data_file_reader)); } @@ -149,10 +146,10 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { Status SstFileReader::NewTableReader( const ImmutableCFOptions& ioptions, const EnvOptions& soptions, const InternalKeyComparator& internal_comparator, uint64_t file_size, - unique_ptr* table_reader) { + std::unique_ptr* table_reader) { // We need to turn off pre-fetching of index and filter nodes for // BlockBasedTable - shared_ptr block_table_factory = + std::shared_ptr block_table_factory = dynamic_pointer_cast(options_.table_factory); if (block_table_factory) { @@ -172,7 +169,7 @@ Status SstFileReader::NewTableReader( } Status SstFileReader::DumpTable(const std::string& out_filename) { - unique_ptr out_file; + std::unique_ptr out_file; Env* env = Env::Default(); RETURN_NOT_OK(env->NewWritableFile(out_filename, &out_file, soptions_)); Status s = table_reader_->DumpTable(out_file.get()); @@ -182,20 +179,20 @@ Status SstFileReader::DumpTable(const std::string& out_filename) { uint64_t SstFileReader::CalculateCompressedTableSize( const TableBuilderOptions& tb_options, size_t block_size) { - unique_ptr out_file; - unique_ptr env(NewMemEnv(Env::Default())); + std::unique_ptr out_file; + std::unique_ptr env(NewMemEnv(Env::Default())); CHECK_OK(env->NewWritableFile(testFileName, &out_file, soptions_)); - unique_ptr dest_writer; + std::unique_ptr dest_writer; dest_writer.reset(new WritableFileWriter(std::move(out_file), soptions_)); BlockBasedTableOptions table_options; table_options.block_size = block_size; BlockBasedTableFactory block_based_tf(table_options); - unique_ptr table_builder; + std::unique_ptr table_builder; table_builder = 
block_based_tf.NewTableBuilder( tb_options, TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, dest_writer.get()); - unique_ptr iter(table_reader_->NewIterator(ReadOptions())); + std::unique_ptr iter(table_reader_->NewIterator(ReadOptions())); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { if (!iter->status().ok()) { fputs(iter->status().ToString().c_str(), stderr); @@ -387,6 +384,77 @@ Status SstFileReader::ReadSequential(bool print_kv, return ret; } +namespace { + +void PrintSaveBlockCommand(const std::string& data_file_path, const BlockHandle& block_handle) { + std::cout << "dd if=\"" << data_file_path << "\" bs=1 skip=" << block_handle.offset() + << " count=" << block_handle.size() << " of=\"" << data_file_path << ".offset_" + << block_handle.offset() << ".size_" << block_handle.size() << ".part\"" << std::endl; +} + +} // namespace + +Status SstFileReader::CheckDataBlocks(DoUncompress do_uncompress) { + if (!table_reader_) { + return init_result_; + } + ReadOptions read_options; + read_options.verify_checksums = true; + + std::unique_ptr index_iterator(table_reader_->NewIndexIterator(read_options)); + RETURN_NOT_OK(index_iterator->status()); + + const auto data_file_path = + table_reader_->IsSplitSst() ? 
TableBaseToDataFileName(file_name_) : file_name_; + std::unique_ptr data_file; + RETURN_NOT_OK(options_.env->NewRandomAccessFile(data_file_path, &data_file, soptions_)); + std::unique_ptr data_file_reader( + new RandomAccessFileReader(std::move(data_file))); + + size_t index_entry_pos = 0; + BlockHandle prev_block_handle; + BlockHandle block_handle; + bool save_block = false; + for (index_iterator->SeekToFirst(); index_iterator->Valid(); + index_iterator->Next(), ++index_entry_pos) { + prev_block_handle = block_handle; + { + auto index_value_slice = index_iterator->Entry().value; + auto status = block_handle.DecodeFrom(&index_value_slice); + if (!status.ok()) { + LOG(WARNING) << "Failed to decode SST index entry #" << index_entry_pos << ": " + << index_iterator->Entry().value.ToDebugHexString() << ". " << status; + continue; + } + LOG_IF(WARNING, index_value_slice.size() > 0) + << "Extra bytes (" << index_value_slice.size() + << ") in index entry: " << index_iterator->Entry().value.ToDebugHexString(); + } + YB_LOG_EVERY_N_SECS(INFO, 30) << "Checking data block #" << index_entry_pos + << " handle: " << block_handle.ToDebugString(); + + BlockContents block_contents; + auto status = ReadBlockContents( + data_file_reader.get(), footer_, read_options, block_handle, &block_contents, options_.env, + /* mem_tracker = */ nullptr, do_uncompress); + if (!status.ok()) { + LOG(WARNING) << "Failed to read block with handle: " << block_handle.ToDebugString() << ". " + << status; + if (prev_block_handle.IsSet()) { + PrintSaveBlockCommand(data_file_path, prev_block_handle); + } + PrintSaveBlockCommand(data_file_path, block_handle); + // Save next block as well. 
+ save_block = true; + } else if (save_block) { + PrintSaveBlockCommand(data_file_path, block_handle); + save_block = false; + } + } + + return Status::OK(); +} + Status SstFileReader::ReadTableProperties( std::shared_ptr* table_properties) { if (!table_reader_) { @@ -401,7 +469,7 @@ namespace { void print_help() { fprintf(stderr, - "sst_dump [--command=check|scan|none|raw] [--verify_checksum] " + "sst_dump [--command=check|scan|check_data_blocks|none|raw] [--verify_checksum] " "--file=data_dir_OR_sst_file" " [--output_format=raw|hex|decoded_regulardb|decoded_intentsdb]" " [--formatter_tablet_metadata=" @@ -411,7 +479,8 @@ void print_help() { " [--read_num=NUM]" " [--show_properties]" " [--show_compression_sizes]" - " [--show_compression_sizes [--set_block_size=]]\n"); + " [--show_compression_sizes [--set_block_size=]]" + " [--skip_uncompress]\n"); } } // namespace @@ -431,6 +500,7 @@ int SSTDumpTool::Run(int argc, char** argv) { bool show_properties = false; bool show_compression_sizes = false; bool set_block_size = false; + DoUncompress do_uncompress = DoUncompress ::kTrue; std::string from_key; std::string to_key; std::string block_size_str; @@ -470,6 +540,8 @@ int SSTDumpTool::Run(int argc, char** argv) { show_properties = true; } else if (strcmp(argv[i], "--show_compression_sizes") == 0) { show_compression_sizes = true; + } else if (strcmp(argv[i], "--skip_uncompress") == 0) { + do_uncompress = DoUncompress::kFalse; } else if (strncmp(argv[i], "--set_block_size=", 17) == 0) { set_block_size = true; block_size_str = argv[i] + 17; @@ -575,6 +647,8 @@ int SSTDumpTool::Run(int argc, char** argv) { if (read_num > 0 && total_read > read_num) { break; } + } else if (command == "check_data_blocks") { + ERROR_NOT_OK(reader.CheckDataBlocks(do_uncompress), "Failed to scan SST file blocks:"); } if (show_properties) { const rocksdb::TableProperties* table_properties; diff --git a/src/yb/rocksdb/tools/sst_dump_tool_imp.h b/src/yb/rocksdb/tools/sst_dump_tool_imp.h index 
6671c9590c9b..3e6235f88b95 100644 --- a/src/yb/rocksdb/tools/sst_dump_tool_imp.h +++ b/src/yb/rocksdb/tools/sst_dump_tool_imp.h @@ -25,13 +25,17 @@ #include #include -#include "yb/rocksdb/rocksdb_fwd.h" + #include "yb/rocksdb/db/dbformat.h" #include "yb/rocksdb/immutable_options.h" +#include "yb/rocksdb/rocksdb_fwd.h" +#include "yb/rocksdb/table/format.h" #include "yb/rocksdb/util/file_reader_writer.h" namespace rocksdb { +YB_STRONGLY_TYPED_BOOL(DoUncompress); + class SstFileReader { public: SstFileReader( @@ -43,6 +47,8 @@ class SstFileReader { const std::string& from_key, bool has_to, const std::string& to_key); + Status CheckDataBlocks(DoUncompress do_uncompress); + Status ReadTableProperties( std::shared_ptr* table_properties); uint64_t GetReadNumber() { return read_num_; } @@ -81,6 +87,7 @@ class SstFileReader { EnvOptions soptions_; Status init_result_; + Footer footer_; std::unique_ptr table_reader_; std::unique_ptr file_; // options_ and internal_comparator_ will also be used in diff --git a/src/yb/rocksdb/util/env_posix.cc b/src/yb/rocksdb/util/env_posix.cc index 8bf9b3f72896..bb79f1e4b0aa 100644 --- a/src/yb/rocksdb/util/env_posix.cc +++ b/src/yb/rocksdb/util/env_posix.cc @@ -396,7 +396,7 @@ class PosixEnv : public Env { int fd = fileno(f); #ifdef ROCKSDB_FALLOCATE_PRESENT if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 * 1024) != 0) { - LOG(ERROR) << STATUS_IO_ERROR(fname, errno); + LOG(WARNING) << STATUS_IO_ERROR(fname, errno); } #endif SetFD_CLOEXEC(fd, nullptr); diff --git a/src/yb/rocksdb/util/posix_logger.h b/src/yb/rocksdb/util/posix_logger.h index 90306a1d3a37..304f69d50f08 100644 --- a/src/yb/rocksdb/util/posix_logger.h +++ b/src/yb/rocksdb/util/posix_logger.h @@ -165,9 +165,9 @@ class PosixLogger : public Logger { if (fallocate( fd_, FALLOC_FL_KEEP_SIZE, 0, static_cast(desired_allocation_chunk * kDebugLogChunkSize)) != 0) { - LOG(ERROR) << STATUS_IO_ERROR(fname_, errno) - << " desired_allocation_chunk: " << desired_allocation_chunk - << " 
kDebugLogChunkSize: " << kDebugLogChunkSize; + LOG(WARNING) << STATUS_IO_ERROR(fname_, errno) + << " desired_allocation_chunk: " << desired_allocation_chunk + << " kDebugLogChunkSize: " << kDebugLogChunkSize; } } #endif diff --git a/src/yb/rocksdb/write_batch.h b/src/yb/rocksdb/write_batch.h index 54c80e0207e0..af1eb93efa4f 100644 --- a/src/yb/rocksdb/write_batch.h +++ b/src/yb/rocksdb/write_batch.h @@ -49,6 +49,7 @@ #include "yb/rocksdb/status.h" #include "yb/rocksdb/write_batch_base.h" +#include "yb/util/monotime.h" #include "yb/util/slice.h" #include "yb/util/slice_parts.h" @@ -287,6 +288,14 @@ class WriteBatch : public WriteBatchBase { handler_for_logging_ = handler_for_logging; } + yb::MonoDelta GetWriteGroupJoinDuration() const { + return write_group_join_duration_; + } + + void SetWriteGroupJoinDuration(yb::MonoDelta write_group_join_duration) { + write_group_join_duration_ = write_group_join_duration; + } + private: friend class WriteBatchInternal; std::unique_ptr save_points_; @@ -304,6 +313,9 @@ class WriteBatch : public WriteBatchBase { mutable size_t direct_entries_ = 0; Handler* handler_for_logging_ = nullptr; + + // The field is set by rocksdb during the batch write. 
+ yb::MonoDelta write_group_join_duration_ = yb::MonoDelta::kZero; }; } // namespace rocksdb diff --git a/src/yb/rpc/acceptor.cc b/src/yb/rpc/acceptor.cc index 87e3a1189f2c..e4da0227cc51 100644 --- a/src/yb/rpc/acceptor.cc +++ b/src/yb/rpc/acceptor.cc @@ -146,7 +146,7 @@ void Acceptor::Shutdown() { void Acceptor::IoHandler(ev::io& io, int events) { auto it = sockets_.find(&io); if (it == sockets_.end()) { - LOG(ERROR) << "IoHandler for unknown socket: " << &io; + LOG(DFATAL) << "IoHandler for unknown socket: " << &io; return; } Socket& socket = it->second.socket; diff --git a/src/yb/rpc/io_thread_pool.cc b/src/yb/rpc/io_thread_pool.cc index f14d57abcea9..2e15da40bfa5 100644 --- a/src/yb/rpc/io_thread_pool.cc +++ b/src/yb/rpc/io_thread_pool.cc @@ -59,7 +59,7 @@ class IoThreadPool::Impl { auto deadline = std::chrono::steady_clock::now() + 15s; while (!io_service_.stopped()) { if (std::chrono::steady_clock::now() >= deadline) { - LOG(ERROR) << "Io service failed to stop"; + LOG(WARNING) << "Io service failed to stop"; io_service_.stop(); break; } @@ -74,7 +74,7 @@ class IoThreadPool::Impl { void Execute() { boost::system::error_code ec; io_service_.run(ec); - LOG_IF(ERROR, ec) << "Failed to run io service: " << ec; + LOG_IF(DFATAL, ec) << "Failed to run io service: " << ec; } std::string name_; diff --git a/src/yb/rpc/messenger.cc b/src/yb/rpc/messenger.cc index c7538bf55743..97f1eab60e73 100644 --- a/src/yb/rpc/messenger.cc +++ b/src/yb/rpc/messenger.cc @@ -604,8 +604,8 @@ Messenger::~Messenger() { VLOG(1) << "Messenger destructor for " << this << " called at:\n" << GetStackTrace(); #ifndef NDEBUG if (!closing_) { - LOG(ERROR) << "Messenger created here:\n" << creation_stack_trace_.Symbolize() - << "Messenger destructor for " << this << " called at:\n" << GetStackTrace(); + LOG(DFATAL) << "Messenger created here:\n" << creation_stack_trace_.Symbolize() + << "Messenger destructor for " << this << " called at:\n" << GetStackTrace(); } #endif CHECK(closing_) << "Should 
have already shut down"; diff --git a/src/yb/rpc/outbound_call.cc b/src/yb/rpc/outbound_call.cc index ac55648dbf05..5015fdb0ca91 100644 --- a/src/yb/rpc/outbound_call.cc +++ b/src/yb/rpc/outbound_call.cc @@ -621,7 +621,7 @@ void OutboundCall::SetFailed(const Status &status, std::unique_ptrtime(), ec); - LOG_IF(ERROR, ec) << "Reschedule timer failed: " << ec.message(); + LOG_IF(DFATAL, ec) << "Reschedule timer failed: " << ec.message(); ++timer_counter_; timer_.async_wait(strand_.wrap(std::bind(&Impl::HandleTimer, this, _1))); } @@ -128,7 +128,8 @@ class Scheduler::Impl { --timer_counter_; if (ec) { - LOG_IF(ERROR, ec != boost::asio::error::operation_aborted) << "Wait failed: " << ec.message(); + LOG_IF(DFATAL, ec != boost::asio::error::operation_aborted) + << "Wait failed: " << ec.message(); return; } if (closing_.load(std::memory_order_acquire)) { diff --git a/src/yb/rpc/strand.h b/src/yb/rpc/strand.h index 5f0a20e1a051..9ed190b42305 100644 --- a/src/yb/rpc/strand.h +++ b/src/yb/rpc/strand.h @@ -24,6 +24,16 @@ namespace rpc { class StrandTask : public MPSCQueueEntry, public ThreadPoolTask { protected: ~StrandTask() = default; + private: + friend void SetNext(StrandTask* entry, StrandTask* next) { + entry->next_ = next; + } + + friend StrandTask* GetNext(const StrandTask* entry) { + return entry->next_; + } + + StrandTask* next_ = nullptr; }; template diff --git a/src/yb/rpc/thread_pool.cc b/src/yb/rpc/thread_pool.cc index 1e37c5562ad4..acf300bbea9e 100644 --- a/src/yb/rpc/thread_pool.cc +++ b/src/yb/rpc/thread_pool.cc @@ -41,7 +41,7 @@ namespace { class Worker; -using TaskQueue = RWQueue; +using TaskQueue = SemiFairQueue; using WaitingWorkers = LockFreeStack; struct ThreadPoolShare { @@ -144,8 +144,7 @@ class Worker : public boost::intrusive::list_base_hook<> { ThreadPoolTask* PopTask() { // First of all we try to get already queued task, w/o locking. // If there is no task, so we could go to waiting state. 
- ThreadPoolTask* task; - if (share_.task_queue.pop(task)) { + if (auto* task = share_.task_queue.Pop()) { return task; } @@ -190,8 +189,7 @@ class Worker : public boost::intrusive::list_base_hook<> { if (task_) { return std::exchange(task_, nullptr); } - ThreadPoolTask* task; - if (share_.task_queue.pop(task)) { + if (auto task = share_.task_queue.Pop()) { return task; } return std::nullopt; @@ -333,7 +331,7 @@ class ThreadPool::Impl { { std::lock_guard lock(mutex_); if (closing_) { - CHECK(share_.task_queue.empty()); + CHECK(share_.task_queue.Empty()); CHECK(workers_.empty()); return; } @@ -357,8 +355,7 @@ class ThreadPool::Impl { workers_.clear(); } } - ThreadPoolTask* task = nullptr; - while (share_.task_queue.pop(task)) { + while (auto* task = share_.task_queue.Pop()) { TaskDone(task, kShuttingDownStatus); } diff --git a/src/yb/rpc/thread_pool.h b/src/yb/rpc/thread_pool.h index 57928dcebd0f..2eb31745cbb4 100644 --- a/src/yb/rpc/thread_pool.h +++ b/src/yb/rpc/thread_pool.h @@ -21,6 +21,7 @@ #include "yb/gutil/port.h" +#include "yb/util/lockfree.h" #include "yb/util/status.h" #include "yb/util/tostring.h" #include "yb/util/type_traits.h" @@ -34,7 +35,7 @@ namespace rpc { class ThreadSubPoolBase; -class ThreadPoolTask { +class ThreadPoolTask : public MPSCQueueEntry { public: // Invoked in thread pool virtual void Run() = 0; diff --git a/src/yb/rpc/yb_rpc.cc b/src/yb/rpc/yb_rpc.cc index b122b4b17b36..4d0f801dbbdd 100644 --- a/src/yb/rpc/yb_rpc.cc +++ b/src/yb/rpc/yb_rpc.cc @@ -281,8 +281,7 @@ void YBInboundCall::UpdateWaitStateInfo() { .method = method_name().ToBuffer(), }); } else { - LOG_IF(ERROR, GetAtomicFlag(&FLAGS_ysql_yb_enable_ash)) - << "Wait state is nullptr for " << ToString(); + LOG_IF(DFATAL, FLAGS_ysql_yb_enable_ash) << "Wait state is nullptr for " << ToString(); } } diff --git a/src/yb/server/call_home.cc b/src/yb/server/call_home.cc index 2702ec94c73e..2f72e0247050 100644 --- a/src/yb/server/call_home.cc +++ b/src/yb/server/call_home.cc @@ -151,7 
+151,7 @@ class RpcsCollector : public Collector { auto url = Substitute("http://$0/rpcz", yb::ToString(*addr_)); auto status = curl_.FetchURL(url, &buf); if (!status.ok()) { - LOG(ERROR) << "Unable to read url " << url; + LOG(WARNING) << "Unable to read url " << url; return; } @@ -258,9 +258,9 @@ std::string CallHome::BuildJson() { rapidjson::Reader reader; rapidjson::StringStream ss(str.c_str()); if (!reader.Parse(ss, writer)) { - LOG(ERROR) << "Unable to parse json. Error: " << reader.GetParseErrorCode() << " at offset " - << reader.GetErrorOffset() << " in string " - << str.substr(reader.GetErrorOffset(), 10); + LOG(WARNING) << "Unable to parse json. Error: " << reader.GetParseErrorCode() << " at offset " + << reader.GetErrorOffset() << " in string " + << str.substr(reader.GetErrorOffset(), 10); return str; } diff --git a/src/yb/server/generic_service.cc b/src/yb/server/generic_service.cc index 0d162193a5e0..aa73f19cf852 100644 --- a/src/yb/server/generic_service.cc +++ b/src/yb/server/generic_service.cc @@ -171,7 +171,7 @@ void GenericServiceImpl::ReloadCertificates( rpc::RpcContext rpc) { const auto status = server_->ReloadKeysAndCertificates(); if (!status.ok()) { - LOG(ERROR) << "Reloading certificates failed: " << status; + LOG(WARNING) << "Reloading certificates failed: " << status; rpc.RespondFailure(status); return; } diff --git a/src/yb/server/server_fwd.h b/src/yb/server/server_fwd.h index a0c461fd847c..9b85473f253d 100644 --- a/src/yb/server/server_fwd.h +++ b/src/yb/server/server_fwd.h @@ -23,6 +23,7 @@ namespace server { class Clock; class GenericServiceProxy; class MonitoredTask; +class RpcServerBase; class RunnableMonitoredTask; enum class MonitoredTaskState : int; diff --git a/src/yb/server/total_mem_watcher.cc b/src/yb/server/total_mem_watcher.cc index 5a8784ab07b2..d2b252327043 100644 --- a/src/yb/server/total_mem_watcher.cc +++ b/src/yb/server/total_mem_watcher.cc @@ -100,9 +100,9 @@ void TotalMemWatcher::MemoryMonitoringLoop(std::function 
trigger_termina } std::string termination_explanation = GetTerminationExplanation(); if (!termination_explanation.empty()) { - LOG(ERROR) << "Memory usage exceeded configured limit, terminating the process: " - << termination_explanation << "\nDetails:\n" - << GetMemoryUsageDetails(); + LOG(DFATAL) << "Memory usage exceeded configured limit, terminating the process: " + << termination_explanation << "\nDetails:\n" + << GetMemoryUsageDetails(); trigger_termination_fn(); return; } diff --git a/src/yb/server/webserver.cc b/src/yb/server/webserver.cc index 6f5218103658..684761858927 100644 --- a/src/yb/server/webserver.cc +++ b/src/yb/server/webserver.cc @@ -546,7 +546,7 @@ Status Webserver::Impl::GetBoundAddresses(std::vector* addrs_ptr) cons break; } default: { - LOG(ERROR) << "Unexpected address family: " << sockaddrs[i]->ss_family; + LOG(DFATAL) << "Unexpected address family: " << sockaddrs[i]->ss_family; RSTATUS_DCHECK(false, IllegalState, "Unexpected address family"); break; } diff --git a/src/yb/tablet/mvcc.cc b/src/yb/tablet/mvcc.cc index 0f70848b34c3..bf6a1058652b 100644 --- a/src/yb/tablet/mvcc.cc +++ b/src/yb/tablet/mvcc.cc @@ -388,7 +388,7 @@ void MvccManager::AddPending(HybridTime ht, const OpId& op_id, bool is_follower_ sanity_check_lower_bound && sanity_check_lower_bound != HybridTime::kMax) { HybridTime incremented_hybrid_time = sanity_check_lower_bound.Incremented(); - YB_LOG_EVERY_N_SECS(ERROR, 5) << LogPrefix() + YB_LOG_EVERY_N_SECS(DFATAL, 5) << LogPrefix() << "Assigning an artificially incremented hybrid time: " << incremented_hybrid_time << ". This needs to be investigated. " << get_details_msg(/* drain_aborted */ false); ht = incremented_hybrid_time; @@ -466,7 +466,7 @@ void MvccManager::UpdatePropagatedSafeTimeOnLeader(const FixedHybridTimeLease& h #else // Do not crash in production. 
if (safe_time < propagated_safe_time_) { - YB_LOG_EVERY_N_SECS(ERROR, 5) << LogPrefix() + YB_LOG_EVERY_N_SECS(DFATAL, 5) << LogPrefix() << "Previously saw " << YB_EXPR_TO_STREAM(propagated_safe_time_) << ", but now safe time is " << safe_time; } else { diff --git a/src/yb/tablet/operations/operation_driver.cc b/src/yb/tablet/operations/operation_driver.cc index 09b716e0bfc7..15fbf5666268 100644 --- a/src/yb/tablet/operations/operation_driver.cc +++ b/src/yb/tablet/operations/operation_driver.cc @@ -353,8 +353,7 @@ void OperationDriver::ReplicationFinished( // the tablet. if (prepare_state_copy != PrepareState::PREPARED) { LOG(DFATAL) << "Replicating an operation that has not been prepared: " << AsString(this); - - LOG(ERROR) << "Attempting to wait for the operation to be prepared"; + LOG(WARNING) << "Attempting to wait for the operation to be prepared"; // This case should never happen, but if it happens we are trying to survive. for (;;) { diff --git a/src/yb/tablet/tablet-split-test.cc b/src/yb/tablet/tablet-split-test.cc index 15a6f66b0b63..a70c895f9580 100644 --- a/src/yb/tablet/tablet-split-test.cc +++ b/src/yb/tablet/tablet-split-test.cc @@ -134,9 +134,9 @@ TEST_F(TabletSplitTest, SplitTablet) { << docdb::DocDBDebugDumpToStr( tablet()->doc_db(), &tablet()->GetSchemaPackingProvider(), docdb::IncludeBinary::kTrue); - const auto source_docdb_dump_str = tablet()->TEST_DocDBDumpStr(IncludeIntents::kTrue); + const auto source_docdb_dump_str = tablet()->TEST_DocDBDumpStr(docdb::IncludeIntents::kTrue); std::unordered_set source_docdb_dump; - tablet()->TEST_DocDBDumpToContainer(IncludeIntents::kTrue, &source_docdb_dump); + tablet()->TEST_DocDBDumpToContainer(docdb::IncludeIntents::kTrue, &source_docdb_dump); std::unordered_set source_rows; for (const auto& row : ASSERT_RESULT(SelectAll(tablet().get()))) { @@ -183,7 +183,7 @@ TEST_F(TabletSplitTest, SplitTablet) { split_tablet->metadata()->ToSuperBlock(&super_block); ASSERT_EQ(split_tablet->tablet_id(), 
super_block.kv_store().kv_store_id()); } - const auto split_docdb_dump_str = split_tablet->TEST_DocDBDumpStr(IncludeIntents::kTrue); + const auto split_docdb_dump_str = split_tablet->TEST_DocDBDumpStr(docdb::IncludeIntents::kTrue); // Before compaction underlying DocDB dump should be the same. ASSERT_EQ(source_docdb_dump_str, split_docdb_dump_str); @@ -207,12 +207,12 @@ TEST_F(TabletSplitTest, SplitTablet) { ASSERT_OK(split_tablet->ForceManualRocksDBCompact()); VLOG(1) << split_tablet->tablet_id() << " compacted:" << std::endl - << split_tablet->TEST_DocDBDumpStr(IncludeIntents::kTrue); + << split_tablet->TEST_DocDBDumpStr(docdb::IncludeIntents::kTrue); // After compaction split tablets' RocksDB instances should have no overlap and no unexpected // data. std::unordered_set split_docdb_dump; - split_tablet->TEST_DocDBDumpToContainer(IncludeIntents::kTrue, &split_docdb_dump); + split_tablet->TEST_DocDBDumpToContainer(docdb::IncludeIntents::kTrue, &split_docdb_dump); for (const auto& entry : split_docdb_dump) { ASSERT_EQ(source_docdb_dump.erase(entry), 1); } diff --git a/src/yb/tablet/tablet.cc b/src/yb/tablet/tablet.cc index 253b187b2cf2..bcee4b08e030 100644 --- a/src/yb/tablet/tablet.cc +++ b/src/yb/tablet/tablet.cc @@ -812,7 +812,7 @@ Status Tablet::CreateTabletDirectories(const string& db_dir, FsManager* fs) { Format("Failed to create RocksDB tablet directory $0", db_dir)); RETURN_NOT_OK_PREPEND( - fs->CreateDirIfMissingAndSync(docdb::GetStorageDir(db_dir, kIntentsDirName)), + fs->CreateDirIfMissingAndSync(docdb::GetStorageDir(db_dir, docdb::kIntentsDirName)), Format("Failed to create RocksDB tablet intents directory $0", db_dir)); RETURN_NOT_OK(snapshots_->CreateDirectories(db_dir, fs)); @@ -1106,7 +1106,7 @@ Status Tablet::OpenRegularDB(const rocksdb::Options& common_options) { if (db != nullptr) { delete db; } - return STATUS(IllegalState, rocksdb_open_status.ToString()); + return rocksdb_open_status; } regular_db_.reset(db); 
regular_db_->ListenFilesChanged(std::bind(&Tablet::RegularDbFilesChanged, this)); @@ -1122,7 +1122,7 @@ Status Tablet::OpenIntentsDB(const rocksdb::Options& common_options) { const auto& db_dir = metadata()->rocksdb_dir(); - auto intents_dir = docdb::GetStorageDir(db_dir, kIntentsDirName); + auto intents_dir = docdb::GetStorageDir(db_dir, docdb::kIntentsDirName); LOG_WITH_PREFIX(INFO) << "Opening intents DB at: " << intents_dir; rocksdb::Options intents_rocksdb_options(common_options); intents_rocksdb_options.compaction_context_factory = {}; @@ -1737,6 +1737,13 @@ Status Tablet::WriteTransactionalBatch( WriteToRocksDB(frontiers, &write_batch, StorageDbType::kIntents); + const auto duration = write_batch.GetWriteGroupJoinDuration(); + if (duration > MonoDelta::kZero) { + // Track only if the duration is positive so we know how many write has non-zero duration. + metrics_->Increment(TabletEventStats::kIntentDbWriteThreadJoinDuration, + duration.ToMicroseconds()); + } + last_batch_data.hybrid_time = hybrid_time; last_batch_data.next_write_id = writer.intra_txn_write_id(); transaction_participant()->BatchReplicated(transaction_id, last_batch_data); @@ -2362,6 +2369,13 @@ Status Tablet::RemoveIntentsImpl( InitFrontiers(data, frontiers); WriteToRocksDB(frontiers, &intents_write_batch, StorageDbType::kIntents); + const auto duration = intents_write_batch.GetWriteGroupJoinDuration(); + if (duration > MonoDelta::kZero) { + // Track only if the duration is positive so we know how many write has non-zero duration. 
+ metrics_->Increment(TabletEventStats::kIntentDbRemoveThreadJoinDuration, + duration.ToMicroseconds()); + } + if (!context.apply_state().active()) { break; } @@ -4100,7 +4114,7 @@ Status Tablet::ForceRocksDBCompact( return Status::OK(); } -std::string Tablet::TEST_DocDBDumpStr(IncludeIntents include_intents) { +std::string Tablet::TEST_DocDBDumpStr(docdb::IncludeIntents include_intents) { if (!regular_db_) return ""; if (!include_intents) { @@ -4112,7 +4126,7 @@ std::string Tablet::TEST_DocDBDumpStr(IncludeIntents include_intents) { } void Tablet::TEST_DocDBDumpToContainer( - IncludeIntents include_intents, std::unordered_set* out) { + docdb::IncludeIntents include_intents, std::unordered_set* out) { if (!regular_db_) return; if (!include_intents) { @@ -4123,7 +4137,7 @@ void Tablet::TEST_DocDBDumpToContainer( return docdb::DocDBDebugDumpToContainer(doc_db(), &GetSchemaPackingProvider(), out); } -void Tablet::TEST_DocDBDumpToLog(IncludeIntents include_intents) { +void Tablet::TEST_DocDBDumpToLog(docdb::IncludeIntents include_intents) { if (!regular_db_) { LOG_WITH_PREFIX(INFO) << "No RocksDB to dump"; return; diff --git a/src/yb/tablet/tablet.h b/src/yb/tablet/tablet.h index e76002ee0951..312ce9a82291 100644 --- a/src/yb/tablet/tablet.h +++ b/src/yb/tablet/tablet.h @@ -98,7 +98,6 @@ namespace tablet { YB_STRONGLY_TYPED_BOOL(BlockingRocksDbShutdownStart); YB_STRONGLY_TYPED_BOOL(FlushOnShutdown); -YB_STRONGLY_TYPED_BOOL(IncludeIntents); YB_STRONGLY_TYPED_BOOL(CheckRegularDB) YB_DEFINE_ENUM(Direction, (kForward)(kBackward)); @@ -644,13 +643,14 @@ class Tablet : public AbstractTablet, // range-based partitions always matches the returned middle key. 
Result GetEncodedMiddleSplitKey(std::string *partition_split_key = nullptr) const; - std::string TEST_DocDBDumpStr(IncludeIntents include_intents = IncludeIntents::kFalse); + std::string TEST_DocDBDumpStr( + docdb::IncludeIntents include_intents = docdb::IncludeIntents::kFalse); void TEST_DocDBDumpToContainer( - IncludeIntents include_intents, std::unordered_set* out); + docdb::IncludeIntents include_intents, std::unordered_set* out); // Dumps DocDB contents to log, every record as a separate log message, with the given prefix. - void TEST_DocDBDumpToLog(IncludeIntents include_intents); + void TEST_DocDBDumpToLog(docdb::IncludeIntents include_intents); Result TEST_CountDBRecords(docdb::StorageDbType db_type); diff --git a/src/yb/tablet/tablet.proto b/src/yb/tablet/tablet.proto index 2e04d6e65e57..d3c38e7cfa0c 100644 --- a/src/yb/tablet/tablet.proto +++ b/src/yb/tablet/tablet.proto @@ -37,6 +37,7 @@ option java_package = "org.yb.tablet"; import "yb/common/common.proto"; import "yb/common/common_types.proto"; +import "yb/common/opid.proto"; import "yb/tablet/tablet_types.proto"; message TabletStatusPB { @@ -65,6 +66,7 @@ message TabletStatusPB { repeated bytes colocated_table_ids = 19; optional string pgschema_name = 20; repeated bytes vector_index_finished_backfills = 21; + optional OpIdPB last_op_id = 22; } // Used to present the maintenance manager's internal state. 
diff --git a/src/yb/tablet/tablet_bootstrap.cc b/src/yb/tablet/tablet_bootstrap.cc index 40a8c9fa9ae9..c750f35b029c 100644 --- a/src/yb/tablet/tablet_bootstrap.cc +++ b/src/yb/tablet/tablet_bootstrap.cc @@ -555,7 +555,7 @@ class TabletBootstrap { if (FLAGS_TEST_dump_docdb_before_tablet_bootstrap) { LOG_WITH_PREFIX(INFO) << "DEBUG: DocDB dump before tablet bootstrap:"; - tablet_->TEST_DocDBDumpToLog(IncludeIntents::kTrue); + tablet_->TEST_DocDBDumpToLog(docdb::IncludeIntents::kTrue); } const auto needs_recovery = VERIFY_RESULT(PrepareToReplay()); @@ -630,7 +630,7 @@ class TabletBootstrap { listener_->StatusMessage(message); if (FLAGS_TEST_dump_docdb_after_tablet_bootstrap) { LOG_WITH_PREFIX(INFO) << "DEBUG: DocDB debug dump after tablet bootstrap:\n"; - tablet_->TEST_DocDBDumpToLog(IncludeIntents::kTrue); + tablet_->TEST_DocDBDumpToLog(docdb::IncludeIntents::kTrue); } *rebuilt_tablet = std::move(tablet_); diff --git a/src/yb/tablet/tablet_metadata.cc b/src/yb/tablet/tablet_metadata.cc index 4c81b5f6c59c..66246b686342 100644 --- a/src/yb/tablet/tablet_metadata.cc +++ b/src/yb/tablet/tablet_metadata.cc @@ -132,7 +132,6 @@ std::string MakeTableInfoLogPrefix( } // namespace const int64 kNoDurableMemStore = -1; -const std::string kIntentsDirName = "intents"; const std::string kSnapshotsDirName = "snapshots"; // ============================================================================ @@ -880,10 +879,11 @@ Status RaftGroupMetadata::DeleteTabletData(TabletDataState delete_type, const auto& rocksdb_dir = this->rocksdb_dir(); LOG_WITH_PREFIX(INFO) << "Destroying regular db at: " << rocksdb_dir; - rocksdb::Status status = rocksdb::DestroyDB(rocksdb_dir, rocksdb_options); + auto status = DestroyDB(rocksdb_dir, rocksdb_options); if (!status.ok()) { - LOG_WITH_PREFIX(ERROR) << "Failed to destroy regular DB at: " << rocksdb_dir << ": " << status; + LOG_WITH_PREFIX(WARNING) + << "Failed to destroy regular DB at: " << rocksdb_dir << ": " << status; } else { LOG_WITH_PREFIX(INFO) 
<< "Successfully destroyed regular DB at: " << rocksdb_dir; } @@ -899,8 +899,8 @@ Status RaftGroupMetadata::DeleteTabletData(TabletDataState delete_type, status = rocksdb::DestroyDB(intents_dir, rocksdb_options); if (!status.ok()) { - LOG_WITH_PREFIX(ERROR) << "Failed to destroy provisional records DB at: " << intents_dir - << ": " << status; + LOG_WITH_PREFIX(DFATAL) << "Failed to destroy provisional records DB at: " << intents_dir + << ": " << status; } else { LOG_WITH_PREFIX(INFO) << "Successfully destroyed provisional records DB at: " << intents_dir; } @@ -934,7 +934,7 @@ Status RaftGroupMetadata::DeleteTabletData(TabletDataState delete_type, bool RaftGroupMetadata::IsTombstonedWithNoRocksDBData() const { std::lock_guard lock(data_mutex_); const auto& rocksdb_dir = kv_store_.rocksdb_dir; - const auto intents_dir = docdb::GetStorageDir(rocksdb_dir, kIntentsDirName); + const auto intents_dir = docdb::GetStorageDir(rocksdb_dir, docdb::kIntentsDirName); return tablet_data_state_ == TABLET_DATA_TOMBSTONED && !fs_manager_->env()->FileExists(rocksdb_dir) && !fs_manager_->env()->FileExists(intents_dir); @@ -2445,7 +2445,7 @@ bool RaftGroupMetadata::OnPostSplitCompactionDone() { } std::string RaftGroupMetadata::intents_rocksdb_dir() const { - return docdb::GetStorageDir(kv_store_.rocksdb_dir, kIntentsDirName); + return docdb::GetStorageDir(kv_store_.rocksdb_dir, docdb::kIntentsDirName); } std::string RaftGroupMetadata::snapshots_dir() const { diff --git a/src/yb/tablet/tablet_metadata.h b/src/yb/tablet/tablet_metadata.h index fb150a6bdd35..27033eab515f 100644 --- a/src/yb/tablet/tablet_metadata.h +++ b/src/yb/tablet/tablet_metadata.h @@ -70,7 +70,6 @@ namespace yb::tablet { using TableInfoMap = std::unordered_map; extern const int64 kNoDurableMemStore; -extern const std::string kIntentsDirName; extern const std::string kSnapshotsDirName; const uint64_t kNoLastFullCompactionTime = HybridTime::kMin.ToUint64(); diff --git a/src/yb/tablet/tablet_metrics.cc 
b/src/yb/tablet/tablet_metrics.cc index 69ead7992b96..1d514b9debaf 100644 --- a/src/yb/tablet/tablet_metrics.cc +++ b/src/yb/tablet/tablet_metrics.cc @@ -79,6 +79,16 @@ METRIC_DEFINE_event_stats( table, total_wait_queue_time, "Wait Queue Time", yb::MetricUnit::kMicroseconds, "Number of microseconds spent in the wait queue for requests which enter the wait queue"); +METRIC_DEFINE_event_stats( + table, intentsdb_rocksdb_write_thread_join_group_micros, + "intents db write thread join duration", yb::MetricUnit::kMicroseconds, + "The time taken for intents db write thread to join the write group"); + +METRIC_DEFINE_event_stats( + table, intentsdb_rocksdb_remove_thread_join_group_micros, + "Intents db remove thread join duration", yb::MetricUnit::kMicroseconds, + "The time taken for intents db remove thread to join the write group"); + METRIC_DEFINE_gauge_uint32(tablet, compact_rs_running, "RowSet Compactions Running", yb::MetricUnit::kMaintenanceOperations, @@ -268,6 +278,12 @@ const EventStatsEntry kEventStats[] = { {pggate::YB_STORAGE_EVENT_TOTAL_WAIT_QUEUE_TIME, TabletEventStats::kTotalWaitQueueTime, &METRIC_total_wait_queue_time}, + {pggate::YB_STORAGE_EVENT_INTENTSDB_WRITE_JOIN_GROUP_MICROS, + TabletEventStats::kIntentDbWriteThreadJoinDuration, + &METRIC_intentsdb_rocksdb_write_thread_join_group_micros}, + {pggate::YB_STORAGE_EVENT_INTENTSDB_REMOVE_JOIN_GROUP_MICROS, + TabletEventStats::kIntentDbRemoveThreadJoinDuration, + &METRIC_intentsdb_rocksdb_remove_thread_join_group_micros}, }; class TabletMetricsImpl final : public TabletMetrics { diff --git a/src/yb/tablet/tablet_metrics.h b/src/yb/tablet/tablet_metrics.h index 7f96872a46fe..a93d15712b29 100644 --- a/src/yb/tablet/tablet_metrics.h +++ b/src/yb/tablet/tablet_metrics.h @@ -56,7 +56,9 @@ YB_DEFINE_ENUM(TabletEventStats, (kQlWriteLatency) (kWriteOpDurationCommitWaitConsistency) (kReadTimeWait) - (kTotalWaitQueueTime)) + (kTotalWaitQueueTime) + (kIntentDbWriteThreadJoinDuration) + 
(kIntentDbRemoveThreadJoinDuration)) // Make sure to add new counters to the list in src/yb/yql/pggate/pg_metrics_list.h as well. YB_DEFINE_ENUM(TabletCounters, diff --git a/src/yb/tablet/tablet_peer.cc b/src/yb/tablet/tablet_peer.cc index a8e7d6a83b6a..7562006cd825 100644 --- a/src/yb/tablet/tablet_peer.cc +++ b/src/yb/tablet/tablet_peer.cc @@ -787,37 +787,44 @@ std::unique_ptr TabletPeer::CreateUpdateTransaction( } void TabletPeer::GetTabletStatusPB(TabletStatusPB* status_pb_out) { - std::lock_guard lock(lock_); - DCHECK(status_pb_out != nullptr); - DCHECK(status_listener_.get() != nullptr); - const auto disk_size_info = GetOnDiskSizeInfo(); - status_pb_out->set_tablet_id(status_listener_->tablet_id()); - status_pb_out->set_namespace_name(status_listener_->namespace_name()); - status_pb_out->set_table_name(status_listener_->table_name()); - status_pb_out->set_table_id(status_listener_->table_id()); - status_pb_out->set_last_status(status_listener_->last_status()); - status_listener_->partition()->ToPB(status_pb_out->mutable_partition()); - status_pb_out->set_state(state_); - status_pb_out->set_tablet_data_state(meta_->tablet_data_state()); - auto tablet = tablet_; - if (tablet) { - status_pb_out->set_table_type(tablet->table_type()); - auto vector_index_finished_backfills = tablet->vector_indexes().FinishedBackfills(); - if (vector_index_finished_backfills) { - *status_pb_out->mutable_vector_index_finished_backfills() = - std::move(*vector_index_finished_backfills); + std::shared_ptr consensus; + { + std::lock_guard lock(lock_); + DCHECK(status_pb_out != nullptr); + DCHECK(status_listener_.get() != nullptr); + const auto disk_size_info = GetOnDiskSizeInfo(); + status_pb_out->set_tablet_id(status_listener_->tablet_id()); + status_pb_out->set_namespace_name(status_listener_->namespace_name()); + status_pb_out->set_table_name(status_listener_->table_name()); + status_pb_out->set_table_id(status_listener_->table_id()); + 
status_pb_out->set_last_status(status_listener_->last_status()); + status_listener_->partition()->ToPB(status_pb_out->mutable_partition()); + status_pb_out->set_state(state_); + status_pb_out->set_tablet_data_state(meta_->tablet_data_state()); + auto tablet = tablet_; + if (tablet) { + status_pb_out->set_table_type(tablet->table_type()); + auto vector_index_finished_backfills = tablet->vector_indexes().FinishedBackfills(); + if (vector_index_finished_backfills) { + *status_pb_out->mutable_vector_index_finished_backfills() = + std::move(*vector_index_finished_backfills); + } } + disk_size_info.ToPB(status_pb_out); + // Set hide status of the tablet. + status_pb_out->set_is_hidden(meta_->hidden()); + status_pb_out->set_parent_data_compacted(meta_->parent_data_compacted()); + for (const auto& table : meta_->GetAllColocatedTables()) { + status_pb_out->add_colocated_table_ids(table); + } + consensus = consensus_; } - disk_size_info.ToPB(status_pb_out); - // Set hide status of the tablet. - status_pb_out->set_is_hidden(meta_->hidden()); - status_pb_out->set_parent_data_compacted(meta_->parent_data_compacted()); - for (const auto& table : meta_->GetAllColocatedTables()) { - status_pb_out->add_colocated_table_ids(table); + if (consensus) { + consensus->log()->GetLatestEntryOpId().ToPB(status_pb_out->mutable_last_op_id()); } } -Status TabletPeer::RunLogGC() { +Status TabletPeer::RunLogGC(bool rollover) { if (!CheckRunning().ok()) { return Status::OK(); } @@ -833,6 +840,9 @@ Status TabletPeer::RunLogGC() { } else { min_log_index = VERIFY_RESULT(GetEarliestNeededLogIndex()); } + if (rollover) { + RETURN_NOT_OK(log_->AllocateSegmentAndRollOver()); + } int32_t num_gced = 0; return log_->GC(min_log_index, &num_gced); } @@ -1474,7 +1484,7 @@ Status TabletPeer::StartReplicaOperation( void TabletPeer::SetPropagatedSafeTime(HybridTime ht) { auto driver = NewReplicaOperationDriver(nullptr); if (!driver.ok()) { - LOG_WITH_PREFIX(ERROR) << "Failed to create operation driver to set 
propagated hybrid time"; + LOG_WITH_PREFIX(DFATAL) << "Failed to create operation driver to set propagated hybrid time"; return; } (**driver).SetPropagatedSafeTime(ht, tablet_->mvcc_manager()); @@ -1754,19 +1764,17 @@ Status TabletPeer::ChangeRole(const std::string& requestor_uuid) { } switch (peer_pb.member_type()) { - case PeerMemberType::OBSERVER: - FALLTHROUGH_INTENDED; + case PeerMemberType::OBSERVER: [[fallthrough]]; case PeerMemberType::VOTER: - LOG(ERROR) << "Peer " << peer_pb.permanent_uuid() << " is a " - << PeerMemberType_Name(peer_pb.member_type()) - << " Not changing its role after remote bootstrap"; + LOG(WARNING) << "Peer " << peer_pb.permanent_uuid() << " is a " + << PeerMemberType_Name(peer_pb.member_type()) + << " Not changing its role after remote bootstrap"; // Even though this is an error, we return Status::OK() so the remote server doesn't // tombstone its tablet. return Status::OK(); - case PeerMemberType::PRE_OBSERVER: - FALLTHROUGH_INTENDED; + case PeerMemberType::PRE_OBSERVER: [[fallthrough]]; case PeerMemberType::PRE_VOTER: { consensus::ChangeConfigRequestPB req; consensus::ChangeConfigResponsePB resp; diff --git a/src/yb/tablet/tablet_peer.h b/src/yb/tablet/tablet_peer.h index d4090ca5e07f..335af1084705 100644 --- a/src/yb/tablet/tablet_peer.h +++ b/src/yb/tablet/tablet_peer.h @@ -393,7 +393,7 @@ class TabletPeer : public std::enable_shared_from_this, Result NewReplicaOperationDriver(std::unique_ptr* operation); // Tells the tablet's log to garbage collect. - Status RunLogGC(); + Status RunLogGC(bool rollover = false); // Register the maintenance ops associated with this peer's tablet, also invokes // Tablet::RegisterMaintenanceOps(). 
diff --git a/src/yb/tablet/tablet_peer_mm_ops.cc b/src/yb/tablet/tablet_peer_mm_ops.cc index 270a585ab3f3..df50e78dd963 100644 --- a/src/yb/tablet/tablet_peer_mm_ops.cc +++ b/src/yb/tablet/tablet_peer_mm_ops.cc @@ -98,7 +98,7 @@ void LogGCOp::Perform() { Status s = tablet_peer_->RunLogGC(); if (!s.ok()) { s = s.CloneAndPrepend("Unexpected error while running Log GC from TabletPeer"); - LOG(ERROR) << s.ToString(); + LOG(DFATAL) << s.ToString(); } sem_.unlock(); diff --git a/src/yb/tablet/tablet_snapshots.cc b/src/yb/tablet/tablet_snapshots.cc index 421f0021c559..0debed53e06e 100644 --- a/src/yb/tablet/tablet_snapshots.cc +++ b/src/yb/tablet/tablet_snapshots.cc @@ -527,22 +527,7 @@ Status TabletSnapshots::RestoreCheckpoint( return STATUS(IllegalState, "Unable to copy checkpoint files", s.ToString()); } - { - auto& env = this->env(); - auto children = VERIFY_RESULT(env.GetChildren(db_dir, ExcludeDots::kTrue)); - for (const auto& child : children) { - if (!child.starts_with(docdb::kVectorIndexDirPrefix)) { - continue; - } - auto source_dir = JoinPathSegments(db_dir, child); - if (!env.DirExists(source_dir)) { - continue; - } - auto dest_dir = docdb::GetStorageDir(db_dir, child); - LOG_WITH_PREFIX(INFO) << "Moving " << source_dir << " => " << dest_dir; - RETURN_NOT_OK(env.RenameFile(source_dir, dest_dir)); - } - } + RETURN_NOT_OK(MoveChildren(this->env(), db_dir, docdb::IncludeIntents::kFalse)); auto tablet_metadata_file = TabletMetadataFile(db_dir); if (env().FileExists(tablet_metadata_file)) { @@ -721,8 +706,8 @@ Status TabletSnapshots::CreateCheckpoint( Status TabletSnapshots::DoCreateCheckpoint( const std::string& dir, CreateIntentsCheckpointIn create_intents_checkpoint_in) { - auto temp_intents_dir = docdb::GetStorageDir(dir, kIntentsDirName); - auto final_intents_dir = docdb::GetStorageCheckpointDir(dir, kIntentsDirName); + auto temp_intents_dir = docdb::GetStorageDir(dir, docdb::kIntentsDirName); + auto final_intents_dir = docdb::GetStorageCheckpointDir(dir, 
docdb::kIntentsDirName); if (has_intents_db()) { RETURN_NOT_OK(rocksdb::checkpoint::CreateCheckpoint(&intents_db(), temp_intents_dir)); diff --git a/src/yb/tablet/transaction_participant.cc b/src/yb/tablet/transaction_participant.cc index 777eb5e8e552..d05f9bcf309b 100644 --- a/src/yb/tablet/transaction_participant.cc +++ b/src/yb/tablet/transaction_participant.cc @@ -696,6 +696,7 @@ class TransactionParticipant::Impl } void Abort(const TransactionId& id, TransactionStatusCallback callback) { + VLOG_WITH_PREFIX(2) << "Abort transaction: " << id; // We are not trying to cleanup intents here because we don't know whether this transaction // has intents of not. auto lock_and_iterator_result = LockAndFind( @@ -784,8 +785,8 @@ class TransactionParticipant::Impl auto id = FullyDecodeTransactionId(data.state.transaction_id()); if (!id.ok()) { - LOG(ERROR) << "Could not decode transaction details, whose apply record OpId was: " - << data.op_id; + LOG(DFATAL) << "Could not decode transaction details, whose apply record OpId was: " + << data.op_id << ": " << id.status(); return id.status(); } diff --git a/src/yb/tablet/write_query.cc b/src/yb/tablet/write_query.cc index f747abc1dc88..bcbfb746c359 100644 --- a/src/yb/tablet/write_query.cc +++ b/src/yb/tablet/write_query.cc @@ -45,6 +45,7 @@ #include "yb/tserver/tserver.pb.h" +#include "yb/util/debug-util.h" #include "yb/util/logging.h" #include "yb/util/metrics.h" #include "yb/util/scope_exit.h" @@ -63,6 +64,9 @@ TAG_FLAG(disable_alter_vs_write_mutual_exclusion, advanced); DEFINE_test_flag(bool, writequery_stuck_from_callback_leak, false, "Simulate WriteQuery stuck because of the update index flushed rpc call back leak"); +DECLARE_bool(batch_tablet_metrics_update); +DECLARE_bool(ysql_analyze_dump_metrics); + namespace yb { namespace tablet { @@ -203,6 +207,14 @@ WriteQuery::WriteQuery( start_time_(MonoTime::Now()), execute_mode_(ExecuteMode::kSimple) { IncrementActiveWriteQueryObjectsBy(1); + auto res = tablet_safe(); + if 
(res.ok()) { + global_tablet_metrics_ = (*res)->metrics(); + } + + metrics_ = std::make_shared( + GetAtomicFlag(&FLAGS_batch_tablet_metrics_update) + ? &scoped_tablet_metrics_ : global_tablet_metrics_); } LWWritePB& WriteQuery::request() { @@ -281,6 +293,21 @@ void WriteQuery::Release() { WriteQuery::~WriteQuery() { IncrementActiveWriteQueryObjectsBy(-1); + + // Any metrics updated after destroying the WriteQuery + // object cannot be sent with the response PB. So, update + // global tablet metrics directly from now. + *metrics_ = global_tablet_metrics_; + + if (global_tablet_metrics_) { + scoped_tablet_metrics_.MergeAndClear(global_tablet_metrics_); + } + auto tablet_result = tablet_safe(); + if (tablet_result.ok()) { + scoped_statistics_.MergeAndClear( + (*tablet_result)->regulardb_statistics().get(), + (*tablet_result)->intentsdb_statistics().get()); + } } void WriteQuery::set_client_request(std::reference_wrapper req) { @@ -309,12 +336,11 @@ void WriteQuery::Finished(WriteOperation* operation, const Status& status) { auto tablet = *tablet_result; if (status.ok()) { - TabletMetrics* metrics = tablet->metrics(); - if (metrics) { + if (metrics_) { auto op_duration_usec = make_unsigned(MonoDelta(MonoTime::Now() - start_time_).ToMicroseconds()); - metrics->Increment(tablet::TabletEventStats::kQlWriteLatency, op_duration_usec); + (*metrics_)->Increment(tablet::TabletEventStats::kQlWriteLatency, op_duration_usec); } } @@ -715,7 +741,7 @@ Status WriteQuery::DoExecute() { dockv::PartialRangeKeyIntents partial_range_key_intents(metadata.UsePartialRangeKeyIntents()); prepare_result_ = VERIFY_RESULT(docdb::PrepareDocWriteOperation( - doc_ops_, write_batch.read_pairs(), tablet->metrics(), isolation_level_, row_mark_type, + doc_ops_, write_batch.read_pairs(), metrics_, isolation_level_, row_mark_type, transactional_table, write_batch.has_transaction(), deadline(), partial_range_key_intents, tablet->shared_lock_manager())); @@ -772,7 +798,7 @@ Status WriteQuery::DoExecute() { 
return docdb::ResolveOperationConflicts( doc_ops_, conflict_management_policy, now, write_batch.transaction().pg_txn_start_us(), request_start_us(), request_id, tablet->doc_db(), partial_range_key_intents, - transaction_participant, tablet->metrics(), &prepare_result_.lock_batch, wait_queue, + transaction_participant, metrics_, &prepare_result_.lock_batch, wait_queue, deadline(), [this, now](const Result& result) { if (!result.ok()) { @@ -808,7 +834,7 @@ Status WriteQuery::DoExecute() { doc_ops_, conflict_management_policy, write_batch, tablet->clock()->Now(), read_time_ ? read_time_.read : HybridTime::kMax, write_batch.transaction().pg_txn_start_us(), request_start_us(), request_id, tablet->doc_db(), partial_range_key_intents, - transaction_participant, tablet->metrics(), + transaction_participant, metrics_, &prepare_result_.lock_batch, wait_queue, is_advisory_lock_request, deadline(), [this](const Result& result) { if (!result.ok()) { @@ -851,7 +877,7 @@ Status WriteQuery::DoTransactionalConflictsResolved() { safe_time = VERIFY_RESULT(tablet->SafeTime(RequireLease::kTrue)); read_time_ = ReadHybridTime::FromHybridTimeRange( {safe_time, tablet->clock()->NowRange().second}); - tablet->metrics()->Increment(tablet::TabletCounters::kPickReadTimeOnDocDB); + (*metrics_)->Increment(tablet::TabletCounters::kPickReadTimeOnDocDB); } else if (prepare_result_.need_read_snapshot && isolation_level_ == IsolationLevel::SERIALIZABLE_ISOLATION) { return STATUS_FORMAT( @@ -872,7 +898,7 @@ Status WriteQuery::DoCompleteExecute(HybridTime safe_time) { auto tablet = VERIFY_RESULT(tablet_safe()); if (prepare_result_.need_read_snapshot && !read_time_) { // A read_time will be picked by the below ScopedReadOperation::Create() call. 
- tablet->metrics()->Increment(tablet::TabletCounters::kPickReadTimeOnDocDB); + (*metrics_)->Increment(tablet::TabletCounters::kPickReadTimeOnDocDB); } // For WriteQuery requests with execution mode kCql and kPgsql, we perform schema version checks // in two places: @@ -903,18 +929,13 @@ Status WriteQuery::DoCompleteExecute(HybridTime safe_time) { read_time_)) : ScopedReadOperation(); - docdb::DocDBStatistics statistics; - auto scope_exit = ScopeExit([&statistics, tablet] { - statistics.MergeAndClear( - tablet->regulardb_statistics().get(), tablet->intentsdb_statistics().get()); - }); docdb::ReadOperationData read_operation_data { .deadline = deadline(), .read_time = prepare_result_.need_read_snapshot ? read_op.read_time() // When need_read_snapshot is false, this time is used only to write TTL field of record. : ReadHybridTime::SingleTime(tablet->clock()->Now()), - .statistics = &statistics, + .statistics = &scoped_statistics_, }; // We expect all read operations for this transaction to be done in AssembleDocWriteBatch. 
Once @@ -1423,5 +1444,17 @@ void WriteQuery::IncrementActiveWriteQueryObjectsBy(int64_t value) { } } +PgsqlResponsePB* WriteQuery::GetPgsqlResponseForMetricsCapture() const { + if (!pgsql_write_ops_.empty()) { + auto& write_op = pgsql_write_ops_.at(0); + if (GetAtomicFlag(&FLAGS_ysql_analyze_dump_metrics) && + write_op->request().metrics_capture() == + PgsqlMetricsCaptureType::PGSQL_METRICS_CAPTURE_ALL) { + return write_op->response(); + } + } + return nullptr; +} + } // namespace tablet } // namespace yb diff --git a/src/yb/tablet/write_query.h b/src/yb/tablet/write_query.h index 8291f2736c82..ad5fb9ac178a 100644 --- a/src/yb/tablet/write_query.h +++ b/src/yb/tablet/write_query.h @@ -18,12 +18,14 @@ #include "yb/docdb/docdb_fwd.h" #include "yb/docdb/docdb.h" #include "yb/docdb/doc_operation.h" +#include "yb/docdb/docdb_statistics.h" #include "yb/docdb/lock_batch.h" #include "yb/rpc/rpc_context.h" #include "yb/tablet/tablet_fwd.h" +#include "yb/tablet/tablet_metrics.h" #include "yb/tserver/tserver.fwd.h" #include "yb/util/operation_counter.h" @@ -119,6 +121,12 @@ class WriteQuery { uint64_t request_start_us() const { return request_start_us_; } + std::shared_ptr metrics() { return metrics_; } + + PgsqlResponsePB* GetPgsqlResponseForMetricsCapture() const; + ScopedTabletMetrics scoped_tablet_metrics() { return scoped_tablet_metrics_; } + docdb::DocDBStatistics scoped_statistics() { return scoped_statistics_; } + private: friend struct UpdateQLIndexesTask; enum class ExecuteMode; @@ -243,6 +251,17 @@ class WriteQuery { // Stores the start time of the underlying rpc request that created this WriteQuery. // The field is consistent across failed ReadRpc/WriteRpc retries. uint64_t request_start_us_ = 0; + + // Metrics that are stored for the lifetime of a WriteQuery object and returned + // with PgsqlResponsePB. 
+ ScopedTabletMetrics scoped_tablet_metrics_; + TabletMetrics* global_tablet_metrics_ = nullptr; + + // Stores either scoped_tablet_metrics_ or global_tablet_metrics_ + // depending on the batch_tablet_metrics_update gflag. This points + // to global_tablet_metrics_ once the WriteQuery object is destroyed. + std::shared_ptr metrics_; + docdb::DocDBStatistics scoped_statistics_; }; } // namespace tablet diff --git a/src/yb/tools/data-patcher.cc b/src/yb/tools/data-patcher.cc index f49ff1f58fff..3ab7b262a129 100644 --- a/src/yb/tools/data-patcher.cc +++ b/src/yb/tools/data-patcher.cc @@ -1090,7 +1090,7 @@ class ApplyPatch { LOG(INFO) << "Renaming " << src << " to " << dst; Status s = env_->RenameFile(src, dst); if (!s.ok()) { - LOG(ERROR) << "Error renaming " << src << " to " << dst << ": " << s; + LOG(DFATAL) << "Error renaming " << src << " to " << dst << ": " << s; } return s; } diff --git a/src/yb/tools/fs_tool.cc b/src/yb/tools/fs_tool.cc index 7b38c0767e05..1b3a8bda8bd0 100644 --- a/src/yb/tools/fs_tool.cc +++ b/src/yb/tools/fs_tool.cc @@ -217,12 +217,8 @@ Status FsTool::PrintLogSegmentHeader(const string& path, auto segment_result = ReadableLogSegment::Open(fs_manager_->env(), path); if (!segment_result.ok()) { auto s = segment_result.status(); - if (s.IsUninitialized()) { - LOG(ERROR) << path << " is not initialized: " << s.ToString(); - return Status::OK(); - } - if (s.IsCorruption()) { - LOG(ERROR) << path << " is corrupt: " << s.ToString(); + if (s.IsUninitialized() || s.IsCorruption()) { + LOG(DFATAL) << path << " is not valid: " << s; return Status::OK(); } return s.CloneAndPrepend("Unexpected error reading log segment " + path); diff --git a/src/yb/tools/yb-admin-snapshot-schedule-test.cc b/src/yb/tools/yb-admin-snapshot-schedule-test.cc index fe2ee0188e03..a1a722a8f4ba 100644 --- a/src/yb/tools/yb-admin-snapshot-schedule-test.cc +++ b/src/yb/tools/yb-admin-snapshot-schedule-test.cc @@ -262,8 +262,12 @@ class YbAdminSnapshotScheduleTest : public 
AdminTestBase { std::string seq_no{VERIFY_RESULT(GetMemberAsStr(out, "seq_no"))}; return WaitFor([&]() -> Result { - auto out = VERIFY_RESULT(CallJsonAdmin("list_clones", source_namespace_id, seq_no)); - const auto entries = out.GetArray(); + auto out = CallJsonAdmin("list_clones", source_namespace_id, seq_no); + if (!out.ok()) { + LOG(WARNING) << "Failed to list clones: " << out.status(); + return false; + } + const auto entries = out->GetArray(); SCHECK_EQ(entries.Size(), 1, IllegalState, "Wrong number of entries. Expected 1"); auto state = master::SysCloneStatePB::CLONE_SCHEMA_STARTED; master::SysCloneStatePB::State_Parse( @@ -1295,14 +1299,24 @@ class YbAdminSnapshotScheduleTestWithYsqlColocationRestoreParam: } }; +namespace { + +std::string TestParamToString(const testing::TestParamInfo& param_info) { + return AsString(get<0>(param_info.param)).substr(1) + "_" + + AsString(get<1>(param_info.param)).substr(1); +} + +} // namespace + INSTANTIATE_TEST_CASE_P( - ColocationAndRestoreType, YbAdminSnapshotScheduleTestWithYsqlColocationRestoreParam, + , YbAdminSnapshotScheduleTestWithYsqlColocationRestoreParam, ::testing::Values( ScheduleRestoreTestParams(YsqlColocationConfig::kNotColocated, RestoreType::kPITR), ScheduleRestoreTestParams(YsqlColocationConfig::kDBColocated, RestoreType::kPITR), ScheduleRestoreTestParams(YsqlColocationConfig::kTablegroup, RestoreType::kPITR), ScheduleRestoreTestParams(YsqlColocationConfig::kNotColocated, RestoreType::kClone), - ScheduleRestoreTestParams(YsqlColocationConfig::kDBColocated, RestoreType::kClone))); + ScheduleRestoreTestParams(YsqlColocationConfig::kDBColocated, RestoreType::kClone)), + TestParamToString); TEST_P(YbAdminSnapshotScheduleTestWithYsqlColocationRestoreParam, Pgsql) { auto schedule_id = ASSERT_RESULT(PreparePgWithColocatedParam()); diff --git a/src/yb/tools/yb-backup/yb-backup-cross-feature-test.cc b/src/yb/tools/yb-backup/yb-backup-cross-feature-test.cc index f73e5cf62d16..20a88a6a7b91 100644 --- 
a/src/yb/tools/yb-backup/yb-backup-cross-feature-test.cc +++ b/src/yb/tools/yb-backup/yb-backup-cross-feature-test.cc @@ -2627,12 +2627,9 @@ TEST_P( {"--backup_location", backup_dir, "--keyspace", Format("ysql.$0", backup_db_name), "create"})); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(0), {}, - tserver::FlushTabletsRequestPB::COMPACT)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(1), {}, - tserver::FlushTabletsRequestPB::COMPACT)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(2), {}, - tserver::FlushTabletsRequestPB::COMPACT)); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(0, {})); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(1, {})); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(2, {})); ASSERT_OK(RunBackupCommand( {"--backup_location", backup_dir, "--keyspace", Format("ysql.$0", restore_db_name), @@ -2643,12 +2640,9 @@ TEST_P( ASSERT_NO_FATALS( InsertRows(Format("INSERT INTO $0 VALUES (9,9,9), (10,10,10), (11,11,11)", table_name), 3)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(0), {}, - tserver::FlushTabletsRequestPB::COMPACT)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(1), {}, - tserver::FlushTabletsRequestPB::COMPACT)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(2), {}, - tserver::FlushTabletsRequestPB::COMPACT)); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(0, {})); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(1, {})); + ASSERT_OK(cluster_->CompactTabletsOnSingleTServer(2, {})); LOG(INFO) << "Test finished: " << CURRENT_TEST_CASE_AND_TEST_NAME_STR(); } diff --git a/src/yb/tserver/metrics_snapshotter.cc b/src/yb/tserver/metrics_snapshotter.cc index cd2eafb4ecf7..bcd1b689b2fb 100644 --- a/src/yb/tserver/metrics_snapshotter.cc +++ b/src/yb/tserver/metrics_snapshotter.cc @@ -504,8 +504,8 @@ Status MetricsSnapshotter::Thread::DoMetricsSnapshot() { 
uint64_t system_ticks = cur_ticks[2] - prev_ticks_[2]; prev_ticks_ = cur_ticks; if (total_ticks <= 0) { - YB_LOG_EVERY_N_SECS(ERROR, 120) << Format("Failed to calculate CPU usage - " - "invalid total CPU ticks: $0.", total_ticks); + YB_LOG_EVERY_N_SECS(DFATAL, 120) + << Format("Failed to calculate CPU usage - invalid total CPU ticks: $0.", total_ticks); } else { double cpu_usage_user = static_cast(user_ticks) / total_ticks; double cpu_usage_system = static_cast(system_ticks) / total_ticks; diff --git a/src/yb/tserver/pg_client_service.cc b/src/yb/tserver/pg_client_service.cc index f047f8edf638..1ca0c54b664a 100644 --- a/src/yb/tserver/pg_client_service.cc +++ b/src/yb/tserver/pg_client_service.cc @@ -139,6 +139,10 @@ TAG_FLAG(check_pg_object_id_allocators_interval_secs, advanced); DEFINE_NON_RUNTIME_int64(shmem_exchange_idle_timeout_ms, 2000 * yb::kTimeMultiplier, "Idle timeout interval in milliseconds used by shared memory exchange thread pool."); +DEFINE_test_flag(bool, enable_ysql_operation_lease_expiry_check, true, + "Whether tservers should monitor their ysql op lease and kill their hosted pg " + "sessions when it expires. 
Only available as a flag for tests."); + DECLARE_uint64(cdc_intent_retention_ms); DECLARE_uint64(transaction_heartbeat_usec); DECLARE_int32(cdc_read_rpc_timeout_ms); @@ -533,6 +537,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv table_cache_(client_future_), check_expired_sessions_("check_expired_sessions", &messenger->scheduler()), check_object_id_allocators_("check_object_id_allocators", &messenger->scheduler()), + check_ysql_lease_("check_ysql_lease_liveness", &messenger->scheduler()), response_cache_(parent_mem_tracker, metric_entity), instance_id_(permanent_uuid), shared_mem_pool_(parent_mem_tracker, instance_id_), @@ -566,6 +571,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv DCHECK(!permanent_uuid.empty()); ScheduleCheckExpiredSessions(CoarseMonoClock::now()); ScheduleCheckObjectIdAllocators(); + ScheduleCheckYsqlLeaseWithNoLease(); if (FLAGS_pg_client_use_shared_memory) { WARN_NOT_OK(SharedExchange::Cleanup(instance_id_), "Cleanup shared memory failed"); } @@ -591,6 +597,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv sessions.clear(); check_expired_sessions_.Shutdown(); check_object_id_allocators_.Shutdown(); + check_ysql_lease_.Shutdown(); if (exchange_thread_pool_) { exchange_thread_pool_->Shutdown(); } @@ -890,7 +897,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv std::future> DoGetOldTransactionsForTablet( const uint32_t min_txn_age_ms, const uint32_t max_num_txns, - const std::shared_ptr& proxy, const TabletId& tablet_id) { + const RemoteTabletServerPtr& remote_ts, const TabletId& tablet_id) { auto req = std::make_shared(); req->set_tablet_id(tablet_id); req->set_min_txn_age_ms(min_txn_age_ms); @@ -899,13 +906,14 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv return MakeFuture>([&](auto callback) { auto resp = std::make_shared(); std::shared_ptr controller = 
std::make_shared(); - proxy->GetOldTransactionsAsync( + remote_ts->proxy()->GetOldTransactionsAsync( *req.get(), resp.get(), controller.get(), - [req, callback, controller, resp] { + [req, callback, controller, resp, remote_ts] { auto s = controller->status(); if (!s.ok()) { - s = s.CloneAndPrepend( - Format("GetOldTransactions request for tablet $0 failed: ", req->tablet_id())); + s = s.CloneAndPrepend(Format( + "GetOldTransactions request for tablet $0 to tserver $1 failed: ", + req->tablet_id(), remote_ts->permanent_uuid())); return callback(s); } callback(OldTxnsRespInfo { @@ -918,7 +926,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv std::future> DoGetOldSingleShardWaiters( const uint32_t min_txn_age_ms, const uint32_t max_num_txns, - const std::shared_ptr& proxy) { + const RemoteTabletServerPtr& remote_ts) { auto req = std::make_shared(); req->set_min_txn_age_ms(min_txn_age_ms); req->set_max_num_txns(max_num_txns); @@ -926,12 +934,14 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv return MakeFuture>([&](auto callback) { auto resp = std::make_shared(); std::shared_ptr controller = std::make_shared(); - proxy->GetOldSingleShardWaitersAsync( + remote_ts->proxy()->GetOldSingleShardWaitersAsync( *req.get(), resp.get(), controller.get(), - [req, callback, controller, resp] { + [req, callback, controller, resp, remote_ts] { auto s = controller->status(); if (!s.ok()) { - s = s.CloneAndPrepend("GetOldSingleShardWaiters request failed: "); + s = s.CloneAndPrepend(Format( + "GetOldSingleShardWaiters request to tserver $0 failed: ", + remote_ts->permanent_uuid())); return callback(s); } callback(OldTxnsRespInfo { @@ -1061,17 +1071,17 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv auto proxy = remote_tserver->proxy(); for (const auto& tablet : txn_status_tablets.global_tablets) { res_futures.push_back( - DoGetOldTransactionsForTablet(min_txn_age_ms, 
max_num_txns, proxy, tablet)); + DoGetOldTransactionsForTablet(min_txn_age_ms, max_num_txns, remote_tserver, tablet)); status_tablet_ids.insert(tablet); } for (const auto& tablet : txn_status_tablets.placement_local_tablets) { res_futures.push_back( - DoGetOldTransactionsForTablet(min_txn_age_ms, max_num_txns, proxy, tablet)); + DoGetOldTransactionsForTablet(min_txn_age_ms, max_num_txns, remote_tserver, tablet)); status_tablet_ids.insert(tablet); } // Query for oldest single shard waiting transactions as well. res_futures.push_back( - DoGetOldSingleShardWaiters(min_txn_age_ms, max_num_txns, proxy)); + DoGetOldSingleShardWaiters(min_txn_age_ms, max_num_txns, remote_tserver)); } // Limit num transactions to max_num_txns for which lock status is being queried. // @@ -1083,10 +1093,13 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv OldTxnMetadataVariantComparator> old_txns_pq; StatusToPB(Status::OK(), resp->mutable_status()); for (auto it = res_futures.begin(); - it != res_futures.end() && resp->status().code() == AppStatusPB::OK; ) { + it != res_futures.end() && resp->status().code() == AppStatusPB::OK; ++it) { auto res = it->get(); if (!res.ok()) { - return res.status(); + // A node could be unavailable. We need not fail the pg_locks query if we see at least one + // response for all of the status tablets. + LOG(INFO) << res.status(); + continue; } std::visit([&](auto&& old_txns_resp) { @@ -1094,7 +1107,6 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv // Ignore leadership and NOT_FOUND errors as we broadcast the request to all tservers. 
if (old_txns_resp->error().code() == TabletServerErrorPB::NOT_THE_LEADER || old_txns_resp->error().code() == TabletServerErrorPB::TABLET_NOT_FOUND) { - it = res_futures.erase(it); return; } const auto& s = StatusFromPB(old_txns_resp->error().status()); @@ -1114,7 +1126,6 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv old_txns_pq.pop(); } } - it++; }, res->resp_ptr); } if (resp->status().code() != AppStatusPB::OK) { @@ -1912,32 +1923,98 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv table_cache_.InvalidateDbTables(db_oids_updated, db_oids_deleted); } - void ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info, MonoTime time) { - std::lock_guard lock(mutex_); - last_lease_refresh_time_ = time; - if (lease_refresh_info.new_lease()) { - LOG(INFO) << Format( - "Received new lease epoch $0 from the master leader. Clearing all pg sessions.", - lease_refresh_info.lease_epoch()); - lease_epoch_ = lease_refresh_info.lease_epoch(); - auto s = tablet_server_.RestartPG(); - if (!s.ok()) { - LOG(WARNING) << "Failed to restart PG postmaster: " << s; + void ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info) { + { + std::lock_guard lock(mutex_); + lease_expiry_time_ = + CoarseTimePoint{std::chrono::milliseconds(lease_refresh_info.lease_expiry_time_ms())}; + if (lease_expiry_time_ < CoarseMonoClock::Now()) { + // This function is passed the timestamp from before the RefreshYsqlLeaseRpc is sent. So it + // is possible the RPC takes longer than the lease TTL the master gave us, in which case + // this tserver still does not have a live lease. + return; + } + bool had_live_lease = ysql_lease_is_live_; + ysql_lease_is_live_ = true; + if (lease_refresh_info.new_lease() || lease_epoch_ != lease_refresh_info.lease_epoch()) { + LOG(INFO) << Format( + "Received new lease epoch $0 from the master leader. 
Clearing all pg sessions.", + lease_refresh_info.lease_epoch()); + lease_epoch_ = lease_refresh_info.lease_epoch(); + } else if (!had_live_lease) { + LOG(INFO) << Format( + "Master leader refreshed our lease for epoch $0. We thought this lease had " + "expired but it hadn't. Restarting pg.", + lease_epoch_); + } else { + // Lease was live and is live after this update. The epoch didn't change. Nothing left to + // do. + return; } } + // No need to hold lock while restarting the pg process. + WARN_NOT_OK(tablet_server_.RestartPG(), "Failed to restart PG postmaster."); } YSQLLeaseInfo GetYSQLLeaseInfo() { SharedLock lock(mutex_); YSQLLeaseInfo lease_info; - // todo(zdrudi): For now just return is live if we've ever received a lease. - lease_info.is_live = last_lease_refresh_time_.Initialized(); + lease_info.is_live = ysql_lease_is_live_; if (lease_info.is_live) { lease_info.lease_epoch = lease_epoch_; } return lease_info; } + void ScheduleCheckYsqlLeaseWithNoLease() { + ScheduleCheckYsqlLease(CoarseMonoClock::now() + 1s); + } + + void ScheduleCheckYsqlLease(CoarseTimePoint next_check_time) { + check_ysql_lease_.Schedule( + [this, next_check_time](const Status& status) { + if (!status.ok()) { + return; + } + if (CoarseMonoClock::now() < next_check_time) { + ScheduleCheckYsqlLease(next_check_time); + return; + } + CheckYsqlLeaseStatus(); + }, + next_check_time - CoarseMonoClock::now()); + } + + std::optional CheckYsqlLeaseStatusInner() { + { + std::lock_guard lock(mutex_); + if (!ysql_lease_is_live_) { + return {}; + } + if (CoarseMonoClock::now() < lease_expiry_time_) { + return lease_expiry_time_; + } + ysql_lease_is_live_ = false; + LOG(INFO) << "Lease has expired, killing pg sessions."; + } + // todo(zdrudi): make this a fatal? 
+ WARN_NOT_OK(tablet_server_.KillPg(), "Couldn't stop PG"); + return {}; + } + + void CheckYsqlLeaseStatus() { + if (PREDICT_FALSE(!FLAGS_TEST_enable_ysql_operation_lease_expiry_check)) { + ScheduleCheckYsqlLeaseWithNoLease(); + return; + } + auto lease_expiry = CheckYsqlLeaseStatusInner(); + if (lease_expiry) { + ScheduleCheckYsqlLease(*lease_expiry); + } else { + ScheduleCheckYsqlLeaseWithNoLease(); + } + } + void CleanupSessions( std::vector&& expired_sessions, CoarseTimePoint time) { if (expired_sessions.empty()) { @@ -2446,6 +2523,7 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv rpc::ScheduledTaskTracker check_expired_sessions_ GUARDED_BY(mutex_); CoarseTimePoint check_expired_sessions_time_ GUARDED_BY(mutex_); rpc::ScheduledTaskTracker check_object_id_allocators_; + rpc::ScheduledTaskTracker check_ysql_lease_; PgResponseCache response_cache_; @@ -2479,8 +2557,9 @@ class PgClientServiceImpl::Impl : public LeaseEpochValidator, public SessionProv std::optional cdc_state_table_; PgTxnSnapshotManager txn_snapshot_manager_; - MonoTime last_lease_refresh_time_ GUARDED_BY(mutex_); - uint64_t lease_epoch_ GUARDED_BY(mutex_); + CoarseTimePoint lease_expiry_time_ GUARDED_BY(mutex_); + bool ysql_lease_is_live_ GUARDED_BY(mutex_) {false}; + uint64_t lease_epoch_ GUARDED_BY(mutex_) = 0; }; PgClientServiceImpl::PgClientServiceImpl( @@ -2520,9 +2599,9 @@ Result PgClientServiceImpl::GetLocalPgTxnSnapshot( return impl_->GetLocalPgTxnSnapshot(snapshot_id); } -void PgClientServiceImpl::ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& - lease_refresh_info, MonoTime time) { - impl_->ProcessLeaseUpdate(lease_refresh_info, time); +void PgClientServiceImpl::ProcessLeaseUpdate( + const master::RefreshYsqlLeaseInfoPB& lease_refresh_info) { + impl_->ProcessLeaseUpdate(lease_refresh_info); } YSQLLeaseInfo PgClientServiceImpl::GetYSQLLeaseInfo() const { diff --git a/src/yb/tserver/pg_client_service.h b/src/yb/tserver/pg_client_service.h index 
203b73bc9ba8..db0cd857d13a 100644 --- a/src/yb/tserver/pg_client_service.h +++ b/src/yb/tserver/pg_client_service.h @@ -29,6 +29,7 @@ #include "yb/tserver/pg_client.service.h" #include "yb/tserver/pg_txn_snapshot_manager.h" +#include "yb/tserver/ysql_lease.h" namespace yb { @@ -96,7 +97,6 @@ class TserverXClusterContextIf; (CronGetLastMinute) \ (AcquireAdvisoryLock) \ (ReleaseAdvisoryLock) \ - (AcquireObjectLock) \ (ExportTxnSnapshot) \ (ImportTxnSnapshot) \ (ClearExportedTxnSnapshots) \ @@ -109,16 +109,12 @@ class TserverXClusterContextIf; // Forwards call to corresponding PgClientSession async method (see // PG_CLIENT_SESSION_ASYNC_METHODS). #define YB_PG_CLIENT_ASYNC_METHODS \ + (AcquireObjectLock) \ (OpenTable) \ (GetTableKeyRanges) \ /**/ -struct YSQLLeaseInfo { - bool is_live; - uint64_t lease_epoch; -}; - class PgClientServiceImpl : public PgClientServiceIf { public: explicit PgClientServiceImpl( @@ -141,7 +137,7 @@ class PgClientServiceImpl : public PgClientServiceIf { const std::unordered_set& db_oids_deleted); Result GetLocalPgTxnSnapshot(const PgTxnSnapshotLocalId& snapshot_id); - void ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info, MonoTime time); + void ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info); YSQLLeaseInfo GetYSQLLeaseInfo() const; size_t TEST_SessionsCount(); diff --git a/src/yb/tserver/pg_client_session.cc b/src/yb/tserver/pg_client_session.cc index 83ae39ac944c..5902c5d62a32 100644 --- a/src/yb/tserver/pg_client_session.cc +++ b/src/yb/tserver/pg_client_session.cc @@ -12,6 +12,7 @@ // #include "yb/tserver/pg_client_session.h" +#include #include #include @@ -42,8 +43,9 @@ #include "yb/common/common_util.h" #include "yb/common/ql_type.h" #include "yb/common/pgsql_error.h" -#include "yb/common/transaction_error.h" #include "yb/common/schema.h" +#include "yb/common/transaction_error.h" +#include "yb/common/transaction_priority.h" #include "yb/common/wire_protocol.h" #include 
"yb/rpc/lightweight_message.h" @@ -1080,13 +1082,14 @@ class TransactionProvider { } } - Result NextTxnIdForPlain(CoarseTimePoint deadline) { + Result NextTxnMetaForPlain( + CoarseTimePoint deadline, bool is_for_release = false) { + client::internal::InFlightOpsGroupsWithMetadata ops_info; if (!next_plain_) { auto txn = Build(deadline, {}); // Don't execute txn->GetMetadata() here since the transaction is not iniatialized with // its full metadata yet, like isolation level. Synchronizer synchronizer; - client::internal::InFlightOpsGroupsWithMetadata ops_info; if (txn->batcher_if().Prepare( &ops_info, client::ForceConsistentRead::kFalse, deadline, client::Initial::kFalse, synchronizer.AsStdStatusCallback())) { @@ -1095,7 +1098,20 @@ class TransactionProvider { RETURN_NOT_OK(synchronizer.Wait()); next_plain_.swap(txn); } - return next_plain_->id(); + // next_plain_ would be ready at this point i.e status tablet picked. + auto txn_meta_res = next_plain_->metadata(); + if (txn_meta_res.ok()) { + return txn_meta_res; + } + if (!is_for_release) { + return txn_meta_res.status(); + } + // If the transaction has already failed due to some reason, we should release the locks. + // And also reset next_plain_, so the subsequent ysql transaction would use a new docdb txn. 
+ TransactionMetadata txn_meta_for_release; + txn_meta_for_release.transaction_id = next_plain_->id(); + next_plain_ = nullptr; + return txn_meta_for_release; } private: @@ -1195,7 +1211,7 @@ template Request AcquireRequestFor( const std::string& session_host_uuid, const TransactionId& txn_id, SubTransactionId subtxn_id, uint64_t database_id, uint64_t object_id, TableLockType lock_type, uint64_t lease_epoch, - ClockBase* clock, CoarseTimePoint deadline) { + ClockBase* clock, CoarseTimePoint deadline, const TabletId& status_tablet) { auto now = clock->Now(); Request req; if (const auto& wait_state = ash::WaitStateInfo::CurrentWaitState()) { @@ -1214,6 +1230,7 @@ Request AcquireRequestFor( lock->set_database_oid(database_id); lock->set_object_oid(object_id); lock->set_lock_type(lock_type); + req.set_status_tablet(status_tablet); return req; } @@ -1267,7 +1284,8 @@ void ReleaseWithRetries( // interval it can safely give up. The Master is responsible for cleaning up the locks for any // tserver that loses its lease. We have additional retries just to be safe. Also the timeout // used here defaults to 60s, which is much larger than the default lease interval of 15s. - auto timeout = MonoDelta::FromMilliseconds(FLAGS_tserver_yb_client_default_timeout_ms); + auto deadline = MonoTime::Now() + + MonoDelta::FromMilliseconds(FLAGS_tserver_yb_client_default_timeout_ms); if (!lease_validator.IsLeaseValid(release_req->lease_epoch())) { LOG(INFO) << "Lease epoch " << release_req->lease_epoch() << " is not valid. Will not retry " << " Release request " << (VLOG_IS_ON(2) ? 
release_req->ShortDebugString() : ""); @@ -1291,7 +1309,7 @@ void ReleaseWithRetries( ReleaseWithRetries(client, lease_validator, release_req, attempt + 1); } }, - timeout); + ToCoarse(deadline)); } } // namespace @@ -1321,9 +1339,9 @@ class PgClientSession::Impl { PgCreateTable helper(req); RETURN_NOT_OK(helper.Prepare()); - if (xcluster_context()) { - xcluster_context()->PrepareCreateTableHelper(req, helper); - } + if (xcluster_context()) { + xcluster_context()->PrepareCreateTableHelper(req, helper); + } const auto* metadata = VERIFY_RESULT(GetDdlTransactionMetadata( req.use_transaction(), req.use_regular_transaction_block(), context->GetClientDeadline())); @@ -2307,7 +2325,7 @@ class PgClientSession::Impl { return status; } - Status AcquireObjectLock( + Status DoAcquireObjectLock( const PgAcquireObjectLockRequestPB& req, PgAcquireObjectLockResponsePB* resp, rpc::RpcContext* context) { RSTATUS_DCHECK(IsObjectLockingEnabled(), IllegalState, "Table Locking feature not enabled."); @@ -2323,33 +2341,45 @@ class PgClientSession::Impl { if (setup_session_result.is_plain && setup_session_result.session_data.transaction) { RETURN_NOT_OK(setup_session_result.session_data.transaction->GetMetadata(deadline).get()); } - auto& txn_id = setup_session_result.session_data.transaction - ? setup_session_result.session_data.transaction->id() - : VERIFY_RESULT_REF(transaction_provider_.NextTxnIdForPlain(deadline)); + auto txn_meta_res = setup_session_result.session_data.transaction + ? 
setup_session_result.session_data.transaction->GetMetadata(deadline).get() + : transaction_provider_.NextTxnMetaForPlain(deadline); + RETURN_NOT_OK(txn_meta_res); const auto lock_type = static_cast(req.lock_type()); VLOG_WITH_PREFIX_AND_FUNC(1) - << "txn_id " << txn_id + << "txn_id " << txn_meta_res->transaction_id << " lock_type: " << AsString(lock_type) << " req: " << req.ShortDebugString(); + auto callback = MakeRpcOperationCompletionCallback( + std::move(*context), resp, nullptr /* clock */); if (IsTableLockTypeGlobal(lock_type)) { if (setup_session_result.is_plain) { plain_session_has_exclusive_object_locks_.store(true); } auto lock_req = AcquireRequestFor( - instance_uuid(), txn_id, options.active_sub_transaction_id(), req.database_oid(), - req.object_oid(), lock_type, lease_epoch_, context_.clock.get(), deadline); - auto status_future = MakeFuture([&](auto callback) { - client_.AcquireObjectLocksGlobalAsync( - lock_req, callback, - MonoDelta::FromMilliseconds(FLAGS_tserver_yb_client_default_timeout_ms)); - }); - return status_future.get(); + instance_uuid(), txn_meta_res->transaction_id, options.active_sub_transaction_id(), + req.database_oid(), req.object_oid(), lock_type, lease_epoch_, context_.clock.get(), + deadline, txn_meta_res->status_tablet); + client_.AcquireObjectLocksGlobalAsync(lock_req, std::move(callback), deadline); + return Status::OK(); } auto lock_req = AcquireRequestFor( - instance_uuid(), txn_id, options.active_sub_transaction_id(), req.database_oid(), - req.object_oid(), lock_type, lease_epoch_, context_.clock.get(), deadline); - return ts_lock_manager()->AcquireObjectLocks(lock_req, deadline); + instance_uuid(), txn_meta_res->transaction_id, options.active_sub_transaction_id(), + req.database_oid(), req.object_oid(), lock_type, lease_epoch_, context_.clock.get(), + deadline, txn_meta_res->status_tablet); + ts_lock_manager()->AcquireObjectLocksAsync(lock_req, deadline, std::move(callback)); + return Status::OK(); + } + + void 
AcquireObjectLock( + const PgAcquireObjectLockRequestPB& req, PgAcquireObjectLockResponsePB* resp, + yb::rpc::RpcContext context) { + auto s = DoAcquireObjectLock(req, resp, &context); + if (!s.ok()) { + StatusToPB(s, resp->mutable_status()); + context.RespondSuccess(); + } } void StartShutdown() { @@ -2429,7 +2459,7 @@ class PgClientSession::Impl { // If we failed to report the status of this DDL transaction, we can just log and ignore // it, as the poller in the YB-Master will figure out the status of this transaction using // the transaction status tablet and PG catalog. - ERROR_NOT_OK(client_.ReportYsqlDdlTxnStatus(*metadata, *commit), + WARN_NOT_OK(client_.ReportYsqlDdlTxnStatus(*metadata, *commit), Format("Sending ReportYsqlDdlTxnStatus call of $0 failed", *commit)); } @@ -2444,8 +2474,8 @@ class PgClientSession::Impl { // (commit.has_value() is false), the purpose is to use the side effect of // WaitForDdlVerificationToFinish to trigger the start of a background task to // complete the DDL transaction at the DocDB side. 
- ERROR_NOT_OK(client_.WaitForDdlVerificationToFinish(*metadata), - "WaitForDdlVerificationToFinish call failed"); + WARN_NOT_OK(client_.WaitForDdlVerificationToFinish(*metadata), + "WaitForDdlVerificationToFinish call failed"); } } } @@ -2654,7 +2684,8 @@ class PgClientSession::Impl { kind = PgClientSessionKind::kDdl; EnsureSession(kind, deadline); RETURN_NOT_OK(GetDdlTransactionMetadata( - true /* use_transaction */, false /* use_regular_transaction_block */, deadline)); + true /* use_transaction */, false /* use_regular_transaction_block */, deadline, + options.priority())); } else { DCHECK(kind == PgClientSessionKind::kPlain); auto& session = EnsureSession(kind, deadline); @@ -2694,6 +2725,9 @@ class PgClientSession::Impl { RSTATUS_DCHECK( is_plain_session, IllegalState, "Read time manipulation can't be specified for non kPlain sessions"); + RSTATUS_DCHECK( + !options.defer_read_point(), IllegalState, + "Cannot manipulate read time when read point needs to be deferred."); ProcessReadTimeManipulation( options.read_time_manipulation(), read_time_serial_no, ClampUncertaintyWindow(options.clamp_uncertainty_window())); @@ -2713,15 +2747,8 @@ class PgClientSession::Impl { RETURN_NOT_OK( UpdateReadPointForXClusterConsistentReads(options, deadline, session.read_point())); - if (options.defer_read_point()) { - // Deferring allows avoiding read restart errors in case of a READ ONLY transaction by setting - // the read point to the global limit (i.e., read time + max clock skew) and hence waiting out - // any ambiguity of data visibility that might arise from clock skew. - RSTATUS_DCHECK( - !txn, IllegalState, - "Deferring read point is only allowed in SERIALIZABLE DEFERRABLE READ ONLY, a distributed " - "transaction is unexpected here."); - + if (!options.ddl_mode() && !options.use_catalog_session() && options.defer_read_point()) { + // For DMLs, only fast path writes cannot be deferred. 
RETURN_NOT_OK(session.read_point()->TrySetDeferredCurrentReadTime()); } @@ -2880,8 +2907,10 @@ class PgClientSession::Impl { return Status::OK(); } + // All DDLs use kHighestPriority unless specified otherwise. Result GetDdlTransactionMetadata( - bool use_transaction, bool use_regular_transaction_block, CoarseTimePoint deadline) { + bool use_transaction, bool use_regular_transaction_block, CoarseTimePoint deadline, + uint64_t priority = kHighPriTxnUpperBound) { if (!use_transaction) { return nullptr; } @@ -2910,6 +2939,7 @@ class PgClientSession::Impl { ? IsolationLevel::SERIALIZABLE_ISOLATION : IsolationLevel::SNAPSHOT_ISOLATION; txn = transaction_provider_.Take(deadline); RETURN_NOT_OK(txn->Init(isolation)); + txn->SetPriority(priority); txn->SetLogPrefixTag(kTxnLogPrefixTag, id_); ddl_txn_metadata_ = VERIFY_RESULT(Copy(txn->GetMetadata(deadline).get())); EnsureSession(kSessionKind, deadline)->SetTransaction(txn); @@ -3132,7 +3162,7 @@ class PgClientSession::Impl { // collected. One way to fix this we need to add a periodic scan job in YB-Master to look // for any table/index that are involved in a DDL transaction and start a background task // to complete the DDL transaction at the DocDB side. - LOG(ERROR) << "DdlAtomicityFinishTransaction failed: " << status; + LOG(DFATAL) << "DdlAtomicityFinishTransaction failed: " << status; } return MergeStatus(std::move(commit_status), std::move(status)); } @@ -3173,9 +3203,12 @@ class PgClientSession::Impl { plain_session_has_exclusive_object_locks_.store(false); DEBUG_ONLY_TEST_SYNC_POINT("PlainTxnStateReset"); } + auto txn_meta_res = txn + ? txn->GetMetadata(deadline).get() + : transaction_provider_.NextTxnMetaForPlain(deadline, !subtxn_id); + RETURN_NOT_OK(txn_meta_res); return DoReleaseObjectLocks( - txn ? 
txn->id() : VERIFY_RESULT_REF(transaction_provider_.NextTxnIdForPlain(deadline)), - subtxn_id, deadline, has_exclusive_locks); + txn_meta_res->transaction_id, subtxn_id, deadline, has_exclusive_locks); } Status DoReleaseObjectLocks( diff --git a/src/yb/tserver/pg_client_session.h b/src/yb/tserver/pg_client_session.h index 5622ae767264..aaf6bd808d58 100644 --- a/src/yb/tserver/pg_client_session.h +++ b/src/yb/tserver/pg_client_session.h @@ -68,7 +68,6 @@ namespace tserver { (WaitForBackendsCatalogVersion) \ (AcquireAdvisoryLock) \ (ReleaseAdvisoryLock) \ - (AcquireObjectLock) \ /**/ // These methods may respond with Status::OK() and continue async processing (including network @@ -77,6 +76,7 @@ namespace tserver { // If such method responds with error Status, it will be handled by the upper layer that will fill // response with error status and call context.RespondSuccess. #define PG_CLIENT_SESSION_ASYNC_METHODS \ + (AcquireObjectLock) \ (GetTableKeyRanges) \ /**/ diff --git a/src/yb/tserver/pg_create_table.cc b/src/yb/tserver/pg_create_table.cc index cbb273ff2c73..32aaba329ebc 100644 --- a/src/yb/tserver/pg_create_table.cc +++ b/src/yb/tserver/pg_create_table.cc @@ -13,7 +13,11 @@ #include "yb/tserver/pg_create_table.h" +#include "yb/cdc/xcluster_types.h" + #include "yb/client/client.h" +#include "yb/client/schema.h" // YB_TODO(#12770): TO BE DELETED AFTER REWORKING + // PG-SCHEMA-NAME USAGE IN CDC #include "yb/client/table.h" #include "yb/client/table_creator.h" @@ -131,11 +135,15 @@ Status PgCreateTable::Exec( } schema_builder_.SetTableProperties(table_properties); } + + RETURN_NOT_OK(schema_builder_.Build(&schema)); + + // YB_TODO(#12770): TO BE DELETED AFTER REWORKING PG-SCHEMA-NAME USAGE IN CDC if (!req_.schema_name().empty()) { - schema_builder_.SetSchemaName(req_.schema_name()); + client::internal::GetSchema(&schema).SetSchemaName(req_.schema_name()); } + // \YB_TODO(#12770) - RETURN_NOT_OK(schema_builder_.Build(&schema)); const auto split_rows = 
VERIFY_RESULT(BuildSplitRows(schema)); // Create table. @@ -152,6 +160,10 @@ Status PgCreateTable::Exec( if (req_.is_shared_table()) { table_creator->is_pg_shared_table(); } + if (req_.schema_name() == "cron" && req_.table_name() == "job") { + table_creator->internal_table_type(master::InternalTableType::PG_CRON_JOB_TABLE); + } + if (hash_schema_) { table_creator->hash_schema(*hash_schema_); } else if (!req_.is_pg_catalog_table()) { @@ -201,6 +213,11 @@ Status PgCreateTable::Exec( } } + // If the table was created in the xCluster DDL replication extension. + if (req_.schema_name() == xcluster::kDDLQueuePgSchemaName) { + table_creator->internal_table_type(master::InternalTableType::XCLUSTER_DDL_REPLICATION_TABLE); + } + if (xcluster_source_table_id_.IsValid()) { table_creator->xcluster_source_table_id(xcluster_source_table_id_.GetYbTableId()); } @@ -415,7 +432,7 @@ Status CreateSequencesDataTable(client::YBClient* client, CoarseTimePoint deadli LOG(INFO) << "Table '" << table_name.ToString() << "' already exists"; } else { // If any other error, report that! - LOG(ERROR) << "Error creating table '" << table_name.ToString() << "': " << status; + LOG(WARNING) << "Error creating table '" << table_name.ToString() << "': " << status; return status; } return Status::OK(); diff --git a/src/yb/tserver/remote_bootstrap_client.cc b/src/yb/tserver/remote_bootstrap_client.cc index 2ac87c49625c..aed3a65542a7 100644 --- a/src/yb/tserver/remote_bootstrap_client.cc +++ b/src/yb/tserver/remote_bootstrap_client.cc @@ -644,19 +644,7 @@ Status RemoteBootstrapClient::DownloadRocksDBFiles() { } // To avoid adding new file type to remote bootstrap we move intents as subdir of regular DB. 
auto& env = this->env(); - auto children = VERIFY_RESULT(env.GetChildren(rocksdb_dir, ExcludeDots::kTrue)); - for (const auto& child : children) { - if (!child.starts_with(docdb::kVectorIndexDirPrefix) && child != tablet::kIntentsDirName) { - continue; - } - auto source_dir = JoinPathSegments(rocksdb_dir, child); - if (!env.DirExists(source_dir)) { - continue; - } - auto dest_dir = docdb::GetStorageDir(rocksdb_dir, child); - LOG_WITH_PREFIX(INFO) << "Moving " << source_dir << " => " << dest_dir; - RETURN_NOT_OK(env.RenameFile(source_dir, dest_dir)); - } + RETURN_NOT_OK(MoveChildren(env, rocksdb_dir, docdb::IncludeIntents::kTrue)); if (FLAGS_bytes_remote_bootstrap_durable_write_mb != 0) { // Persist directory so that recently downloaded files are accessible. RETURN_NOT_OK(env.SyncDir(rocksdb_dir)); diff --git a/src/yb/tserver/remote_bootstrap_file_downloader.cc b/src/yb/tserver/remote_bootstrap_file_downloader.cc index 5198e5720ac5..f9cfbc9ec132 100644 --- a/src/yb/tserver/remote_bootstrap_file_downloader.cc +++ b/src/yb/tserver/remote_bootstrap_file_downloader.cc @@ -113,8 +113,8 @@ Status RemoteBootstrapFileDownloader::DownloadFile( return Status::OK(); } // TODO fallback to copy. 
- LOG_WITH_PREFIX(ERROR) << "Failed to link file: " << file_path << " => " << it->second - << ": " << link_status; + LOG_WITH_PREFIX(WARNING) + << "Failed to link file: " << file_path << " => " << it->second << ": " << link_status; } } @@ -155,8 +155,8 @@ Status RemoteBootstrapFileDownloader::DownloadFile( static auto rate_updater = []() { auto remote_bootstrap_clients_started = RemoteClientBase::StartedClientsCount(); if (remote_bootstrap_clients_started < 1) { - YB_LOG_EVERY_N(ERROR, 100) << "Invalid number of remote bootstrap sessions: " - << remote_bootstrap_clients_started; + YB_LOG_EVERY_N(DFATAL, 100) << "Invalid number of remote bootstrap sessions: " + << remote_bootstrap_clients_started; return static_cast(FLAGS_remote_bootstrap_rate_limit_bytes_per_sec); } return static_cast( diff --git a/src/yb/tserver/remote_bootstrap_service.cc b/src/yb/tserver/remote_bootstrap_service.cc index 009961fff49f..c2037f0d0ed7 100644 --- a/src/yb/tserver/remote_bootstrap_service.cc +++ b/src/yb/tserver/remote_bootstrap_service.cc @@ -586,9 +586,10 @@ Status RemoteBootstrapServiceImpl::DoEndRemoteBootstrapSession( num_sessions_serving_data_->Decrement(); LOG_IF(DFATAL, nsessions_serving_data_.fetch_sub(1, std::memory_order_acq_rel) <= 0) << "found nsessions_serving_data_ <= 0 when updating rbs session " << session_id; - LOG(ERROR) << "Remote bootstrap session " << session_id << " on tablet " << session->tablet_id() - << " with peer " << session->requestor_uuid() << " failed. session_succeeded = " - << session_succeeded; + LOG(WARNING) + << "Remote bootstrap session " << session_id << " on tablet " << session->tablet_id() + << " with peer " << session->requestor_uuid() << " failed. 
session_succeeded = " + << session_succeeded; } return Status::OK(); diff --git a/src/yb/tserver/remote_bootstrap_session.cc b/src/yb/tserver/remote_bootstrap_session.cc index ea625e8b945e..7fcf63cb3184 100644 --- a/src/yb/tserver/remote_bootstrap_session.cc +++ b/src/yb/tserver/remote_bootstrap_session.cc @@ -656,9 +656,9 @@ void RemoteBootstrapSession::InitRateLimiter() { rate_limiter_.SetTargetRateUpdater([this]() -> uint64_t { DCHECK_GT(FLAGS_remote_bootstrap_rate_limit_bytes_per_sec, 0); if (FLAGS_remote_bootstrap_rate_limit_bytes_per_sec <= 0) { - YB_LOG_EVERY_N(ERROR, 1000) - << "Invalid value for remote_bootstrap_rate_limit_bytes_per_sec: " - << FLAGS_remote_bootstrap_rate_limit_bytes_per_sec; + YB_LOG_EVERY_N(WARNING, 1000) + << "Invalid value for remote_bootstrap_rate_limit_bytes_per_sec: " + << FLAGS_remote_bootstrap_rate_limit_bytes_per_sec; // Since the rate limiter is initialized, it's expected that the value of // FLAGS_remote_bootstrap_rate_limit_bytes_per_sec is greater than 0. Since this is not the // case, we'll log an error, and set the rate to 50 MB/s. diff --git a/src/yb/tserver/remote_bootstrap_snapshots.cc b/src/yb/tserver/remote_bootstrap_snapshots.cc index 77da53c000a4..e260b068a52e 100644 --- a/src/yb/tserver/remote_bootstrap_snapshots.cc +++ b/src/yb/tserver/remote_bootstrap_snapshots.cc @@ -151,13 +151,13 @@ Status RemoteBootstrapSnapshotsComponent::DownloadFileInto( // If we fail to fetch a snapshot file, delete the snapshot directory, log the error, // but don't fail the remote bootstrap as snapshot files are not needed for running // the tablet. 
- LOG(ERROR) << "Error downloading snapshot file " << file_path << ": " << s; + LOG(WARNING) << "Error downloading snapshot file " << file_path << ": " << s; failed_snapshot_ids->insert(file_pb.snapshot_id()); LOG(INFO) << "Deleting snapshot dir " << snapshot_dir; auto delete_status = Env::Default()->DeleteRecursively(snapshot_dir); if (!delete_status.ok()) { - LOG(ERROR) << "Error deleting corrupted snapshot directory " << snapshot_dir << ": " - << delete_status; + LOG(WARNING) << "Error deleting corrupted snapshot directory " << snapshot_dir << ": " + << delete_status; } } else { LOG(INFO) << "Downloaded file " << file_path << " for snapshot " << file_pb.snapshot_id(); diff --git a/src/yb/tserver/service_util.h b/src/yb/tserver/service_util.h index 1bf8d834cebc..964a52085617 100644 --- a/src/yb/tserver/service_util.h +++ b/src/yb/tserver/service_util.h @@ -83,11 +83,7 @@ Result CheckUuidMatch(TabletPeerLookupIf* tablet_manager, // Maintain compat in release mode, but complain. std::string msg = strings::Substitute("$0: Missing destination UUID in request from $1: $2", method_name, requestor_string, req->ShortDebugString()); -#ifdef NDEBUG - YB_LOG_EVERY_N(ERROR, 100) << msg; -#else - LOG(FATAL) << msg; -#endif + YB_LOG_EVERY_N(DFATAL, 100) << msg; return true; } if (PREDICT_FALSE(req->dest_uuid() != local_uuid)) { diff --git a/src/yb/tserver/stateful_services/pg_auto_analyze_service.cc b/src/yb/tserver/stateful_services/pg_auto_analyze_service.cc index 9ef65a9e3dd7..4e005ed02a3c 100644 --- a/src/yb/tserver/stateful_services/pg_auto_analyze_service.cc +++ b/src/yb/tserver/stateful_services/pg_auto_analyze_service.cc @@ -410,6 +410,7 @@ Result, std::vector>> VLOG_WITH_FUNC(1) << "Deleted or renamed " << dbname << "/" << namespace_id << ", skipping"; continue; } + if (!conn_result) { VLOG_WITH_FUNC(1) << "Conn failed: " << conn_result.status(); return conn_result.status(); @@ -422,6 +423,9 @@ Result, std::vector>> continue; } + auto s = conn.Execute("SET 
yb_use_internal_auto_analyze_service_conn=true"); + RETURN_NOT_OK(s); + // Construct ANALYZE statement and RUN ANALYZE. // Try to analyze all tables in batches to minimize the number of catalog version increments. // More catalog version increments lead to a higher number of PG cache refreshes on all PG @@ -477,6 +481,7 @@ Result, std::vector>> // Need to refresh name cache because the cached table name is outdated. refresh_name_cache_ = true; } else { + // TODO: Fix this, else branch doesn't imply that the table was deleted. VLOG(1) << "Table " << table_name << " was deleted"; // Need to remove deleted table entries from the YCQL service table. deleted_tables.push_back(table_id); diff --git a/src/yb/tserver/stateful_services/stateful_service_base.cc b/src/yb/tserver/stateful_services/stateful_service_base.cc index 2f3dcb63c149..cbe6016f2cd7 100644 --- a/src/yb/tserver/stateful_services/stateful_service_base.cc +++ b/src/yb/tserver/stateful_services/stateful_service_base.cc @@ -324,7 +324,7 @@ void StatefulServiceBase::StartPeriodicTaskIfNeeded() { std::bind(&StatefulServiceBase::ProcessTaskPeriodically, this)); if (!s.ok()) { task_enqueued_ = false; - LOG(ERROR) << "Failed to schedule " << ServiceName() << " periodic task :" << s; + LOG(WARNING) << "Failed to schedule " << ServiceName() << " periodic task :" << s; } } diff --git a/src/yb/tserver/tablet_server.cc b/src/yb/tserver/tablet_server.cc index ad4544aa0845..8028076d30d3 100644 --- a/src/yb/tserver/tablet_server.cc +++ b/src/yb/tserver/tablet_server.cc @@ -367,12 +367,6 @@ TabletServer::TabletServer(const TabletServerOptions& opts) std::make_unique>(); ysql_db_catalog_version_index_used_->fill(false); } - if (opts.server_type == TabletServerOptions::kServerType && - PREDICT_FALSE(FLAGS_TEST_enable_object_locking_for_table_locks)) { - ts_local_lock_manager_ = std::make_shared(clock_, this); - } else { - ts_local_lock_manager_ = nullptr; - } LOG(INFO) << "yb::tserver::TabletServer created at " << this; 
LOG(INFO) << "yb::tserver::TSTabletManager created at " << tablet_manager_.get(); } @@ -740,6 +734,8 @@ Status TabletServer::Start() { RETURN_NOT_OK(heartbeater_->Start()); + StartTSLocalLockManager(); + if (FLAGS_tserver_enable_metrics_snapshotter) { RETURN_NOT_OK(metrics_snapshotter_->Start()); } @@ -785,6 +781,10 @@ void TabletServer::Shutdown() { "Failed to stop table mutation count sender thread"); } + if (auto local_lock_manager = ts_local_lock_manager(); local_lock_manager) { + local_lock_manager->Shutdown(); + } + client()->RequestAbortAllRpcs(); tablet_manager_->StartShutdown(); @@ -795,9 +795,24 @@ void TabletServer::Shutdown() { } tserver::TSLocalLockManagerPtr TabletServer::ResetAndGetTSLocalLockManager() { - std::lock_guard l(lock_); - ts_local_lock_manager_ = std::make_shared(clock_, this); - return ts_local_lock_manager_; + ts_local_lock_manager()->Shutdown(); + { + std::lock_guard l(lock_); + ts_local_lock_manager_.reset(); + } + StartTSLocalLockManager(); + return ts_local_lock_manager(); +} + +void TabletServer::StartTSLocalLockManager() { + if (opts_.server_type == TabletServerOptions::kServerType && + PREDICT_FALSE(FLAGS_TEST_enable_object_locking_for_table_locks)) { + std::lock_guard l(lock_); + ts_local_lock_manager_ = std::make_shared( + clock_, this /* TabletServerIf* */, *this /* RpcServerBase& */, + tablet_manager_->waiting_txn_pool()); + ts_local_lock_manager_->Start(tablet_manager_->waiting_txn_registry()); + } } bool TabletServer::HasBootstrappedLocalLockManager() const { @@ -805,11 +820,10 @@ bool TabletServer::HasBootstrappedLocalLockManager() const { return lock_manager && lock_manager->IsBootstrapped(); } -Status TabletServer::ProcessLeaseUpdate( - const master::RefreshYsqlLeaseInfoPB& lease_refresh_info, MonoTime time) { +Status TabletServer::ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info) { VLOG(2) << __func__; auto lock_manager = ts_local_lock_manager(); - if (lease_refresh_info.has_ddl_lock_entries() 
&& lock_manager) { + if (lease_refresh_info.new_lease() && lock_manager) { if (lock_manager->IsBootstrapped()) { // Reset the local lock manager to bootstrap from the given DDL lock entries. lock_manager = ResetAndGetTSLocalLockManager(); @@ -821,13 +835,13 @@ Status TabletServer::ProcessLeaseUpdate( // having it the other way around, and having an old-session that is not reset. auto pg_client_service = pg_client_service_.lock(); if (pg_client_service) { - pg_client_service->impl.ProcessLeaseUpdate(lease_refresh_info, time); + pg_client_service->impl.ProcessLeaseUpdate(lease_refresh_info); } return Status::OK(); } -Result TabletServer::GetYSQLLeaseInfo() const { +Result TabletServer::GetYSQLLeaseInfo() const { if (!IsYsqlLeaseEnabled()) { return STATUS(NotSupported, "YSQL lease is not enabled"); } @@ -835,13 +849,7 @@ Result TabletServer::GetYSQLLeaseInfo() const { if (!pg_client_service) { RSTATUS_DCHECK(pg_client_service, InternalError, "Unable to get pg_client_service"); } - auto lease_info = pg_client_service->impl.GetYSQLLeaseInfo(); - GetYSQLLeaseInfoResponsePB resp; - resp.set_is_live(lease_info.is_live); - if (lease_info.is_live) { - resp.set_lease_epoch(lease_info.lease_epoch); - } - return resp; + return pg_client_service->impl.GetYSQLLeaseInfo(); } Status TabletServer::RestartPG() const { @@ -851,6 +859,13 @@ Status TabletServer::RestartPG() const { return STATUS(IllegalState, "PG restarter callback not registered, cannot restart PG"); } +Status TabletServer::KillPg() const { + if (pg_killer_) { + return pg_killer_(); + } + return STATUS(IllegalState, "Pg killer callback not registered, cannot restart PG"); +} + bool TabletServer::IsYsqlLeaseEnabled() { return GetAtomicFlag(&FLAGS_TEST_enable_object_locking_for_table_locks) || GetAtomicFlag(&FLAGS_enable_ysql_operation_lease); @@ -1052,7 +1067,7 @@ Status TabletServer::GetTserverCatalogMessageLists( uint64_t expected_version = ysql_catalog_version + 1; std::set current_versions; for (const auto& info : 
messages_vec) { - DCHECK(current_versions.insert(info.first).second); + CHECK(current_versions.insert(info.first).second); if (info.first <= ysql_catalog_version) { continue; } @@ -1150,26 +1165,29 @@ Status TabletServer::SetTserverCatalogMessageList( return Status::OK(); } db_message_lists = &it2->second.queue; - // If the queue is empty, of the new_catalog_version is larger than the last version in - // the queue (the queue is sorted in catalog version), just append the new pair. - if (db_message_lists->empty() || db_message_lists->rbegin()->first < new_catalog_version) { - db_message_lists->emplace_back(std::make_pair(new_catalog_version, message_list)); - return Status::OK(); - } - // The queue isn't empty, insert the new pair to the right position. Because db_message_lists - // is sorted, we can use std::upper_bound with a custom comparator function to find the right - // insertion point. - auto comp = [](uint64_t current_version, - const std::pair>& p) { - return current_version < p.first; + // Insert the new pair to the right position. Because db_message_lists is sorted, we can use + // std::lower_bound with a custom comparator function to find the right insertion point. + auto comp = [](const std::pair>& p, + uint64_t current_version) { + return p.first < current_version; }; - auto it3 = std::upper_bound(db_message_lists->begin(), db_message_lists->end(), + auto it3 = std::lower_bound(db_message_lists->begin(), db_message_lists->end(), new_catalog_version, comp); - if (it3 != db_message_lists->end()) { - db_message_lists->insert(it3, std::make_pair(new_catalog_version, message_list)); - } else { - // We can reach here if the last version in the queue is the same as new_catalog_version. + if (it3 == db_message_lists->end()) { + // This means that either the queue is empty, or the new_catalog_version is larger than + // the last version in the queue (the queue is sorted in catalog version). 
+ VLOG(2) << "appending new version: " << new_catalog_version; + db_message_lists->emplace_back(std::make_pair(new_catalog_version, message_list)); + } else { + // std::lower_bound: returns an iterator pointing to the first element in the range + // that is not less than (i.e., greater than or equal to) new_catalog_version. + if (it3->first > new_catalog_version) { + VLOG(2) << "inserting new version: " << new_catalog_version; + db_message_lists->insert(it3, std::make_pair(new_catalog_version, message_list)); + } else { + VLOG(2) << "found existing version: " << new_catalog_version; + } } return Status::OK(); } @@ -1346,8 +1364,8 @@ void TabletServer::SetYsqlDBCatalogVersions( ++count; } if (shm_index == -1) { - YB_LOG_EVERY_N_SECS(ERROR, 60) << "Cannot find free db_catalog_versions_ slot, db_oid: " - << db_oid; + YB_LOG_EVERY_N_SECS(WARNING, 60) + << "Cannot find free db_catalog_versions_ slot, db_oid: " << db_oid; continue; } // update the newly inserted entry to have the allocated slot. @@ -1518,6 +1536,7 @@ void TabletServer::MergeInvalMessagesIntoQueueUnlocked( int start_index, int end_index) { DCHECK_LT(start_index, end_index); + VLOG(2) << "merging inval messages for db: " << db_oid; auto it = ysql_db_invalidation_messages_map_.find(db_oid); if (it == ysql_db_invalidation_messages_map_.end()) { // The db_oid does not exist in ysql_db_invalidation_messages_map_ yet. This is possible @@ -1573,6 +1592,7 @@ void TabletServer::DoMergeInvalMessagesIntoQueueUnlocked( // Compare the incoming version with the current existing one. if (incoming_version == existing_version) { + VLOG(2) << "found existing version " << incoming_version; if (incoming_message_list != it->second) { // same version should have same message. 
LOG(DFATAL) << "message_list mismatch: " << existing_version; @@ -1582,6 +1602,7 @@ void TabletServer::DoMergeInvalMessagesIntoQueueUnlocked( ++start_index; } else if (incoming_version < existing_version) { // The incoming version is lower, insert it before it. + VLOG(2) << "inserting version " << incoming_version; it = db_message_lists->insert(it, std::make_pair(incoming_version, incoming_message_list)); // After insertion, it points to the newly inserted incoming version, advance it to the // original existing version. @@ -1591,6 +1612,8 @@ void TabletServer::DoMergeInvalMessagesIntoQueueUnlocked( } else { // The incoming version is higher, move it to the next existing slot in the queue. // Keep start_index unchanged so that it can be compared with the next slot in the queue. + VLOG(2) << "existing version: " << existing_version + << ", higher incoming version: " << incoming_version; ++it; } } @@ -1601,8 +1624,10 @@ void TabletServer::DoMergeInvalMessagesIntoQueueUnlocked( const uint64_t current_version = db_inval_messages.current_version(); const std::optional& message_list = db_inval_messages.has_message_list() ? std::optional(db_inval_messages.message_list()) : std::nullopt; + VLOG(2) << "appending version " << current_version; db_message_lists->emplace_back(std::make_pair(current_version, message_list)); } + VLOG(2) << "queue size: " << db_message_lists->size(); // We may have added more messages to the queue that exceeded the max size. 
while (db_message_lists->size() > FLAGS_ysql_max_invalidation_message_queue_size) { db_message_lists->pop_front(); @@ -2070,6 +2095,10 @@ void TabletServer::RegisterPgProcessRestarter(std::function restar pg_restarter_ = std::move(restarter); } +void TabletServer::RegisterPgProcessKiller(std::function killer) { + pg_killer_ = std::move(killer); +} + Status TabletServer::StartYSQLLeaseRefresher() { return ysql_lease_poller_->Start(); } diff --git a/src/yb/tserver/tablet_server.h b/src/yb/tserver/tablet_server.h index bf6d5969416a..e93d249b0f23 100644 --- a/src/yb/tserver/tablet_server.h +++ b/src/yb/tserver/tablet_server.h @@ -207,10 +207,10 @@ class TabletServer : public DbServerBase, public TabletServerIf { ConcurrentPointerReference SharedObject() override { return shared_object(); } Status PopulateLiveTServers(const master::TSHeartbeatResponsePB& heartbeat_resp) EXCLUDES(lock_); - Status ProcessLeaseUpdate( - const master::RefreshYsqlLeaseInfoPB& lease_refresh_info, MonoTime time); - Result GetYSQLLeaseInfo() const override; + Status ProcessLeaseUpdate(const master::RefreshYsqlLeaseInfoPB& lease_refresh_info); + Result GetYSQLLeaseInfo() const override; Status RestartPG() const override; + Status KillPg() const override; static bool IsYsqlLeaseEnabled(); tserver::TSLocalLockManagerPtr ResetAndGetTSLocalLockManager() EXCLUDES(lock_); @@ -371,6 +371,8 @@ class TabletServer : public DbServerBase, public TabletServerIf { void RegisterPgProcessRestarter(std::function restarter) override; + void RegisterPgProcessKiller(std::function killer) override; + Status StartYSQLLeaseRefresher(); TserverXClusterContextIf& GetXClusterContext() const; @@ -459,6 +461,8 @@ class TabletServer : public DbServerBase, public TabletServerIf { Result CreateInternalPGConn( const std::string& database_name, const std::optional& deadline) override; + void StartTSLocalLockManager() EXCLUDES (lock_); + std::atomic initted_{false}; // If true, all heartbeats will be seen as failed. 
@@ -623,6 +627,7 @@ class TabletServer : public DbServerBase, public TabletServerIf { std::unique_ptr secure_context_; std::vector certificate_reloaders_; std::function pg_restarter_; + std::function pg_killer_; // xCluster consumer. mutable std::mutex xcluster_consumer_mutex_; diff --git a/src/yb/tserver/tablet_server_interface.h b/src/yb/tserver/tablet_server_interface.h index 8e5da0622fc9..1b1b5030414f 100644 --- a/src/yb/tserver/tablet_server_interface.h +++ b/src/yb/tserver/tablet_server_interface.h @@ -32,6 +32,7 @@ #include "yb/tserver/tserver_fwd.h" #include "yb/tserver/tserver_util_fwd.h" #include "yb/tserver/local_tablet_server.h" +#include "yb/tserver/ysql_lease.h" #include "yb/util/concurrent_value.h" @@ -147,9 +148,11 @@ class TabletServerIf : public LocalTabletServer { virtual void SetYsqlDBCatalogVersions( const tserver::DBCatalogVersionDataPB& db_catalog_version_data) = 0; - virtual Result GetYSQLLeaseInfo() const = 0; + virtual Result GetYSQLLeaseInfo() const = 0; virtual Status RestartPG() const = 0; + + virtual Status KillPg() const = 0; }; } // namespace tserver diff --git a/src/yb/tserver/tablet_service.cc b/src/yb/tserver/tablet_service.cc index f64066086962..e1d020240daa 100644 --- a/src/yb/tserver/tablet_service.cc +++ b/src/yb/tserver/tablet_service.cc @@ -107,6 +107,7 @@ #include "yb/tserver/tserver_xcluster_context_if.h" #include "yb/tserver/xcluster_safe_time_map.h" #include "yb/tserver/ysql_advisory_lock_table.h" +#include "yb/tserver/ysql_lease.h" #include "yb/util/async_util.h" #include "yb/util/backoff_waiter.h" @@ -231,6 +232,10 @@ DEFINE_UNKNOWN_bool(enable_ysql, true, "specified or can be auto-detected). 
Also each tablet server will start a PostgreSQL " "server as a child process."); +DEFINE_RUNTIME_bool(ysql_allow_duplicating_repeatable_read_queries, yb::kIsDebug, + "Response with success when duplicate write request is detected, " + "if case this request contains read time."); + DECLARE_int32(ysql_transaction_abort_timeout_ms); DECLARE_bool(ysql_yb_disable_wait_for_backends_catalog_version); @@ -505,15 +510,14 @@ class WriteQueryCompletionCallback { SCOPED_WAIT_STATUS(OnCpu_Active); VLOG(1) << __PRETTY_FUNCTION__ << " completing with status " << status; // When we don't need to return any data, we could return success on duplicate request. - if (status.IsAlreadyPresent() && - query_->ql_write_ops()->empty() && - query_->pgsql_write_ops()->empty() && - query_->client_request()->redis_write_batch().empty()) { + if (status.IsAlreadyPresent() && AllowDuplicateRequest()) { status = Status::OK(); } TRACE("Write completing with status $0", yb::ToString(status)); + CopyMetricsToPgsqlResponse(); + if (!status.ok()) { if (leader_term_set_in_request_ && status.IsAborted() && status.message().Contains("Operation submitted in term")) { @@ -556,6 +560,26 @@ class WriteQueryCompletionCallback { return response_->mutable_error(); } + void CopyMetricsToPgsqlResponse() const { + auto tablet_metrics = query_->scoped_tablet_metrics(); + auto statistics = query_->scoped_statistics(); + + if (auto* resp = query_->GetPgsqlResponseForMetricsCapture()) { + tablet_metrics.CopyToPgsqlResponse(resp); + statistics.CopyToPgsqlResponse(resp); + } + } + + bool AllowDuplicateRequest() const { + if (!query_->ql_write_ops()->empty() || + !query_->client_request()->redis_write_batch().empty()) { + return false; + } + return query_->pgsql_write_ops()->empty() || + (FLAGS_ysql_allow_duplicating_repeatable_read_queries && + query_->client_request()->has_read_time()); + } + tablet::TabletPeerPtr tablet_peer_; const std::shared_ptr context_; WriteResponsePB* const response_; @@ -802,7 +826,7 @@ void 
TabletServiceAdminImpl::BackfillIndex( tablet.tablet->SafeTime(tablet::RequireLease::kFalse, read_at, deadline); DVLOG(1) << "Got safe time " << safe_time.ToString(); if (!safe_time.ok()) { - LOG(ERROR) << "Could not get a good enough safe time " << safe_time.ToString(); + LOG(WARNING) << "Could not get a good enough safe time " << safe_time.ToString(); SetupErrorAndRespond(resp->mutable_error(), safe_time.status(), &context); return; } @@ -1040,11 +1064,11 @@ void TabletServiceAdminImpl::AlterSchema(const tablet::ChangeMetadataRequestPB* schema_version = tablet.peer->tablet_metadata()->schema_version( req->has_alter_table_id() ? req->alter_table_id() : ""); if (schema_version == req->schema_version()) { - LOG(ERROR) << "The current schema does not match the request schema." - << " version=" << schema_version - << " current-schema=" << tablet_schema.ToString() - << " request-schema=" << req_schema.ToString() - << " (corruption)"; + LOG(DFATAL) << "The current schema does not match the request schema." + << " version=" << schema_version + << " current-schema=" << tablet_schema.ToString() + << " request-schema=" << req_schema.ToString() + << " (corruption)"; SetupErrorAndRespond(resp->mutable_error(), STATUS(Corruption, "got a different schema for the same version number"), TabletServerErrorPB::MISMATCHED_SCHEMA, &context); @@ -1054,11 +1078,11 @@ void TabletServiceAdminImpl::AlterSchema(const tablet::ChangeMetadataRequestPB* // If the current schema is newer than the one in the request reject the request. 
if (schema_version > req->schema_version()) { - LOG(ERROR) << "Tablet " << req->tablet_id() << " has a newer schema" - << " version=" << schema_version - << " req->schema_version()=" << req->schema_version() - << "\n current-schema=" << tablet_schema.ToString() - << "\n request-schema=" << req_schema.ToString(); + LOG(WARNING) << "Tablet " << req->tablet_id() << " has a newer schema" + << " version=" << schema_version + << " req->schema_version()=" << req->schema_version() + << "\n current-schema=" << tablet_schema.ToString() + << "\n request-schema=" << req_schema.ToString(); SetupErrorAndRespond( resp->mutable_error(), STATUS_SUBSTITUTE( @@ -1093,15 +1117,17 @@ void TabletServiceAdminImpl::AlterSchema(const tablet::ChangeMetadataRequestPB* return; } + auto skip_aborting_active_transactions = + FLAGS_TEST_enable_object_locking_for_table_locks || + FLAGS_TEST_skip_aborting_active_transactions_during_schema_change; // After write operation is paused, active transactions will be aborted for YSQL transactions. 
if (tablet.tablet->table_type() == TableType::PGSQL_TABLE_TYPE && - req->should_abort_active_txns() && - !FLAGS_TEST_skip_aborting_active_transactions_during_schema_change) { + req->should_abort_active_txns() && !skip_aborting_active_transactions) { DCHECK(req->has_transaction_id()); if (tablet.tablet->transaction_participant() == nullptr) { auto status = STATUS( IllegalState, "Transaction participant is null for tablet " + req->tablet_id()); - LOG(ERROR) << status; + LOG(DFATAL) << status; SetupErrorAndRespond( resp->mutable_error(), status, @@ -1223,7 +1249,7 @@ void TabletServiceImpl::VerifyTableRowRange( const auto safe_time = tablet->SafeTime(tablet::RequireLease::kFalse, read_at, deadline); DVLOG(1) << "Got safe time " << safe_time.ToString(); if (!safe_time.ok()) { - LOG(ERROR) << "Could not get a good enough safe time " << safe_time.ToString(); + LOG(DFATAL) << "Could not get a good enough safe time " << safe_time.ToString(); SetupErrorAndRespond(resp->mutable_error(), safe_time.status(), &context); return; } @@ -2042,7 +2068,7 @@ void TabletServiceAdminImpl::FlushTablets(const FlushTabletsRequestPB* req, case FlushTabletsRequestPB::LOG_GC: for (const auto& tablet : tablet_peers) { resp->set_failed_tablet_id(tablet->tablet_id()); - RETURN_UNKNOWN_ERROR_IF_NOT_OK(tablet->RunLogGC(), resp, &context); + RETURN_UNKNOWN_ERROR_IF_NOT_OK(tablet->RunLogGC(req->rollover()), resp, &context); resp->clear_failed_tablet_id(); } break; @@ -2391,7 +2417,7 @@ void TabletServiceAdminImpl::WaitForYsqlBackendsCatalogVersion( server_->GetSharedMemoryPostgresAuthKey(), modified_deadline) .Connect(); if (!res.ok()) { - LOG_WITH_PREFIX_AND_FUNC(ERROR) << "failed to connect to local postgres: " << res.status(); + LOG_WITH_PREFIX_AND_FUNC(WARNING) << "failed to connect to local postgres: " << res.status(); SetupErrorAndRespond(resp->mutable_error(), res.status(), &context); return; } @@ -2438,7 +2464,7 @@ void TabletServiceAdminImpl::WaitForYsqlBackendsCatalogVersion( 
LOG_WITH_PREFIX(INFO) << "Deadline reached: still waiting on " << num_lagging_backends << " backends " << db_ver_tag; } else if (!s.ok()) { - LOG_WITH_PREFIX_AND_FUNC(ERROR) << "num lagging backends query failed: " << s; + LOG_WITH_PREFIX_AND_FUNC(WARNING) << "num lagging backends query failed: " << s; SetupErrorAndRespond(resp->mutable_error(), s, &context); return; } @@ -3645,13 +3671,10 @@ void TabletServiceImpl::AcquireObjectLocks( SetupErrorAndRespond( resp->mutable_error(), STATUS(IllegalState, "TSLocalLockManager not found..."), &context); } - auto s = ts_local_lock_manager->AcquireObjectLocks(*req, context.GetClientDeadline()); - resp->set_propagated_hybrid_time(server_->Clock()->Now().ToUint64()); - if (!s.ok()) { - SetupErrorAndRespond(resp->mutable_error(), s, &context); - } else { - context.RespondSuccess(); - } + const auto deadline = context.GetClientDeadline(); + ts_local_lock_manager->AcquireObjectLocksAsync( + *req, deadline, + MakeRpcOperationCompletionCallback(std::move(context), resp, server_->Clock())); } void TabletServiceImpl::ReleaseObjectLocks( @@ -3684,7 +3707,13 @@ void TabletServiceImpl::ReleaseObjectLocks( Result TabletServiceImpl::GetYSQLLeaseInfo( const GetYSQLLeaseInfoRequestPB& req, CoarseTimePoint deadline) { - return server_->GetYSQLLeaseInfo(); + auto lease_info = VERIFY_RESULT(server_->GetYSQLLeaseInfo()); + GetYSQLLeaseInfoResponsePB resp; + resp.set_is_live(lease_info.is_live); + if (lease_info.is_live) { + resp.set_lease_epoch(lease_info.lease_epoch); + } + return resp; } void TabletServiceImpl::AdminExecutePgsql( diff --git a/src/yb/tserver/tablet_validator.cc b/src/yb/tserver/tablet_validator.cc index 4402f8645a48..35cdaf7982d1 100644 --- a/src/yb/tserver/tablet_validator.cc +++ b/src/yb/tserver/tablet_validator.cc @@ -226,7 +226,8 @@ void TabletMetadataValidator::Impl::DoValidate() { auto sync_result = SyncWithMaster(); if (!sync_result.ok()) { - LOG_WITH_PREFIX(ERROR) << "Failed to sync with the master, status: " << 
sync_result.status(); + LOG_WITH_PREFIX(WARNING) + << "Failed to sync with the master, status: " << sync_result.status(); break; } @@ -240,8 +241,8 @@ bool TabletMetadataValidator::Impl::HandleMasterResponse( VLOG_WITH_PREFIX_AND_FUNC(2) << "response: " << response.ShortDebugString(); if (response.has_error()) { - LOG_WITH_PREFIX(ERROR) << "Failed to get backfilling status from the master, " - << "error: " << response.error().ShortDebugString(); + LOG_WITH_PREFIX(WARNING) << "Failed to get backfilling status from the master, " + << "error: " << response.error().ShortDebugString(); return false; // Will try during next period. } @@ -531,7 +532,7 @@ void TabletMetadataValidator::Impl::TriggerMetadataUpdate( // has been changed from the last flush (some operation has been applied), but this cannot be // guaranteed as no raft opeation is used for retain_delete_markers recovery. auto status = tablet_meta->Flush(); - LOG_IF_WITH_PREFIX(ERROR, !status.ok()) + LOG_IF_WITH_PREFIX(WARNING, !status.ok()) << "Tablet " << index_tablet_id << " metadata flush failed: " << status; if (status.ok()) { diff --git a/src/yb/tserver/ts_local_lock_manager-test.cc b/src/yb/tserver/ts_local_lock_manager-test.cc index fc06ae63a7e5..775a163c7859 100644 --- a/src/yb/tserver/ts_local_lock_manager-test.cc +++ b/src/yb/tserver/ts_local_lock_manager-test.cc @@ -35,12 +35,22 @@ #include "yb/util/test_util.h" #include "yb/util/tsan_util.h" +DECLARE_bool(TEST_enable_object_locking_for_table_locks); +DECLARE_bool(TEST_assert_olm_empty_locks_map); +DECLARE_bool(TEST_olm_skip_scheduling_waiter_resumption); +DECLARE_bool(TEST_olm_skip_sending_wait_for_probes); + using namespace std::literals; +using yb::docdb::IntentTypeSetAdd; +using yb::docdb::LockState; using yb::docdb::ObjectLockOwner; +using yb::docdb::ObjectLockPrefix; namespace yb::tserver { +using LockStateMap = std::unordered_map; + auto kTxn1 = ObjectLockOwner{TransactionId::GenerateRandom(), 1}; auto kTxn2 = 
ObjectLockOwner{TransactionId::GenerateRandom(), 1}; @@ -50,27 +60,20 @@ constexpr auto kObject1 = 1; class TSLocalLockManagerTest : public TabletServerTestBase { protected: - TSLocalLockManagerTest() { - auto ts = TabletServerTestBase::CreateMiniTabletServer(); - CHECK_OK(ts); - mini_server_.reset(ts->release()); - lm_ = std::make_unique( - new server::HybridClock(), mini_server_->server()); - lm_->TEST_MarkBootstrapped(); - } - - std::unique_ptr mini_server_; - std::unique_ptr lm_; - void SetUp() override { - YBTest::SetUp(); - ASSERT_OK(lm_->clock()->Init()); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_enable_object_locking_for_table_locks) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_assert_olm_empty_locks_map) = true; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_olm_skip_sending_wait_for_probes) = true; + TabletServerTestBase::SetUp(); + StartTabletServer(); + lm_ = CHECK_NOTNULL(mini_server_->server()->ts_local_lock_manager()); + lm_->TEST_MarkBootstrapped(); } Status LockObjects( const ObjectLockOwner& owner, uint64_t database_id, const std::vector& object_ids, const std::vector& lock_types, - CoarseTimePoint deadline = CoarseTimePoint::max()) { + CoarseTimePoint deadline = CoarseTimePoint::max(), LockStateMap* state_map = nullptr) { SCHECK_EQ(object_ids.size(), lock_types.size(), IllegalState, "Expected equal sizes"); tserver::AcquireObjectLockRequestPB req; owner.PopulateLockRequest(&req); @@ -80,27 +83,38 @@ class TSLocalLockManagerTest : public TabletServerTestBase { lock->set_object_oid(object_ids[i]); lock->set_lock_type(lock_types[i]); } - return lm_->AcquireObjectLocks(req, deadline); + req.set_propagated_hybrid_time(MonoTime::Now().ToUint64()); + Synchronizer synchronizer; + lm_->AcquireObjectLocksAsync(req, deadline, synchronizer.AsStdStatusCallback()); + RETURN_NOT_OK(synchronizer.Wait()); + if (!state_map) { + return Status::OK(); + } + auto res = VERIFY_RESULT(DetermineObjectsToLock(req.object_locks())); + for (auto& lock_batch_entry : res.lock_batch) 
{ + (*state_map)[lock_batch_entry.key] += IntentTypeSetAdd(lock_batch_entry.intent_types); + } + return Status::OK(); } Status LockObject( const ObjectLockOwner& owner, uint64_t database_id, uint64_t object_id, - TableLockType lock_type, CoarseTimePoint deadline = CoarseTimePoint::max()) { - return LockObjects(owner, database_id, {object_id}, {lock_type}, deadline); + TableLockType lock_type, CoarseTimePoint deadline = CoarseTimePoint::max(), + LockStateMap* state_map = nullptr) { + return LockObjects(owner, database_id, {object_id}, {lock_type}, deadline, state_map); } - Status ReleaseObjectLock( + Status ReleaseLocksForSubtxn( const ObjectLockOwner& owner, CoarseTimePoint deadline = CoarseTimePoint::max()) { tserver::ReleaseObjectLockRequestPB req; - owner.PopulateLockRequest(&req); + owner.PopulateReleaseRequest(&req, false /* release all locks */); return lm_->ReleaseObjectLocks(req, deadline); } Status ReleaseAllLocksForTxn( const ObjectLockOwner& owner, CoarseTimePoint deadline = CoarseTimePoint::max()) { tserver::ReleaseObjectLockRequestPB req; - req.set_txn_id(owner.txn_id.data(), owner.txn_id.size()); - req.set_subtxn_id(owner.subtxn_id); + owner.PopulateReleaseRequest(&req); return lm_->ReleaseObjectLocks(req, deadline); } @@ -111,6 +125,8 @@ class TSLocalLockManagerTest : public TabletServerTestBase { size_t WaitingLocksSize() const { return lm_->TEST_WaitingLocksSize(); } + + std::shared_ptr lm_; }; TEST_F(TSLocalLockManagerTest, TestLockAndRelease) { @@ -119,7 +135,7 @@ TEST_F(TSLocalLockManagerTest, TestLockAndRelease) { ASSERT_GE(GrantedLocksSize(), 1); ASSERT_EQ(WaitingLocksSize(), 0); - ASSERT_OK(ReleaseObjectLock(kTxn1)); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); ASSERT_EQ(GrantedLocksSize(), 0); ASSERT_EQ(WaitingLocksSize(), 0); } @@ -175,7 +191,7 @@ TEST_F(TSLocalLockManagerTest, TestWaitersAndBlocker) { ASSERT_GE(WaitingLocksSize(), 1); for (int i = 0; i < kNumReaders; i++) { - ASSERT_OK(ReleaseObjectLock(reader_txns[i])); + 
ASSERT_OK(ReleaseAllLocksForTxn(reader_txns[i])); if (i + 1 < kNumReaders) { ASSERT_NE(status_future.wait_for(0s), std::future_status::ready); } @@ -192,12 +208,12 @@ TEST_F(TSLocalLockManagerTest, TestWaitersAndBlocker) { } ASSERT_EQ(waiters_blocked.count(), 5); - ASSERT_OK(ReleaseObjectLock(kTxn1)); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); ASSERT_TRUE(waiters_blocked.WaitFor(2s * kTimeMultiplier)); ASSERT_EQ(GrantedLocksSize(), kNumReaders); thread_holder.WaitAndStop(2s * kTimeMultiplier); for (int i = 0; i < kNumReaders; i++) { - ASSERT_OK(ReleaseObjectLock(reader_txns[i])); + ASSERT_OK(ReleaseAllLocksForTxn(reader_txns[i])); } } @@ -214,6 +230,7 @@ TEST_F(TSLocalLockManagerTest, TestSessionIgnoresLockConflictWithSelf) { // {1, kWeakObjectLock} // {1, kStrongObjectLock} ASSERT_EQ(GrantedLocksSize(), 2); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); } // The below test asserts that the lock manager signals the corresponding condition variable on @@ -227,8 +244,9 @@ TEST_F(TSLocalLockManagerTest, TestWaitersSignaledOnEveryRelease) { }); ASSERT_NE(status_future.wait_for(1s * kTimeMultiplier), std::future_status::ready); - ASSERT_OK(ReleaseObjectLock(kTxn2)); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn2)); ASSERT_OK(status_future.get()); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); } #ifndef NDEBUG @@ -250,7 +268,7 @@ TEST_F(TSLocalLockManagerTest, TestFailedLockRpcSemantics) { ASSERT_EQ(GrantedLocksSize(), 4); SyncPoint::GetInstance()->LoadDependency({ - {"ObjectLockedBatchEntry::Lock", "TestFailedLockRpcSemantics"}}); + {"ObjectLockManagerImpl::DoLockSingleEntry", "TestFailedLockRpcSemantics"}}); SyncPoint::GetInstance()->ClearTrace(); SyncPoint::GetInstance()->EnableProcessing(); @@ -272,13 +290,14 @@ TEST_F(TSLocalLockManagerTest, TestFailedLockRpcSemantics) { ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); ASSERT_EQ(GrantedLocksSize(), 1); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn2)); } TEST_F(TSLocalLockManagerTest, TestReleaseWaitingLocks) { ASSERT_OK(LockObject(kTxn1, 
kDatabase1, kObject1, TableLockType::ACCESS_SHARE)); SyncPoint::GetInstance()->LoadDependency( - {{"ObjectLockedBatchEntry::Lock", "TestReleaseWaitingLocks"}}); + {{"ObjectLockManagerImpl::DoLockSingleEntry", "TestReleaseWaitingLocks"}}); SyncPoint::GetInstance()->ClearTrace(); SyncPoint::GetInstance()->EnableProcessing(); @@ -293,6 +312,8 @@ TEST_F(TSLocalLockManagerTest, TestReleaseWaitingLocks) { ASSERT_TRUE(status_future.valid()); ASSERT_OK(ReleaseAllLocksForTxn(kTxn2)); ASSERT_NOK(status_future.get()); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn2)); + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1)); } #endif // NDEBUG @@ -321,8 +342,9 @@ TEST_F(TSLocalLockManagerTest, TestDowngradeDespiteExclusiveLockWaiter) { docdb::DocDBTableLocksConflictMatrixTest::ObjectLocksConflict(entries1, entries2)); if (is_conflicting) { - SyncPoint::GetInstance()->LoadDependency( - {{"ObjectLockedBatchEntry::Lock", "TestDowngradeDespiteExclusiveLockWaiter"}}); + SyncPoint::GetInstance()->LoadDependency({ + {"ObjectLockManagerImpl::DoLockSingleEntry", + "TestDowngradeDespiteExclusiveLockWaiter"}}); SyncPoint::GetInstance()->ClearTrace(); SyncPoint::GetInstance()->EnableProcessing(); @@ -357,4 +379,74 @@ TEST_F(TSLocalLockManagerTest, TestDowngradeDespiteExclusiveLockWaiter) { } } +TEST_F(TSLocalLockManagerTest, TestSanity) { + const auto kNumConns = 30; + const auto kNumbObjects = 5; + TestThreadHolder thread_holder; + for (int i = 0; i < kNumConns; i++) { + thread_holder.AddThreadFunctor([&, &stop = thread_holder.stop_flag()]() { + LockStateMap state_map; + auto owner = ObjectLockOwner{TransactionId::GenerateRandom(), 1}; + while (!stop) { + LockStateMap prev = state_map; + auto deadline = CoarseMonoClock::Now() + 2s; + unsigned int seed = SeedRandom(); + auto lock_type = TableLockType((rand_r(&seed) % TableLockType_MAX) + 1); + auto failed = false; + while(!failed && rand_r(&seed) % 3) { + failed = !LockObject( + owner, kDatabase1, rand_r(&seed) % kNumbObjects, lock_type, deadline, + 
&state_map).ok(); + } + if (failed) { + ASSERT_OK(ReleaseLocksForSubtxn(owner, deadline)); + state_map = prev; + } + owner.subtxn_id++; + auto actual_state_map = lm_->TEST_GetLockStateMapForTxn(owner.txn_id); + for (auto& [key, state] : state_map) { + auto it = actual_state_map.find(key); + ASSERT_TRUE(it != actual_state_map.end()); + ASSERT_EQ(it->second, state); + actual_state_map.erase(it); + } + for (auto& [_, state] : actual_state_map) { + ASSERT_EQ(state, 0); + } + if (rand_r(&seed) % 3) { + ASSERT_OK(ReleaseAllLocksForTxn(owner, deadline)); + state_map.clear(); + owner.subtxn_id = 1; + } + } + ASSERT_OK(ReleaseAllLocksForTxn(owner, CoarseTimePoint::max())); + }); + } + thread_holder.WaitAndStop(45s); +} + +#ifndef NDEBUG +TEST_F(TSLocalLockManagerTest, TestWaiterResetsStateDuringShutdown) { + ASSERT_OK(LockObject(kTxn1, kDatabase1, kObject1, TableLockType::ACCESS_SHARE)); + + SyncPoint::GetInstance()->LoadDependency( + {{"ObjectLockManagerImpl::DoLockSingleEntry", "TestWaiterResetsStateDuringShutdown"}}); + SyncPoint::GetInstance()->ClearTrace(); + SyncPoint::GetInstance()->EnableProcessing(); + + auto status_future = std::async(std::launch::async, [&]() { + return LockObject( + kTxn2, kDatabase1, kObject1, TableLockType::ACCESS_EXCLUSIVE, CoarseMonoClock::Now() + 10s); + }); + DEBUG_ONLY_TEST_SYNC_POINT("TestWaiterResetsStateDuringShutdown"); + + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_olm_skip_scheduling_waiter_resumption) = true; + ASSERT_OK(ReleaseAllLocksForTxn(kTxn1, CoarseTimePoint::max())); + mini_server_->Shutdown(); + auto status = status_future.get(); + ASSERT_NOK(status); + ASSERT_STR_CONTAINS(status.ToString(), "Object Lock Manager shutting down"); +} +#endif + } // namespace yb::tserver diff --git a/src/yb/tserver/ts_local_lock_manager.cc b/src/yb/tserver/ts_local_lock_manager.cc index 737e5ca887b4..8657a6e8f4d9 100644 --- a/src/yb/tserver/ts_local_lock_manager.cc +++ b/src/yb/tserver/ts_local_lock_manager.cc @@ -18,6 +18,11 @@ #include 
"yb/docdb/docdb.h" #include "yb/docdb/docdb_fwd.h" #include "yb/docdb/object_lock_manager.h" + +#include "yb/rpc/messenger.h" +#include "yb/rpc/poller.h" + +#include "yb/server/server_base.h" #include "yb/util/backoff_waiter.h" #include "yb/util/monotime.h" #include "yb/util/scope_exit.h" @@ -27,11 +32,22 @@ using namespace std::literals; DECLARE_bool(dump_lock_keys); +DEFINE_RUNTIME_int64(olm_poll_interval_ms, 100, + "Poll interval for Object lock Manager. Waiting requests that are unblocked by other release " + "requests are independent of this interval since the release schedules unblocking of potential " + "waiters. Yet this might help release timedout requests soon and also avoid probable issues " + "with the signaling mechanism if any."); + namespace yb::tserver { class TSLocalLockManager::Impl { public: - Impl(const server::ClockPtr& clock, TabletServerIf* server) : clock_(clock), server_(server) {} + Impl( + const server::ClockPtr& clock, TabletServerIf* tablet_server, + server::RpcServerBase& messenger_server, ThreadPool* thread_pool) + : clock_(clock), server_(tablet_server), messenger_base_(messenger_server), + object_lock_manager_(thread_pool, messenger_server), + poller_("TSLocalLockManager", std::bind(&Impl::Poll, this)) {} ~Impl() = default; @@ -56,10 +72,34 @@ class TSLocalLockManager::Impl { return Status::OK(); } + Status CheckShutdown() { + return shutdown_ + ? 
STATUS_FORMAT(ShutdownInProgress, "Object Lock Manager Shutdown") : Status::OK(); + } + Status AcquireObjectLocks( const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, WaitForBootstrap wait) { + Synchronizer synchronizer; + DoAcquireObjectLocksAsync( + req, deadline, synchronizer.AsStdStatusCallback(), tserver::WaitForBootstrap::kFalse); + return synchronizer.Wait(); + } + + void DoAcquireObjectLocksAsync( + const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, + StdStatusCallback&& callback, WaitForBootstrap wait) { + auto s = PrepareAndExecuteAcquire(req, deadline, callback, wait); + if (!s.ok()) { + callback(s); + } + } + + Status PrepareAndExecuteAcquire( + const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, + StdStatusCallback& callback, WaitForBootstrap wait) { TRACE_FUNC(); + RETURN_NOT_OK(CheckShutdown()); auto txn = VERIFY_RESULT(FullyDecodeTransactionId(req.txn_id())); docdb::ObjectLockOwner object_lock_owner(txn, req.subtxn_id()); VLOG(3) << object_lock_owner.ToString() << " Acquiring lock : " << req.ShortDebugString(); @@ -80,17 +120,14 @@ class TSLocalLockManager::Impl { UpdateLeaseEpochIfNecessary(req.session_host_uuid(), req.lease_epoch()); auto keys_to_lock = VERIFY_RESULT(DetermineObjectsToLock(req.object_locks())); - if (object_lock_manager_.Lock(keys_to_lock.lock_batch, deadline, object_lock_owner)) { - TRACE("Successfully obtained object locks."); - return Status::OK(); - } - TRACE("Could not get the object locks."); - std::string batch_str; - if (FLAGS_dump_lock_keys) { - batch_str = Format(", batch: $0", keys_to_lock.lock_batch); - } - return STATUS_FORMAT( - TryAgain, "Failed to obtain object locks until deadline: $0$1", deadline, batch_str); + object_lock_manager_.Lock(docdb::LockData { + .key_to_lock = std::move(keys_to_lock), + .deadline = deadline, + .object_lock_owner = std::move(object_lock_owner), + .status_tablet = req.status_tablet(), + .start_time = 
MonoTime::FromUint64(req.propagated_hybrid_time()), + .callback = std::move(callback)}); + return Status::OK(); } Status WaitToApplyIfNecessary( @@ -129,6 +166,7 @@ class TSLocalLockManager::Impl { Status ReleaseObjectLocks( const tserver::ReleaseObjectLockRequestPB& req, CoarseTimePoint deadline) { + RETURN_NOT_OK(CheckShutdown()); auto txn = VERIFY_RESULT(FullyDecodeTransactionId(req.txn_id())); docdb::ObjectLockOwner object_lock_owner(txn, req.subtxn_id()); VLOG(3) << object_lock_owner.ToString() @@ -142,10 +180,37 @@ class TSLocalLockManager::Impl { if (req.has_db_catalog_version_data()) { server_->SetYsqlDBCatalogVersions(req.db_catalog_version_data()); } - object_lock_manager_.Unlock(object_lock_owner); + Status abort_status = req.has_abort_status() && req.abort_status().code() != AppStatusPB::OK + ? StatusFromPB(req.abort_status()) + : Status::OK(); + object_lock_manager_.Unlock(object_lock_owner, abort_status); return Status::OK(); } + void Poll() { + object_lock_manager_.Poll(); + } + + void Start(docdb::LocalWaitingTxnRegistry* waiting_txn_registry) { + object_lock_manager_.Start(waiting_txn_registry); + poller_.Start( + &messenger_base_.messenger()->scheduler(), 1ms * FLAGS_olm_poll_interval_ms); + } + + void Shutdown() { + shutdown_ = true; + poller_.Shutdown(); + { + yb::UniqueLock lock(mutex_); + while (!txns_in_progress_.empty()) { + WaitOnConditionVariableUntil(&cv_, &lock, CoarseMonoClock::Now() + 5s); + LOG_WITH_FUNC(WARNING) + << Format("Waiting for $0 in progress requests at the OLM", txns_in_progress_.size()); + } + } + object_lock_manager_.Shutdown(); + } + void UpdateLeaseEpochIfNecessary(const std::string& uuid, uint64_t lease_epoch) EXCLUDES(mutex_) { TRACE_FUNC(); std::lock_guard lock(mutex_); @@ -172,8 +237,8 @@ class TSLocalLockManager::Impl { yb::UniqueLock lock(mutex_); while (txns_in_progress_.find(txn_id) != txns_in_progress_.end()) { if (deadline <= CoarseMonoClock::Now()) { - LOG(ERROR) << "Failed to add txn " << txn_id << " to in 
progress txns until deadline: " - << ToString(deadline); + LOG(WARNING) << "Failed to add txn " << txn_id << " to in progress txns until deadline: " + << ToString(deadline); TRACE("Failed to add by deadline."); return STATUS_FORMAT( TryAgain, "Failed to add txn $0 to in progress txns until deadline: $1", txn_id, @@ -216,6 +281,11 @@ class TSLocalLockManager::Impl { return object_lock_manager_.TEST_WaitingLocksSize(); } + std::unordered_map + TEST_GetLockStateMapForTxn(const TransactionId& txn) const { + return object_lock_manager_.TEST_GetLockStateMapForTxn(txn); + } + void DumpLocksToHtml(std::ostream& out) { object_lock_manager_.DumpStatusHtml(out); } @@ -238,7 +308,6 @@ class TSLocalLockManager::Impl { private: const server::ClockPtr clock_; - docdb::ObjectLockManager object_lock_manager_; std::atomic_bool is_bootstrapped_{false}; std::unordered_map max_seen_lease_epoch_ GUARDED_BY(mutex_); std::unordered_set txns_in_progress_ GUARDED_BY(mutex_); @@ -246,29 +315,24 @@ class TSLocalLockManager::Impl { using LockType = std::mutex; LockType mutex_; TabletServerIf* server_; + server::RpcServerBase& messenger_base_; + docdb::ObjectLockManager object_lock_manager_; + std::atomic shutdown_{false}; + rpc::Poller poller_; }; -TSLocalLockManager::TSLocalLockManager(const server::ClockPtr& clock, TabletServerIf* server) - : impl_(new Impl(clock, server)) {} +TSLocalLockManager::TSLocalLockManager( + const server::ClockPtr& clock, TabletServerIf* tablet_server, + server::RpcServerBase& messenger_server, ThreadPool* thread_pool) + : impl_(new Impl( + clock, CHECK_NOTNULL(tablet_server), messenger_server, CHECK_NOTNULL(thread_pool))) {} TSLocalLockManager::~TSLocalLockManager() {} -Status TSLocalLockManager::AcquireObjectLocks( +void TSLocalLockManager::AcquireObjectLocksAsync( const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, - WaitForBootstrap wait) { - if (VLOG_IS_ON(4)) { - std::stringstream output; - impl_->DumpLocksToHtml(output); - VLOG(4) << 
"Dumping current state Before acquire : " << output.str(); - } - auto ret = impl_->AcquireObjectLocks(req, deadline, wait); - if (VLOG_IS_ON(3)) { - std::stringstream output; - impl_->DumpLocksToHtml(output); - VLOG(3) << "Acquire " << (ret.ok() ? "succeded" : "failed") - << ". Dumping current state After acquire : " << output.str(); - } - return ret; + StdStatusCallback&& callback, WaitForBootstrap wait) { + impl_->DoAcquireObjectLocksAsync(req, deadline, std::move(callback), wait); } Status TSLocalLockManager::ReleaseObjectLocks( @@ -287,6 +351,15 @@ Status TSLocalLockManager::ReleaseObjectLocks( return ret; } +void TSLocalLockManager::Start( + docdb::LocalWaitingTxnRegistry* waiting_txn_registry) { + return impl_->Start(waiting_txn_registry); +} + +void TSLocalLockManager::Shutdown() { + impl_->Shutdown(); +} + void TSLocalLockManager::DumpLocksToHtml(std::ostream& out) { return impl_->DumpLocksToHtml(out); } @@ -299,6 +372,10 @@ bool TSLocalLockManager::IsBootstrapped() const { return impl_->IsBootstrapped(); } +server::ClockPtr TSLocalLockManager::clock() const { + return impl_->clock(); +} + size_t TSLocalLockManager::TEST_WaitingLocksSize() const { return impl_->TEST_WaitingLocksSize(); } @@ -311,8 +388,9 @@ void TSLocalLockManager::TEST_MarkBootstrapped() { impl_->MarkBootstrapped(); } -server::ClockPtr TSLocalLockManager::clock() const { - return impl_->clock(); +std::unordered_map + TSLocalLockManager::TEST_GetLockStateMapForTxn(const TransactionId& txn) const { + return impl_->TEST_GetLockStateMapForTxn(txn); } } // namespace yb::tserver diff --git a/src/yb/tserver/ts_local_lock_manager.h b/src/yb/tserver/ts_local_lock_manager.h index dfde6765cfec..f225f342df88 100644 --- a/src/yb/tserver/ts_local_lock_manager.h +++ b/src/yb/tserver/ts_local_lock_manager.h @@ -18,15 +18,21 @@ #include #include "yb/common/common_fwd.h" -#include "yb/common/transaction.pb.h" -#include "yb/docdb/object_lock_manager.h" -#include "yb/dockv/value_type.h" +#include 
"yb/common/transaction.h" + #include "yb/server/clock.h" +#include "yb/server/server_fwd.h" + #include "yb/tserver/tablet_server_interface.h" #include "yb/tserver/tserver.pb.h" -#include "yb/util/status.h" -namespace yb::tserver { +#include "yb/util/status_callback.h" + +namespace yb { + +class ThreadPool; + +namespace tserver { YB_STRONGLY_TYPED_BOOL(WaitForBootstrap); @@ -49,7 +55,9 @@ YB_STRONGLY_TYPED_BOOL(WaitForBootstrap); // it with all exisitng DDL (global) locks. class TSLocalLockManager { public: - TSLocalLockManager(const server::ClockPtr& clock, TabletServerIf* server); + TSLocalLockManager( + const server::ClockPtr& clock, TabletServerIf* tablet_server, + server::RpcServerBase& messenger_server, ThreadPool* thread_pool); ~TSLocalLockManager(); // Tries acquiring object locks with the specified modes and registers them against the given @@ -62,16 +70,10 @@ class TSLocalLockManager { // conflicting lock types on a key given that there aren't other txns with active conflciting // locks on the key. // - // Continuous influx of readers can starve writers. For instance, if there are multiple txns - // requesting ACCESS_SHARE on a key, a writer requesting ACCESS_EXCLUSIVE may face starvation. - // Since we intend to use this for table locks, DDLs may face starvation if there is influx of - // conflicting DMLs. - // TODO: DDLs don't face starvation in PG. Address the above starvation problem. - // // TODO: Augment the 'pg_locks' path to show the acquired/waiting object/table level locks. - Status AcquireObjectLocks( + void AcquireObjectLocksAsync( const tserver::AcquireObjectLockRequestPB& req, CoarseTimePoint deadline, - WaitForBootstrap wait = WaitForBootstrap::kTrue); + StdStatusCallback&& callback, WaitForBootstrap wait = WaitForBootstrap::kTrue); // When subtxn id is set, releases all locks tagged against . Else releases all // object locks owned by . 
@@ -80,19 +82,29 @@ class TSLocalLockManager { // lock modes on a key multiple times, and will unlock them all with a single unlock rpc. Status ReleaseObjectLocks( const tserver::ReleaseObjectLockRequestPB& req, CoarseTimePoint deadline); + + void Start(docdb::LocalWaitingTxnRegistry* waiting_txn_registry); + + void Shutdown(); + void DumpLocksToHtml(std::ostream& out); Status BootstrapDdlObjectLocks(const tserver::DdlLockEntriesPB& resp); bool IsBootstrapped() const; + + server::ClockPtr clock() const; + size_t TEST_GrantedLocksSize() const; size_t TEST_WaitingLocksSize() const; void TEST_MarkBootstrapped(); - server::ClockPtr clock() const; + std::unordered_map + TEST_GetLockStateMapForTxn(const TransactionId& txn) const; private: class Impl; std::unique_ptr impl_; }; -} // namespace yb::tserver +} // namespace tserver +} // namespace yb diff --git a/src/yb/tserver/ts_tablet_manager.cc b/src/yb/tserver/ts_tablet_manager.cc index 0f70ddcd6619..b0b5cff57f0f 100644 --- a/src/yb/tserver/ts_tablet_manager.cc +++ b/src/yb/tserver/ts_tablet_manager.cc @@ -1998,7 +1998,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, auto s = ConsensusMetadata::Load( meta->fs_manager(), tablet_id, meta->fs_manager()->uuid(), &cmeta); if (!s.ok()) { - LOG(ERROR) << kLogPrefix << "Tablet failed to load consensus meta data: " << s; + LOG(DFATAL) << kLogPrefix << "Tablet failed to load consensus meta data: " << s; tablet_peer->SetFailed(s); return; } @@ -2010,7 +2010,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, tablet_id, fs_manager_, meta->wal_dir()); s = bootstrap_state_manager->Init(); if(!s.ok()) { - LOG(ERROR) << kLogPrefix << "Tablet failed to init bootstrap state manager: " << s; + LOG(DFATAL) << kLogPrefix << "Tablet failed to init bootstrap state manager: " << s; tablet_peer->SetFailed(s); return; } @@ -2042,7 +2042,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, if 
(GetAtomicFlag(&FLAGS_TEST_force_single_tablet_failure) && CompareAndSetFlag(&FLAGS_TEST_force_single_tablet_failure, true /* expected */, false /* val */)) { - LOG(ERROR) << "Setting the state of a tablet to FAILED"; + LOG(WARNING) << "Setting the state of a tablet to FAILED"; tablet_peer->SetFailed(STATUS(InternalError, "Setting tablet to failed state for test", tablet_id)); return; @@ -2052,7 +2052,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, // partially created tablet here? s = tablet_peer->SetBootstrapping(); if (!s.ok()) { - LOG(ERROR) << kLogPrefix << "Tablet failed to set bootstrapping: " << s; + LOG(DFATAL) << kLogPrefix << "Tablet failed to set bootstrapping: " << s; tablet_peer->SetFailed(s); return; } @@ -2156,8 +2156,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, flush_bootstrap_state_pool()); if (!s.ok()) { - LOG(ERROR) << kLogPrefix << "Tablet failed to init: " - << s.ToString(); + LOG(DFATAL) << kLogPrefix << "Tablet failed to init: " << s.ToString(); tablet_peer->SetFailed(s); return; } @@ -2168,8 +2167,7 @@ void TSTabletManager::OpenTablet(const RaftGroupMetadataPtr& meta, TRACE("Starting tablet peer"); s = tablet_peer->Start(bootstrap_info); if (!s.ok()) { - LOG(ERROR) << kLogPrefix << "Tablet failed to start: " - << s.ToString(); + LOG(DFATAL) << kLogPrefix << "Tablet failed to start: " << s; tablet_peer->SetFailed(s); return; } diff --git a/src/yb/tserver/tserver.proto b/src/yb/tserver/tserver.proto index 5c876733513d..3d2f9156ac3a 100644 --- a/src/yb/tserver/tserver.proto +++ b/src/yb/tserver/tserver.proto @@ -429,6 +429,7 @@ message AcquireObjectLockRequestPB { optional fixed64 ignore_after_hybrid_time = 6; optional fixed64 propagated_hybrid_time = 7; optional AshMetadataPB ash_metadata = 8; + optional bytes status_tablet = 9; } message AcquireObjectLockResponsePB { @@ -467,6 +468,7 @@ message ReleaseObjectLockRequestPB { // Used for tracking in-progress DDL unlock requests at the master. 
optional uint64 request_id = 8; optional AshMetadataPB ash_metadata = 9; + optional AppStatusPB abort_status = 10; } message ReleaseObjectLockResponsePB { diff --git a/src/yb/tserver/tserver_admin.proto b/src/yb/tserver/tserver_admin.proto index 22eb0a954233..2aef3caa8b0e 100644 --- a/src/yb/tserver/tserver_admin.proto +++ b/src/yb/tserver/tserver_admin.proto @@ -257,6 +257,9 @@ message FlushTabletsRequestPB { // Whether we want to perform operation for all vector indexes for the specified tablets. optional bool all_vector_indexes = 8; + + // Whether to rollover log before LOG_GC. + optional bool rollover = 9; } message FlushTabletsResponsePB { diff --git a/src/yb/tserver/xcluster_consumer.cc b/src/yb/tserver/xcluster_consumer.cc index b7ec84842d4f..e0d8932ae115 100644 --- a/src/yb/tserver/xcluster_consumer.cc +++ b/src/yb/tserver/xcluster_consumer.cc @@ -525,9 +525,9 @@ void XClusterConsumer::TriggerPollForNewTablets() { // it. consumer_namespace_name = ""; } else { - LOG(ERROR) << "Malformed sequences_data alias table ID: " << consumer_table_id - << "; skipping creation of a poller for a tablet belonging to that table: " - << consumer_tablet_info.tablet_id; + LOG(DFATAL) << "Malformed sequences_data alias table ID: " << consumer_table_id + << "; skipping creation of a poller for a tablet belonging to that table: " + << consumer_tablet_info.tablet_id; continue; } } else { diff --git a/src/yb/tserver/xcluster_ddl_queue_handler.cc b/src/yb/tserver/xcluster_ddl_queue_handler.cc index f864d6327a0e..9e5fce490b8a 100644 --- a/src/yb/tserver/xcluster_ddl_queue_handler.cc +++ b/src/yb/tserver/xcluster_ddl_queue_handler.cc @@ -35,6 +35,9 @@ DEFINE_RUNTIME_int32(xcluster_ddl_queue_max_retries_per_ddl, 5, "Maximum number of retries per DDL before we pause processing of the ddl_queue table."); +DEFINE_RUNTIME_uint32(xcluster_ddl_queue_statement_timeout_ms, 0, + "Statement timeout to use for executing DDLs from the ddl_queue table. 
0 means no timeout."); + DEFINE_test_flag(bool, xcluster_ddl_queue_handler_cache_connection, true, "Whether we should cache the ddl_queue handler's connection, or always recreate it."); @@ -405,6 +408,9 @@ Status XClusterDDLQueueHandler::ProcessDDLQuery(const DDLQueryInfo& query_info) setup_query << "SET yb_test_fail_next_ddl TO true;"; } + setup_query << Format( + "SET statement_timeout TO $0;", FLAGS_xcluster_ddl_queue_statement_timeout_ms); + RETURN_NOT_OK(RunAndLogQuery(setup_query.str())); RETURN_NOT_OK(ProcessFailedDDLQuery(RunAndLogQuery(query_info.query), query_info)); RETURN_NOT_OK( @@ -431,8 +437,8 @@ Status XClusterDDLQueueHandler::ProcessFailedDDLQuery( if (last_failed_query_ && last_failed_query_->MatchesQueryInfo(query_info)) { num_fails_for_this_ddl_++; if (num_fails_for_this_ddl_ >= FLAGS_xcluster_ddl_queue_max_retries_per_ddl) { - LOG_WITH_PREFIX(ERROR) << "Failed to process DDL after " << num_fails_for_this_ddl_ - << " retries. Pausing DDL replication."; + LOG_WITH_PREFIX(WARNING) << "Failed to process DDL after " << num_fails_for_this_ddl_ + << " retries. 
Pausing DDL replication."; } } else { last_failed_query_ = QueryIdentifier{query_info.ddl_end_time, query_info.query_id}; diff --git a/src/yb/tserver/xcluster_output_client.cc b/src/yb/tserver/xcluster_output_client.cc index 36bf927d04ab..96f8ba7f97f1 100644 --- a/src/yb/tserver/xcluster_output_client.cc +++ b/src/yb/tserver/xcluster_output_client.cc @@ -838,8 +838,8 @@ void XClusterOutputClient::HandleError(const Status& s) { LOG_WITH_PREFIX(WARNING) << "Retrying applying replicated record for consumer tablet: " << consumer_tablet_info_.tablet_id << ", reason: " << s; } else { - LOG_WITH_PREFIX(ERROR) << "Error while applying replicated record: " << s - << ", consumer tablet: " << consumer_tablet_info_.tablet_id; + LOG_WITH_PREFIX(WARNING) << "Error while applying replicated record: " << s + << ", consumer tablet: " << consumer_tablet_info_.tablet_id; } { ACQUIRE_MUTEX_IF_ONLINE_ELSE_RETURN; diff --git a/src/yb/tserver/xcluster_poller.cc b/src/yb/tserver/xcluster_poller.cc index 0aab3922d5a0..1caec37615ea 100644 --- a/src/yb/tserver/xcluster_poller.cc +++ b/src/yb/tserver/xcluster_poller.cc @@ -650,7 +650,7 @@ bool XClusterPoller::IsStuck() const { const auto lag = MonoTime::Now() - last_task_schedule_time_; if (lag > 1s * GetAtomicFlag(&FLAGS_xcluster_poller_task_delay_considered_stuck_secs)) { - LOG_WITH_PREFIX(ERROR) << "XCluster Poller has not executed any tasks for " << lag.ToString(); + LOG_WITH_PREFIX(DFATAL) << "XCluster Poller has not executed any tasks for " << lag.ToString(); return true; } return false; diff --git a/src/yb/tserver/ysql_lease.h b/src/yb/tserver/ysql_lease.h new file mode 100644 index 000000000000..bc4e1e51750e --- /dev/null +++ b/src/yb/tserver/ysql_lease.h @@ -0,0 +1,24 @@ +// Copyright (c) YugabyteDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations +// under the License. +// +#pragma once + +#include + +namespace yb::tserver { + +struct YSQLLeaseInfo { + bool is_live; + uint64_t lease_epoch; +}; + +} diff --git a/src/yb/tserver/ysql_lease_poller.cc b/src/yb/tserver/ysql_lease_poller.cc index 3cc244b4f98f..83d71cd15700 100644 --- a/src/yb/tserver/ysql_lease_poller.cc +++ b/src/yb/tserver/ysql_lease_poller.cc @@ -22,8 +22,9 @@ #include "yb/server/server_base.proxy.h" -#include "yb/tserver/tablet_server.h" #include "yb/tserver/master_leader_poller.h" +#include "yb/tserver/tablet_server.h" +#include "yb/tserver/ysql_lease.h" #include "yb/util/async_util.h" #include "yb/util/condition_variable.h" @@ -122,18 +123,23 @@ Status YsqlLeasePoller::Poll() { master::RefreshYsqlLeaseRequestPB req; *req.mutable_instance() = server_.instance_pb(); - req.set_needs_bootstrap(!server_.HasBootstrappedLocalLockManager()); + auto current_lease_info = VERIFY_RESULT(server_.GetYSQLLeaseInfo()); + if (current_lease_info.is_live) { + req.set_current_lease_epoch(current_lease_info.lease_epoch); + } + req.set_local_request_send_time_ms(std::chrono::duration_cast( + CoarseMonoClock::now().time_since_epoch()) + .count()); rpc::RpcController rpc; rpc.set_timeout(timeout); master::RefreshYsqlLeaseResponsePB resp; - MonoTime pre_request_time = MonoTime::Now(); RETURN_NOT_OK(proxy_->RefreshYsqlLease(req, &resp, &rpc)); if (RandomActWithProbability( GetAtomicFlag(&FLAGS_TEST_tserver_ysql_lease_refresh_failure_prob))) { return STATUS_FORMAT(NetworkError, "Pretending to fail ysql lease refresh RPC"); } RETURN_NOT_OK(ResponseStatus(resp)); - return 
server_.ProcessLeaseUpdate(resp.info(), pre_request_time); + return server_.ProcessLeaseUpdate(resp.info()); } MonoDelta YsqlLeasePoller::IntervalToNextPoll(int32_t consecutive_failures) { diff --git a/src/yb/util/allocation_tracker.cc b/src/yb/util/allocation_tracker.cc index 02548b0a15dd..c6f62f46d05b 100644 --- a/src/yb/util/allocation_tracker.cc +++ b/src/yb/util/allocation_tracker.cc @@ -25,13 +25,13 @@ AllocationTrackerBase::~AllocationTrackerBase() { #ifndef NDEBUG std::lock_guard lock(mutex_); for (auto& pair : objects_) { - LOG(ERROR) << "Error of type " << name_ << " not destroyed, id: " << pair.second.second - << ", created at: " << pair.second.first; + LOG(DFATAL) << "Error of type " << name_ << " not destroyed, id: " << pair.second.second + << ", created at: " << pair.second.first; } #else if (count_) { - LOG(ERROR) << "Not all objects of type " << name_ << " were destroyed, " - << count_ << " objects left"; + LOG(DFATAL) << "Not all objects of type " << name_ << " were destroyed, " + << count_ << " objects left"; } #endif } diff --git a/src/yb/util/async_util.cc b/src/yb/util/async_util.cc index 72c7f688bdeb..c328bf2eccc9 100644 --- a/src/yb/util/async_util.cc +++ b/src/yb/util/async_util.cc @@ -104,7 +104,7 @@ void Synchronizer::EnsureWaitDone() { LOG(FATAL) << kErrorMsg; #else const int kWaitSec = 10; - YB_LOG_EVERY_N_SECS(ERROR, 1) << kErrorMsg << " Waiting up to " << kWaitSec << " seconds"; + YB_LOG_EVERY_N_SECS(DFATAL, 1) << kErrorMsg << " Waiting up to " << kWaitSec << " seconds"; CHECK_OK(WaitFor(MonoDelta::FromSeconds(kWaitSec))); #endif } diff --git a/src/yb/util/env_posix.cc b/src/yb/util/env_posix.cc index 7346ff612040..cc203ef854fe 100644 --- a/src/yb/util/env_posix.cc +++ b/src/yb/util/env_posix.cc @@ -401,7 +401,7 @@ class PosixWritableFile : public WritableFile { if (sync_on_close_) { Status sync_status = Sync(); if (!sync_status.ok()) { - LOG(ERROR) << "Unable to Sync " << filename_ << ": " << sync_status.ToString(); + LOG(WARNING) << 
"Unable to Sync " << filename_ << ": " << sync_status; if (s.ok()) { s = sync_status; } @@ -908,7 +908,7 @@ class PosixRWFile final : public RWFile { // Virtual function call in destructor. s = Sync(); if (!s.ok()) { - LOG(ERROR) << "Unable to Sync " << filename_ << ": " << s.ToString(); + LOG(WARNING) << "Unable to Sync " << filename_ << ": " << s; } } diff --git a/src/yb/util/env_util.cc b/src/yb/util/env_util.cc index d03eb67fa160..15c9521b53cb 100644 --- a/src/yb/util/env_util.cc +++ b/src/yb/util/env_util.cc @@ -121,7 +121,7 @@ std::pair FindRootDir(const std::string& search_for_dir) { std::string GetRootDir(const std::string& search_for_dir) { auto [status, path] = FindRootDir(search_for_dir); if (!status.ok()) { - LOG(ERROR) << status.ToString(); + LOG(WARNING) << status; } return path; } diff --git a/src/yb/util/file_system_posix.cc b/src/yb/util/file_system_posix.cc index 299408212f8d..6d4b1c2a4128 100644 --- a/src/yb/util/file_system_posix.cc +++ b/src/yb/util/file_system_posix.cc @@ -355,7 +355,7 @@ Status PosixWritableFile::Close() { << " filesize_: " << filesize_; } if (ftruncate(fd_, filesize_) != 0) { - LOG(ERROR) << STATUS_IO_ERROR(filename_, errno) << " filesize_: " << filesize_; + LOG(WARNING) << STATUS_IO_ERROR(filename_, errno) << " filesize_: " << filesize_; } #ifdef ROCKSDB_FALLOCATE_PRESENT // in some file systems, ftruncate only trims trailing space if the @@ -374,9 +374,9 @@ Status PosixWritableFile::Close() { if (fallocate( fd_, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, filesize_, block_size * last_allocated_block - filesize_) != 0) { - LOG(ERROR) << STATUS_IO_ERROR(filename_, errno) << " block_size: " << block_size - << " last_allocated_block: " << last_allocated_block - << " filesize_: " << filesize_; + LOG(WARNING) << STATUS_IO_ERROR(filename_, errno) << " block_size: " << block_size + << " last_allocated_block: " << last_allocated_block + << " filesize_: " << filesize_; } } #endif diff --git a/src/yb/util/lockfree-test.cc 
b/src/yb/util/lockfree-test.cc index 8f46a29fbb6f..8ef7d870f951 100644 --- a/src/yb/util/lockfree-test.cc +++ b/src/yb/util/lockfree-test.cc @@ -388,7 +388,8 @@ TEST(LockfreeTest, QueuePerformance) { helper.Perform(0x10, true); } -TEST(LockfreeTest, Stack) { +template class Collection> +void TestIntrusive() { constexpr int kNumEntries = 100; constexpr int kNumThreads = 5; @@ -396,11 +397,11 @@ TEST(LockfreeTest, Stack) { int value; }; - LockFreeStack stack; + Collection collection; std::vector entries(kNumEntries); for (int i = 0; i != kNumEntries; ++i) { entries[i].value = i; - stack.Push(&entries[i]); + collection.Push(&entries[i]); } TestThreadHolder holder; @@ -408,24 +409,24 @@ TEST(LockfreeTest, Stack) { // Each thread randomly does one of // 1) pull items from shared stack and store it to local set. // 2) push random item from local set to shared stack. - holder.AddThread([&stack, &stop = holder.stop_flag()] { + holder.AddThread([&collection, &stop = holder.stop_flag()] { std::vector local; while (!stop.load(std::memory_order_acquire)) { bool push = !local.empty() && RandomUniformInt(0, 1); if (push) { size_t index = RandomUniformInt(0, local.size() - 1); - stack.Push(local[index]); + collection.Push(local[index]); local[index] = local.back(); local.pop_back(); } else { - auto entry = stack.Pop(); + auto entry = collection.Pop(); if (entry) { local.push_back(entry); } } } while (!local.empty()) { - stack.Push(local.back()); + collection.Push(local.back()); local.pop_back(); } }); @@ -435,14 +436,14 @@ TEST(LockfreeTest, Stack) { std::vector content; while (content.size() <= kNumEntries) { - auto entry = stack.Pop(); + auto entry = collection.Pop(); if (!entry) { break; } content.push_back(entry->value); } - LOG(INFO) << "Content: " << yb::ToString(content); + LOG(INFO) << "Content: " << AsString(content); ASSERT_EQ(content.size(), kNumEntries); @@ -452,6 +453,14 @@ TEST(LockfreeTest, Stack) { } } +TEST(LockfreeTest, Stack) { + TestIntrusive(); +} + 
+TEST(LockfreeTest, SemiFairQueue) { + TestIntrusive(); +} + TEST(LockfreeTest, WriteOnceWeakPtr) { std::shared_ptr hello = std::make_shared("Hello"); std::shared_ptr world = std::make_shared("world"); diff --git a/src/yb/util/lockfree.h b/src/yb/util/lockfree.h index 6152d51488e6..7e98f1f7d15e 100644 --- a/src/yb/util/lockfree.h +++ b/src/yb/util/lockfree.h @@ -179,8 +179,8 @@ class LockFreeStack { CHECK(IsAcceptableAtomicImpl(head_)); } - void Clear() { - head_.store(nullptr, std::memory_order_release); + bool Empty() const { + return head_.load(boost::memory_order_relaxed).pointer == nullptr; } void Push(T* value) { @@ -223,6 +223,48 @@ class LockFreeStack { boost::atomic head_{Head{nullptr, 0}}; }; +// SemiFairQueue does not guarantee that pushed values will be popped in exactly the same order +// as they were pushed. +// But order will be the same as long as the consumer keeps up with the producer. +// This is useful to implement thread pool, since it does not guarantee exact task execution order. +// A single stack is not suitable for thread pool because older tasks would get starved. +// SemiFairQueue uses two stacks to sort the tasks in the correct order with a high enough +// probability. +template +class SemiFairQueue { + public: + bool Empty() const { + return write_stack_.Empty() && read_stack_.Empty(); + } + + void Push(T* value) { + write_stack_.Push(value); + } + + T* Pop() { + auto result = read_stack_.Pop(); + if (result) { + return result; + } + result = write_stack_.Pop(); + if (!result) { + return nullptr; + } + for (;;) { + auto next = write_stack_.Pop(); + if (!next) { + return result; + } + read_stack_.Push(result); + result = next; + } + } + + private: + LockFreeStack write_stack_; + LockFreeStack read_stack_; +}; + // A weak pointer that can only be written to once, but can be read and written in a lock-free way. 
template class WriteOnceWeakPtr { diff --git a/src/yb/util/logging.h b/src/yb/util/logging.h index 1853e32b32c5..407624da54c7 100644 --- a/src/yb/util/logging.h +++ b/src/yb/util/logging.h @@ -138,6 +138,17 @@ enum PRIVATE_ThrottleMsg {THROTTLE_MSG}; // benign races on their internal static variables. //////////////////////////////////////////////////////////////////////////////// +#define YB_GLOG_SEVERITY_INFO google::GLOG_INFO +#define YB_GLOG_SEVERITY_WARNING google::GLOG_WARNING +#define YB_GLOG_SEVERITY_ERROR google::GLOG_ERROR +#define YB_GLOG_SEVERITY_FATAL google::GLOG_FATAL +#if DCHECK_IS_ON() +#define YB_GLOG_SEVERITY_DFATAL google::GLOG_FATAL +#else +#define YB_GLOG_SEVERITY_DFATAL google::GLOG_ERROR +#endif +#define YB_GLOG_SEVERITY(severity) BOOST_PP_CAT(YB_GLOG_SEVERITY_, severity) + // The "base" macros. #define YB_SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \ @@ -147,7 +158,7 @@ enum PRIVATE_ThrottleMsg {THROTTLE_MSG}; if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \ if (LOG_OCCURRENCES_MOD_N == 1) \ google::LogMessage( \ - __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \ + __FILE__, __LINE__, YB_GLOG_SEVERITY(severity), LOG_OCCURRENCES, \ &what_to_do).stream() #define YB_SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \ @@ -158,7 +169,7 @@ enum PRIVATE_ThrottleMsg {THROTTLE_MSG}; if (condition && \ ((LOG_OCCURRENCES_MOD_N=(LOG_OCCURRENCES_MOD_N + 1) % n) == (1 % n))) \ google::LogMessage( \ - __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \ + __FILE__, __LINE__, YB_GLOG_SEVERITY(severity), LOG_OCCURRENCES, \ &what_to_do).stream() #define YB_SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \ @@ -169,7 +180,7 @@ enum PRIVATE_ThrottleMsg {THROTTLE_MSG}; if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \ if (LOG_OCCURRENCES_MOD_N == 1) \ google::ErrnoLogMessage( \ - __FILE__, __LINE__, google::GLOG_ ## 
severity, LOG_OCCURRENCES, \ + __FILE__, __LINE__, YB_GLOG_SEVERITY(severity), LOG_OCCURRENCES, \ &what_to_do).stream() #define YB_SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \ @@ -177,13 +188,11 @@ enum PRIVATE_ThrottleMsg {THROTTLE_MSG}; ANNOTATE_BENIGN_RACE(&LOG_OCCURRENCES, "Logging the first N is approximate"); \ if (LOG_OCCURRENCES++ < (n)) \ google::LogMessage( \ - __FILE__, __LINE__, google::GLOG_ ## severity, static_cast(LOG_OCCURRENCES), \ - &what_to_do).stream() + __FILE__, __LINE__, YB_GLOG_SEVERITY(severity), \ + static_cast(LOG_OCCURRENCES), &what_to_do).stream() // The direct user-facing macros. #define YB_LOG_EVERY_N(severity, n) \ - static_assert(google::GLOG_ ## severity < google::NUM_SEVERITIES, \ - "Invalid requested log severity"); \ YB_SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToLog) #define YB_LOG_WITH_PREFIX_EVERY_N(severity, n) YB_LOG_EVERY_N(severity, n) << LogPrefix() diff --git a/src/yb/util/memory/memory.h b/src/yb/util/memory/memory.h index ceceb71c1bb4..fa5c29e9862a 100644 --- a/src/yb/util/memory/memory.h +++ b/src/yb/util/memory/memory.h @@ -966,10 +966,10 @@ void Quota::Free(size_t amount) { usage_ -= amount; // threads allocate/free memory concurrently via the same Quota object that is // not protected with a mutex (thread_safe == false). 
- if (usage_ > (std::numeric_limits::max() - (1 << 28))) { - LOG(ERROR) << "Suspiciously big usage_ value: " << usage_ - << " (could be a result size_t wrapping around below 0, " - << "for example as a result of race condition)."; + if (usage_ > std::numeric_limits::max() / 2) { + LOG(DFATAL) << "Suspiciously big usage_ value: " << usage_ + << " (could be a result size_t wrapping around below 0, " + << "for example as a result of race condition)."; } } diff --git a/src/yb/util/metrics.h b/src/yb/util/metrics.h index bec437511525..5e101bb756d0 100644 --- a/src/yb/util/metrics.h +++ b/src/yb/util/metrics.h @@ -1003,8 +1003,6 @@ class Counter : public Metric { DISALLOW_COPY_AND_ASSIGN(Counter); }; -using CounterPtr = scoped_refptr; - class MillisLagPrototype : public MetricPrototype { public: explicit MillisLagPrototype(const MetricPrototype::CtorArgs& args) : MetricPrototype(args) { @@ -1288,8 +1286,6 @@ class EventStats : public BaseStats { DISALLOW_COPY_AND_ASSIGN(EventStats); }; -using EventStatsPtr = scoped_refptr; - template inline void IncrementStats(const scoped_refptr& stats, int64_t value) { if (stats) { diff --git a/src/yb/util/metrics_fwd.h b/src/yb/util/metrics_fwd.h index 0890c895c7a4..f0ec471c4f81 100644 --- a/src/yb/util/metrics_fwd.h +++ b/src/yb/util/metrics_fwd.h @@ -30,6 +30,7 @@ class HistogramPrototype; class HistogramSnapshotPB; class HdrHistogram; class Metric; +class MetricEntity; class MetricEntityPrototype; class MetricPrototype; class MetricRegistry; @@ -43,7 +44,8 @@ class StatsOnlyHistogram; struct MetricJsonOptions; struct MetricPrometheusOptions; -class MetricEntity; +using CounterPtr = scoped_refptr; +using EventStatsPtr = scoped_refptr; using MetricEntityPtr = scoped_refptr; template diff --git a/src/yb/util/net/dns_resolver-test.cc b/src/yb/util/net/dns_resolver-test.cc index ca2306816ce2..fda57c14ea31 100644 --- a/src/yb/util/net/dns_resolver-test.cc +++ b/src/yb/util/net/dns_resolver-test.cc @@ -58,7 +58,7 @@ class DnsResolverTest 
: public YBTest { io_thread_ = CHECK_RESULT(Thread::Make("io_thread", "io_thread", [this] { boost::system::error_code ec; io_service_.run(ec); - LOG_IF(ERROR, ec) << "Failed to run io service: " << ec; + LOG_IF(DFATAL, ec) << "Failed to run io service: " << ec; })); } @@ -72,7 +72,7 @@ class DnsResolverTest : public YBTest { auto deadline = std::chrono::steady_clock::now() + 15s; while (!io_service_.stopped()) { if (std::chrono::steady_clock::now() >= deadline) { - LOG(ERROR) << "Io service failed to stop"; + LOG(DFATAL) << "Io service failed to stop"; io_service_.stop(); break; } diff --git a/src/yb/util/net/inetaddress.cc b/src/yb/util/net/inetaddress.cc index 835bd45923f4..4b51a66bee36 100644 --- a/src/yb/util/net/inetaddress.cc +++ b/src/yb/util/net/inetaddress.cc @@ -156,8 +156,8 @@ void FilterAddresses(const string& filter_spec, vector* addresses) { if (filter_it != kFilters->end()) { filters.push_back(&filter_it->second); } else { - LOG(ERROR) << "Unknown filter spec " << filter_name << " in filter spec " - << filter_spec; + LOG(DFATAL) << "Unknown filter spec " << filter_name << " in filter spec " + << filter_spec; } } diff --git a/src/yb/util/net/net_util.cc b/src/yb/util/net/net_util.cc index f95d405c86d2..921a52d8cfbc 100644 --- a/src/yb/util/net/net_util.cc +++ b/src/yb/util/net/net_util.cc @@ -146,7 +146,7 @@ Status HostPort::RemoveAndGetHostPortList( out.str(master_server_addr); out.str(" "); } - LOG(ERROR) << out.str(); + LOG(DFATAL) << out.str(); return STATUS_SUBSTITUTE(NotFound, "Cannot find $0 in addresses: $1", diff --git a/src/yb/util/physical_time.cc b/src/yb/util/physical_time.cc index 6b7142b8df15..6d7891f8734f 100644 --- a/src/yb/util/physical_time.cc +++ b/src/yb/util/physical_time.cc @@ -81,7 +81,7 @@ Status CallAdjTime(timex* tx) { ErrnoToString(errno)); case TIME_ERROR: if (FLAGS_disable_clock_sync_error) { - YB_LOG_EVERY_N_SECS(ERROR, 15) << "Clock unsynchronized, status: " << tx->status; + YB_LOG_EVERY_N_SECS(DFATAL, 15) << "Clock 
unsynchronized, status: " << tx->status; return Status::OK(); } return STATUS_FORMAT( @@ -89,8 +89,8 @@ Status CallAdjTime(timex* tx) { tx->status); default: // TODO what to do about leap seconds? see KUDU-146 - YB_LOG_FIRST_N(ERROR, 1) << "Server undergoing leap second. This may cause consistency " - << "issues (rc=" << rc << ")"; + YB_LOG_FIRST_N(DFATAL, 1) << "Server undergoing leap second. This may cause consistency " + << "issues (rc=" << rc << ")"; return Status::OK(); } } diff --git a/src/yb/util/result-test.cc b/src/yb/util/result-test.cc index ebc4b31e6834..82a7dec2f984 100644 --- a/src/yb/util/result-test.cc +++ b/src/yb/util/result-test.cc @@ -307,7 +307,6 @@ void TestNotOk(T t) { const auto LogPrefix = []() -> std::string { return "prefix"; }; WARN_NOT_OK(t, "boo"); WARN_WITH_PREFIX_NOT_OK(t, "foo"); - ERROR_NOT_OK(t, "moo"); } } // namespace diff --git a/src/yb/util/shared_mem.cc b/src/yb/util/shared_mem.cc index 65929b6f9c78..815f7af86dee 100644 --- a/src/yb/util/shared_mem.cc +++ b/src/yb/util/shared_mem.cc @@ -95,8 +95,8 @@ std::string GetSharedMemoryDirectory() { } } } else if (errno != ENOENT) { - LOG(ERROR) << "Unexpected error when reading /proc/mounts: errno=" << errno - << ": " << ErrnoToString(errno); + LOG(DFATAL) << "Unexpected error when reading /proc/mounts: errno=" << errno + << ": " << ErrnoToString(errno); } #endif @@ -117,8 +117,8 @@ int TryMemfdCreate() { struct utsname uts_name; if (uname(&uts_name) == -1) { - LOG(ERROR) << "Failed to get kernel name information: errno=" << errno - << ": " << ErrnoToString(errno); + LOG(DFATAL) << "Failed to get kernel name information: errno=" << errno + << ": " << ErrnoToString(errno); return fd; } @@ -140,8 +140,8 @@ int TryMemfdCreate() { fd = memfd_create(); if (fd == -1) { - LOG(ERROR) << "Error creating shared memory via memfd_create: errno=" << errno - << ": " << ErrnoToString(errno); + LOG(DFATAL) << "Error creating shared memory via memfd_create: errno=" << errno + << ": " << 
ErrnoToString(errno); } } @@ -180,9 +180,9 @@ Result CreateTempSharedMemoryFile() { // Immediately unlink the file to so it will be removed when all file descriptors close. if (unlink(temp_file_path.c_str()) == -1) { - LOG(ERROR) << "Leaking shared memory file '" << temp_file_path - << "' after failure to unlink: errno=" << errno - << ": " << ErrnoToString(errno); + LOG(DFATAL) << "Leaking shared memory file '" << temp_file_path + << "' after failure to unlink: errno=" << errno + << ": " << ErrnoToString(errno); } break; } @@ -255,8 +255,8 @@ SharedMemorySegment::SharedMemorySegment(SharedMemorySegment&& other) SharedMemorySegment::~SharedMemorySegment() { if (base_address_ && munmap(base_address_, segment_size_) == -1) { - LOG(ERROR) << "Failed to unmap shared memory segment: errno=" << errno - << ": " << ErrnoToString(errno); + LOG(DFATAL) << "Failed to unmap shared memory segment: errno=" << errno + << ": " << ErrnoToString(errno); } if (fd_ != -1) { diff --git a/src/yb/util/stack_trace_tracker.cc b/src/yb/util/stack_trace_tracker.cc index 05a9b36ace91..dff0cc8bffef 100644 --- a/src/yb/util/stack_trace_tracker.cc +++ b/src/yb/util/stack_trace_tracker.cc @@ -66,8 +66,8 @@ class GlobalStackTraceTracker { if (entry.symbolized_trace.empty()) { auto s = StackTrace::MakeStackTrace(frames); if (!s.ok()) { - LOG(ERROR) << "Bad stack trace frames: " - << Slice(frames.data(), frames.size()).ToDebugString(); + LOG(DFATAL) << "Bad stack trace frames: " + << Slice(frames.data(), frames.size()).ToDebugString(); } entry.symbolized_trace = s->Symbolize(); } diff --git a/src/yb/util/status_log.h b/src/yb/util/status_log.h index 1325ca5fe7ec..5ccdf5f10632 100644 --- a/src/yb/util/status_log.h +++ b/src/yb/util/status_log.h @@ -49,7 +49,7 @@ #define ERROR_NOT_OK(to_call, error_prefix) do { \ auto&& _s = (to_call); \ if (PREDICT_FALSE(!_s.ok())) { \ - YB_LOG(ERROR) << (error_prefix) << ": " << StatusToString(_s); \ + YB_LOG(DFATAL) << (error_prefix) << ": " << StatusToString(_s); \ 
} \ } while (0); @@ -67,6 +67,15 @@ YB_CHECK(_s.ok()) << (msg) << ": " << StatusToString(_s); \ } while (0); +#define YB_RETURN_NOT_OK_WITH_WARNING(expr, warning_prefix) \ + do { \ + auto&& _s = (expr); \ + if (PREDICT_FALSE(!_s.ok())) { \ + YB_LOG(WARNING) << (warning_prefix) << ": " << StatusToString(_s); \ + return MoveStatus(std::move(_s)); \ + } \ + } while (0); + // If the status is bad, CHECK immediately, appending the status to the logged message. #define YB_CHECK_OK(s) YB_CHECK_OK_PREPEND(s, "Bad status") @@ -77,6 +86,7 @@ #define LOG_AND_RETURN YB_LOG_AND_RETURN #define CHECK_OK_PREPEND YB_CHECK_OK_PREPEND #define CHECK_OK YB_CHECK_OK +#define RETURN_NOT_OK_WITH_WARNING YB_RETURN_NOT_OK_WITH_WARNING // These are standard glog macros. #define YB_LOG LOG diff --git a/src/yb/util/trace.cc b/src/yb/util/trace.cc index 7851f97c0717..6218b93a74fc 100644 --- a/src/yb/util/trace.cc +++ b/src/yb/util/trace.cc @@ -389,7 +389,8 @@ TraceEntry* Trace::NewEntry( size_t size = offsetof(TraceEntry, message) + msg_len; void* dst = arena->AllocateBytesAligned(size, alignof(TraceEntry)); if (dst == nullptr) { - LOG(ERROR) << "NewEntry(msg_len, " << file_path << ", " << line_number + LOG(DFATAL) + << "NewEntry(msg_len, " << file_path << ", " << line_number << ") received nullptr from AllocateBytes.\n So far:" << DumpToString(true); return nullptr; } diff --git a/src/yb/util/ulimit_util.cc b/src/yb/util/ulimit_util.cc index 8e344a0821d0..81aab3cb2031 100644 --- a/src/yb/util/ulimit_util.cc +++ b/src/yb/util/ulimit_util.cc @@ -202,8 +202,8 @@ void UlimitUtil::InitUlimits() { const auto limits_or_status = Env::Default()->GetUlimit(resource_id); if (!limits_or_status.ok()) { - LOG(ERROR) << "Unable to fetch hard limit for resource " << resource_name - << " Skipping initialization."; + LOG(DFATAL) << "Unable to fetch hard limit for resource " << resource_name + << " Skipping initialization."; continue; } @@ -221,8 +221,8 @@ void UlimitUtil::InitUlimits() { Status set_ulim_status 
= Env::Default()->SetUlimit(resource_id, new_soft_limit, resource_name); if (!set_ulim_status.ok()) { - LOG(ERROR) << "Unable to set new soft limit for resource " << resource_name - << " error: " << set_ulim_status.ToString(); + LOG(DFATAL) << "Unable to set new soft limit for resource " << resource_name + << " error: " << set_ulim_status.ToString(); } } } diff --git a/src/yb/vector_index/vector_lsm.cc b/src/yb/vector_index/vector_lsm.cc index 02edcf74d311..72a91bd21d75 100644 --- a/src/yb/vector_index/vector_lsm.cc +++ b/src/yb/vector_index/vector_lsm.cc @@ -1491,7 +1491,7 @@ void VectorLSM::DeleteFile(const VectorLSMFileMetaData& if (status.ok()) { LOG_WITH_PREFIX(INFO) << "Deleted file " << path; } else { - LOG_WITH_PREFIX(ERROR) << "Failed to delete file " << path << ", status: " << status; + LOG_WITH_PREFIX(DFATAL) << "Failed to delete file " << path << ", status: " << status; } } diff --git a/src/yb/yql/cql/cqlserver/cql_processor.cc b/src/yb/yql/cql/cqlserver/cql_processor.cc index b067969aa551..7a31c8a67f47 100644 --- a/src/yb/yql/cql/cqlserver/cql_processor.cc +++ b/src/yb/yql/cql/cqlserver/cql_processor.cc @@ -338,7 +338,7 @@ bool CQLProcessor::CheckAuthentication(const CQLRequest& req) const { unique_ptr CQLProcessor::ProcessRequest(const CQLRequest& req) { if (FLAGS_use_cassandra_authentication && !CheckAuthentication(req)) { - LOG(ERROR) << "Could not execute statement by not authenticated user!"; + LOG(WARNING) << "Could not execute statement by not authenticated user!"; return make_unique( req, ErrorResponse::Code::SERVER_ERROR, "Could not execute statement by not authenticated user"); @@ -588,7 +588,7 @@ unique_ptr CQLProcessor::ProcessRequest(const AuthResponseRequest& "Could not prepare statement for querying user " + params.username); } if (!stmt->ExecuteAsync(this, params, statement_executed_cb_).ok()) { - LOG(ERROR) << "Could not execute prepared statement to fetch login info!"; + LOG(WARNING) << "Could not execute prepared statement to fetch 
login info!"; return make_unique( req, ErrorResponse::Code::SERVER_ERROR, "Could not execute prepared statement for querying roles for user " + params.username); @@ -675,7 +675,7 @@ unique_ptr CQLProcessor::ProcessError(const Status& s, } } - LOG(ERROR) << "Internal error: invalid error code " << static_cast(GetErrorCode(s)); + LOG(WARNING) << "Internal error: invalid error code " << static_cast(GetErrorCode(s)); return make_unique(*request_, ErrorResponse::Code::SERVER_ERROR, "Invalid error code"); } else if (s.IsNotAuthorized()) { @@ -908,7 +908,7 @@ Result CheckLDAPAuth(const ql::AuthResponseRequest::AuthQueryParameters& p << FLAGS_ycql_ldap_server << "': " << LDAPError(r, ldap); auto error_msg = str.str(); if (r == LDAP_INVALID_CREDENTIALS) { - LOG(ERROR) << error_msg; + LOG(WARNING) << error_msg; return false; } @@ -1069,7 +1069,7 @@ unique_ptr CQLProcessor::ProcessResult(const ExecutedResult::Shared // default: fall through. } - LOG(ERROR) << "Internal error: unknown result type " << static_cast(result->type()); + LOG(WARNING) << "Internal error: unknown result type " << static_cast(result->type()); return make_unique( *request_, ErrorResponse::Code::SERVER_ERROR, "Internal error: unknown result type"); } diff --git a/src/yb/yql/cql/cqlserver/cql_rpc.cc b/src/yb/yql/cql/cqlserver/cql_rpc.cc index 9c6fdfeb00b5..5b9f025a4f76 100644 --- a/src/yb/yql/cql/cqlserver/cql_rpc.cc +++ b/src/yb/yql/cql/cqlserver/cql_rpc.cc @@ -224,7 +224,7 @@ void CQLInboundCall::RespondFailure(rpc::ErrorStatusPB::RpcErrorCodePB error_cod case rpc::ErrorStatusPB::FATAL_VERSION_MISMATCH: FALLTHROUGH_INTENDED; case rpc::ErrorStatusPB::FATAL_UNAUTHORIZED: FALLTHROUGH_INTENDED; case rpc::ErrorStatusPB::FATAL_UNKNOWN: { - LOG(ERROR) << "Unexpected error status: " + LOG(WARNING) << "Unexpected error status: " << rpc::ErrorStatusPB::RpcErrorCodePB_Name(error_code); ErrorResponse(stream_id_, ErrorResponse::Code::SERVER_ERROR, "Server error") .Serialize(compression_scheme, &msg); diff --git 
a/src/yb/yql/cql/cqlserver/system_query_cache.cc b/src/yb/yql/cql/cqlserver/system_query_cache.cc index 0e4f13854846..4921d38f57a0 100644 --- a/src/yb/yql/cql/cqlserver/system_query_cache.cc +++ b/src/yb/yql/cql/cqlserver/system_query_cache.cc @@ -262,7 +262,7 @@ void SystemQueryCache::ExecuteSync(const std::string& stmt, Status* status, ExecutedResult::SharedPtr* result_ptr) { const auto processor = service_impl_->GetProcessor(); if (!processor.ok()) { - LOG(ERROR) << "Unable to get CQLProcessor for system query cache"; + LOG(DFATAL) << "Unable to get CQLProcessor for system query cache"; *status = processor.status(); return; } diff --git a/src/yb/yql/cql/ql/parser/scanner_util.cc b/src/yb/yql/cql/ql/parser/scanner_util.cc index 7dd685b25a1f..788f5ef40545 100644 --- a/src/yb/yql/cql/ql/parser/scanner_util.cc +++ b/src/yb/yql/cql/ql/parser/scanner_util.cc @@ -45,7 +45,7 @@ unsigned int hexval(unsigned char c) { if (c >= 'A' && c <= 'F') return c - 'A' + 0xA; - LOG(ERROR) << "invalid hexadecimal digit"; + LOG(DFATAL) << "invalid hexadecimal digit"; return 0; /* not reached */ } @@ -233,7 +233,7 @@ void report_invalid_encoding(const char *mbstr, size_t len) { p += sprintf(p, " "); // NOLINT(*) } - LOG(ERROR) << "SQL Error: " << ErrorText(ErrorCode::CHARACTER_NOT_IN_REPERTOIRE) + LOG(DFATAL) << "SQL Error: " << ErrorText(ErrorCode::CHARACTER_NOT_IN_REPERTOIRE) << ". Invalid byte sequence for UTF8 \"" << buf << "\""; } diff --git a/src/yb/yql/cql/ql/ptree/pt_dml_write_property.cc b/src/yb/yql/cql/ql/ptree/pt_dml_write_property.cc index 9f90d41dfb28..91f18adeaba7 100644 --- a/src/yb/yql/cql/ql/ptree/pt_dml_write_property.cc +++ b/src/yb/yql/cql/ql/ptree/pt_dml_write_property.cc @@ -95,7 +95,7 @@ Status PTDmlWritePropertyListNode::Analyze(SemContext *sem_context) { for (PTDmlWriteProperty::SharedPtr tnode : node_list()) { if (tnode == nullptr) { // This shouldn't happen because AppendList ignores null nodes. 
- LOG(ERROR) << "Invalid null property"; + LOG(DFATAL) << "Invalid null property"; continue; } switch(tnode->property_type()) { diff --git a/src/yb/yql/cql/ql/ptree/pt_table_property.cc b/src/yb/yql/cql/ql/ptree/pt_table_property.cc index 1ddca04ca7b6..30e2f3d102b0 100644 --- a/src/yb/yql/cql/ql/ptree/pt_table_property.cc +++ b/src/yb/yql/cql/ql/ptree/pt_table_property.cc @@ -274,7 +274,7 @@ Status PTTablePropertyListNode::Analyze(SemContext *sem_context) { for (PTTableProperty::SharedPtr tnode : node_list()) { if (tnode == nullptr) { // This shouldn't happen because AppendList ignores null nodes. - LOG(ERROR) << "Invalid null property"; + LOG(DFATAL) << "Invalid null property"; continue; } switch(tnode->property_type()) { @@ -382,7 +382,7 @@ Status PTTableProperty::SetTableProperty(yb::TableProperties *table_property) co case KVProperty::kCompaction: FALLTHROUGH_INTENDED; case KVProperty::kCompression: FALLTHROUGH_INTENDED; case KVProperty::kTransactions: - LOG(ERROR) << "Not primitive table property " << table_property_name; + LOG(DFATAL) << "Not primitive table property " << table_property_name; break; case KVProperty::kNumTablets: int64_t val; diff --git a/src/yb/yql/cql/ql/util/cql_message.cc b/src/yb/yql/cql/ql/util/cql_message.cc index fee6ba605810..62e9da0caa02 100644 --- a/src/yb/yql/cql/ql/util/cql_message.cc +++ b/src/yb/yql/cql/ql/util/cql_message.cc @@ -934,7 +934,7 @@ void SerializeUUID(const string& value, faststring* mesg) { if (value.size() == CQLMessage::kUUIDSize) { mesg->append(value); } else { - LOG(ERROR) << "Internal error: inconsistent UUID size: " << value.size(); + LOG(DFATAL) << "Internal error: inconsistent UUID size: " << value.size(); uint8_t empty_uuid[CQLMessage::kUUIDSize] = {0}; mesg->append(empty_uuid, sizeof(empty_uuid)); } @@ -944,7 +944,7 @@ void SerializeTimeUUID(const string& value, faststring* mesg) { if (value.size() == CQLMessage::kUUIDSize) { mesg->append(value); } else { - LOG(ERROR) << "Internal error: inconsistent 
TimeUUID size: " << value.size(); + LOG(DFATAL) << "Internal error: inconsistent TimeUUID size: " << value.size(); uint8_t empty_uuid[CQLMessage::kUUIDSize] = {0}; mesg->append(empty_uuid, sizeof(empty_uuid)); } @@ -1013,7 +1013,7 @@ void SerializeValue(const CQLMessage::Value& value, faststring* mesg) { break; // default: fall through } - LOG(ERROR) << "Internal error: invalid/unknown value kind " << static_cast(value.kind); + LOG(DFATAL) << "Internal error: invalid/unknown value kind " << static_cast(value.kind); SerializeInt(-1, mesg); } #endif @@ -1226,7 +1226,7 @@ ResultResponse::RowsMetadata::Type::Type(const Id id) : id(id) { // default: fall through } - LOG(ERROR) << "Internal error: invalid/unknown primitive type id " << static_cast(id); + FATAL_INVALID_ENUM_VALUE(Id, id); } // These union members in Type below are not initialized by default. They need to be explicitly @@ -1273,7 +1273,7 @@ ResultResponse::RowsMetadata::Type::Type(const Id id, shared_ptr ele // default: fall through } - LOG(ERROR) << "Internal error: invalid/unknown list/map type id " << static_cast(id); + FATAL_INVALID_ENUM_VALUE(Id, id); } ResultResponse::RowsMetadata::Type::Type(shared_ptr map_type) : id(Id::MAP) { @@ -1332,7 +1332,7 @@ ResultResponse::RowsMetadata::Type::Type(const Type& t) : id(t.id) { // default: fall through } - LOG(ERROR) << "Internal error: unknown type id " << static_cast(id); + FATAL_INVALID_ENUM_VALUE(Id, id); } ResultResponse::RowsMetadata::Type::Type(const shared_ptr& ql_type) { @@ -1443,7 +1443,7 @@ ResultResponse::RowsMetadata::Type::Type(const shared_ptr& ql_type) { // default: fall through } - LOG(ERROR) << "Internal error: invalid/unsupported type " << type->ToString(); + FATAL_INVALID_ENUM_VALUE(DataType, type->main()); } ResultResponse::RowsMetadata::Type::~Type() { @@ -1489,7 +1489,7 @@ ResultResponse::RowsMetadata::Type::~Type() { // default: fall through } - LOG(ERROR) << "Internal error: unknown type id " << static_cast(id); + 
FATAL_INVALID_ENUM_VALUE(Id, id); } ResultResponse::RowsMetadata::RowsMetadata() @@ -1576,7 +1576,8 @@ void ResultResponse::SerializeType(const RowsMetadata::Type* type, faststring* m // default: fall through } - LOG(ERROR) << "Internal error: unknown type id " << static_cast(type->id); + + FATAL_INVALID_ENUM_VALUE(RowsMetadata::Type::Id, type->id); } void ResultResponse::SerializeColSpecs( diff --git a/src/yb/yql/pggate/pg_client.cc b/src/yb/yql/pggate/pg_client.cc index 6c39c16ea224..858fdbfc532c 100644 --- a/src/yb/yql/pggate/pg_client.cc +++ b/src/yb/yql/pggate/pg_client.cc @@ -476,9 +476,9 @@ class PgClient::Impl : public BigDataFetcher { // the next user activity will trigger a FATAL anyway. This is done specifically to avoid // log spew of the warning message below in cases where the session is idle (ie. no other // RPCs are being sent to the tserver). - LOG(ERROR) << "Heartbeat failed. Connection needs to be reset. " - << "Shutting down heartbeating mechanism due to unknown session " - << session_id_; + LOG(DFATAL) << "Heartbeat failed. Connection needs to be reset. 
" + << "Shutting down heartbeating mechanism due to unknown session " + << session_id_; heartbeat_poller_.Shutdown(); return; } diff --git a/src/yb/yql/pggate/pg_dml_write.cc b/src/yb/yql/pggate/pg_dml_write.cc index 8bd5da5fed34..3cd27f289996 100644 --- a/src/yb/yql/pggate/pg_dml_write.cc +++ b/src/yb/yql/pggate/pg_dml_write.cc @@ -51,6 +51,8 @@ Status PgDmlWrite::Prepare(const PgObjectId& table_id, bool is_region_local) { write_req_->dup_table_id(table_id.GetYbTableId()); write_req_->set_schema_version(target_->schema_version()); write_req_->set_stmt_id(reinterpret_cast(write_req_.get())); + // TODO(#26086): Capture and display metrics in EXPLAIN output + write_req_->set_metrics_capture(PgsqlMetricsCaptureType::PGSQL_METRICS_CAPTURE_NONE); doc_op_ = std::make_shared(pg_session_, &target_, std::move(write_op)); PrepareColumns(); diff --git a/src/yb/yql/pggate/pg_metrics_list.h b/src/yb/yql/pggate/pg_metrics_list.h index 89e6871f09da..000e638c1577 100644 --- a/src/yb/yql/pggate/pg_metrics_list.h +++ b/src/yb/yql/pggate/pg_metrics_list.h @@ -241,6 +241,8 @@ typedef enum PgAnalyzeEventStatsMetrics { YB_STORAGE_EVENT_INTENTSDB_BYTES_PER_READ, YB_STORAGE_EVENT_INTENTSDB_BYTES_PER_WRITE, YB_STORAGE_EVENT_INTENTSDB_BYTES_PER_MULTIGET, + YB_STORAGE_EVENT_INTENTSDB_WRITE_JOIN_GROUP_MICROS, + YB_STORAGE_EVENT_INTENTSDB_REMOVE_JOIN_GROUP_MICROS, YB_STORAGE_EVENT_SNAPSHOT_READ_INFLIGHT_WAIT_DURATION, YB_STORAGE_EVENT_QL_READ_LATENCY, diff --git a/src/yb/yql/pggate/pg_sample.cc b/src/yb/yql/pggate/pg_sample.cc index a0bfa94ce17f..9a326e079cd9 100644 --- a/src/yb/yql/pggate/pg_sample.cc +++ b/src/yb/yql/pggate/pg_sample.cc @@ -156,14 +156,16 @@ class PgDocSampleOp : public PgDocReadOp { auto* sampling_state = res.mutable_sampling_state(); VLOG_WITH_PREFIX_AND_FUNC(1) << "Received sampling state: " << sampling_state->ShortDebugString(); + SCHECK(sampling_state->has_rand_state(), InvalidArgument, + "Invalid sampling state, random state is missing"); sampling_stats_ = { 
.num_blocks_processed = sampling_state->num_blocks_processed(), .num_blocks_collected = sampling_state->num_blocks_collected(), .num_rows_processed = sampling_state->samplerows(), .num_rows_collected = sampling_state->numrows(), - }; + }; - RETURN_NOT_OK(PgDocReadOp::CompleteProcessResponse()); + RETURN_NOT_OK(PgDocReadOp::CompleteProcessResponse()); if (active_op_count_ > 0) { auto& next_active_op = GetReadOp(0); diff --git a/src/yb/yql/pggate/pg_session.cc b/src/yb/yql/pggate/pg_session.cc index 441bf7ca080b..271e342cb0fa 100644 --- a/src/yb/yql/pggate/pg_session.cc +++ b/src/yb/yql/pggate/pg_session.cc @@ -268,6 +268,40 @@ void AdvisoryLockRequestInitCommon( } +YbcTxnPriorityRequirement GetTxnPriorityRequirement( + bool is_ddl_mode, PgIsolationLevel isolation_level, RowMarkType row_mark_type) { + YbcTxnPriorityRequirement txn_priority_requirement; + if (is_ddl_mode) { + // DDLs acquire object locks to serialize conflicting concurrent DDLs. Concurrent DDLs that + // don't conflict can make progress without blocking each other. + // + // However, if object locks are disabled, concurrent DDLs are disallowed for safety. + // This is done by relying on conflicting increments to the catalog version (most DDLs do this + // except some like CREATE TABLE). Note that global DDLs (those that affect catalog tables + // shared across databases) conflict with all other DDLs since they increment all per-db + // catalog versions. + // + // For detecting and resolving these conflicts, DDLs use Fail-on-Conflict concurrency + // control (system catalog table doesn't have wait queues enabled). All DDLs except + // Auto-ANALYZEs use kHighestPriority priority to mimic first-come-first-serve behavior. We + // want to give Auto-ANALYZEs a lower priority to ensure they don't abort already running + // user DDLs. Also, user DDLs should preempt Auto-ANALYZEs. 
+ // + // With object level locking, priorities are meaningless since DDLs don't rely on DocDB's + // conflict resolution for concurrent DDLs. + if (!yb_use_internal_auto_analyze_service_conn) + txn_priority_requirement = kHighestPriority; + else + txn_priority_requirement = kHigherPriorityRange; + } else { + txn_priority_requirement = + isolation_level == PgIsolationLevel::READ_COMMITTED ? kHighestPriority : + (RowMarkNeedsHigherPriority(row_mark_type) ? + kHigherPriorityRange : kLowerPriorityRange); + } + return txn_priority_requirement; +} + } // namespace //-------------------------------------------------------------------------------------------------- @@ -376,13 +410,13 @@ class PgSession::RunHelper { return Status::OK(); } - const auto txn_priority_requirement = - pg_session_.GetIsolationLevel() == PgIsolationLevel::READ_COMMITTED - ? kHighestPriority : - (RowMarkNeedsHigherPriority(row_mark_type) ? kHigherPriorityRange : kLowerPriorityRange); read_only = read_only && !IsValidRowMarkType(row_mark_type); - return pg_session_.pg_txn_manager_->CalculateIsolation(read_only, txn_priority_requirement); + return pg_session_.pg_txn_manager_->CalculateIsolation( + read_only, + GetTxnPriorityRequirement( + pg_session_.pg_txn_manager_->IsDdlMode(), pg_session_.GetIsolationLevel(), + row_mark_type)); } Result Flush(std::optional&& cache_options) { @@ -731,12 +765,10 @@ Result PgSession::FlushOperations(BufferableOperations&& ops, boo } if (transactional) { - const auto txn_priority_requirement = - GetIsolationLevel() == PgIsolationLevel::READ_COMMITTED - ? 
kHighestPriority : kLowerPriorityRange; - RETURN_NOT_OK(pg_txn_manager_->CalculateIsolation( - false /* read_only */, txn_priority_requirement)); + false /* read_only */, + GetTxnPriorityRequirement( + pg_txn_manager_->IsDdlMode(), GetIsolationLevel(), RowMarkType::ROW_MARK_ABSENT))); } // When YSQL is flushing a pipeline of Perform rpcs asynchronously i.e., without waiting for @@ -1067,7 +1099,7 @@ Result PgSession::DoRunAsync( // as writing to ysql catalog so we can avoid incrementing the catalog version. has_catalog_write_ops_in_ddl_mode_ = has_catalog_write_ops_in_ddl_mode_ || - (is_ddl && !IsReadOnly(*op) && is_ysql_catalog_table); + (is_ddl && op->is_write() && is_ysql_catalog_table); return runner.Apply(table, op); }; RETURN_NOT_OK(processor(first_table_op)); diff --git a/src/yb/yql/pggate/pg_txn_manager.cc b/src/yb/yql/pggate/pg_txn_manager.cc index db962dc571fb..505e9d3a6235 100644 --- a/src/yb/yql/pggate/pg_txn_manager.cc +++ b/src/yb/yql/pggate/pg_txn_manager.cc @@ -312,6 +312,11 @@ Status PgTxnManager::SetReadOnlyStmt(bool read_only_stmt) { } uint64_t PgTxnManager::NewPriority(YbcTxnPriorityRequirement txn_priority_requirement) { + VLOG(1) << "txn_priority_requirement: " << txn_priority_requirement + << " txn_priority_highpri_lower_bound: " << txn_priority_highpri_lower_bound + << " txn_priority_highpri_upper_bound: " << txn_priority_highpri_upper_bound + << " txn_priority_regular_lower_bound: " << txn_priority_regular_lower_bound + << " txn_priority_regular_upper_bound: " << txn_priority_regular_upper_bound; if (txn_priority_requirement == kHighestPriority) { return yb::kHighPriTxnUpperBound; } @@ -330,6 +335,8 @@ Status PgTxnManager::CalculateIsolation( if (FLAGS_TEST_ysql_yb_ddl_transaction_block_enabled ? 
IsDdlModeWithSeparateTransaction() : IsDdlMode()) { VLOG_TXN_STATE(2); + if (!priority_.has_value()) + priority_ = NewPriority(txn_priority_requirement); return Status::OK(); } @@ -366,7 +373,23 @@ Status PgTxnManager::CalculateIsolation( : (pg_isolation_level_ == PgIsolationLevel::READ_COMMITTED ? IsolationLevel::READ_COMMITTED : IsolationLevel::SNAPSHOT_ISOLATION); - const bool defer = read_only_ && deferrable_; + // Users can use the deferrable mode via: + // (1) DEFERRABLE READ ONLY setting in transaction blocks + // (2) SET yb_read_after_commit_visibility = 'deferred'; + // + // The feature doesn't take affect for non-read only serializable isolation txns + // and fast-path transactions because they don't face read restart errors in the first place. + // + // (1) Serializable isolation txns don't face read restart errors because + // they use the latest timestamp for reading. + // (2) Fast-path txns don't face read restart errors because + // they pick a read time after conflict resolution. + // We already skip (2) because CalculateIsolation is not called for fast-path + // (i.e., NON_TRANSACTIONAL). 
+ need_defer_read_point_ = + ((read_only_ && deferrable_) + || yb_read_after_commit_visibility == YB_DEFERRED_READ_AFTER_COMMIT_VISIBILITY) + && docdb_isolation != IsolationLevel::SERIALIZABLE_ISOLATION; VLOG_TXN_STATE(2) << "DocDB isolation level: " << IsolationLevel_Name(docdb_isolation); @@ -384,22 +407,20 @@ Status PgTxnManager::CalculateIsolation( (docdb_isolation == IsolationLevel::SNAPSHOT_ISOLATION || docdb_isolation == IsolationLevel::READ_COMMITTED) && (!FLAGS_TEST_ysql_yb_ddl_transaction_block_enabled || !IsDdlMode())) { - if (defer) { - need_defer_read_point_ = true; - } + // Preserves isolation_level_ as NON_TRANSACTIONAL } else { if (IsDdlMode()) { DCHECK(FLAGS_TEST_ysql_yb_ddl_transaction_block_enabled) << "Unexpected DDL state found in plain transaction"; } - if (!use_saved_priority_) { + if (!use_saved_priority_ && !priority_.has_value()) { priority_ = NewPriority(txn_priority_requirement); } isolation_level_ = docdb_isolation; VLOG_TXN_STATE(2) << "effective isolation level: " << IsolationLevel_Name(docdb_isolation) - << " priority_: " << priority_ + << " priority_: " << (priority_ ? 
std::to_string(*priority_) : "nullopt") << "; transaction started successfully."; } @@ -526,7 +547,7 @@ Status PgTxnManager::FinishPlainTransaction( void PgTxnManager::ResetTxnAndSession() { txn_in_progress_ = false; isolation_level_ = IsolationLevel::NON_TRANSACTIONAL; - priority_ = 0; + priority_ = std::nullopt; IncTxnSerialNo(); enable_follower_reads_ = false; @@ -536,6 +557,7 @@ void PgTxnManager::ResetTxnAndSession() { snapshot_read_time_is_used_ = false; read_time_manipulation_ = tserver::ReadTimeManipulation::NONE; read_only_stmt_ = false; + need_defer_read_point_ = false; } Status PgTxnManager::SetDdlStateInPlainTransaction() { @@ -657,16 +679,21 @@ Status PgTxnManager::SetupPerformOptions( if (use_saved_priority_) { options->set_use_existing_priority(true); - } else { - options->set_priority(priority_); + } else if (priority_) { + options->set_priority(*priority_); } if (need_restart_) { options->set_restart_transaction(true); need_restart_ = false; } + // Two ways to defer read point: + // 1. SET TRANSACTION READ ONLY DEFERRABLE + // 2. SET yb_read_after_commit_visibility = 'deferred' if (need_defer_read_point_) { options->set_defer_read_point(true); - need_defer_read_point_ = false; + // Setting read point at pg client. Reset other time manipulations. + ensure_read_time = EnsureReadTimeIsSet::kFalse; + read_time_manipulation_ = tserver::ReadTimeManipulation::NONE; } if (!IsDdlModeWithSeparateTransaction()) { // The state in read_time_manipulation_ is only for kPlain transactions. And if YSQL switches to @@ -681,7 +708,7 @@ Status PgTxnManager::SetupPerformOptions( // Do not clamp in the serializable case since // - SERIALIZABLE reads do not pick read time until later. // - SERIALIZABLE reads do not observe read restarts anyways. 
- if (yb_read_after_commit_visibility + if (!IsDdlMode() && yb_read_after_commit_visibility == YB_RELAXED_READ_AFTER_COMMIT_VISIBILITY && isolation_level_ != IsolationLevel::SERIALIZABLE_ISOLATION) // We clamp uncertainty window when @@ -699,22 +726,26 @@ Status PgTxnManager::SetupPerformOptions( } double PgTxnManager::GetTransactionPriority() const { - if (priority_ <= yb::kRegularTxnUpperBound) { - return ToTxnPriority(priority_, + if (!priority_.has_value()) { + return 0.0; + } + + if (*priority_ <= yb::kRegularTxnUpperBound) { + return ToTxnPriority(*priority_, yb::kRegularTxnLowerBound, yb::kRegularTxnUpperBound); } - return ToTxnPriority(priority_, + return ToTxnPriority(*priority_, yb::kHighPriTxnLowerBound, yb::kHighPriTxnUpperBound); } YbcTxnPriorityRequirement PgTxnManager::GetTransactionPriorityType() const { - if (priority_ <= yb::kRegularTxnUpperBound) { + if (!priority_.has_value() || (*priority_ <= yb::kRegularTxnUpperBound)) { return kLowerPriorityRange; } - if (priority_ < yb::kHighPriTxnUpperBound) { + if (*priority_ < yb::kHighPriTxnUpperBound) { return kHigherPriorityRange; } return kHighestPriority; @@ -809,8 +840,8 @@ Status PgTxnManager::CheckSnapshotTimeConflict() const { SCHECK_EQ( yb_read_time, 0, NotSupported, "Cannot set both 'transaction snapshot' and 'yb_read_time' in the same transaction."); - SCHECK_NE( - yb_read_after_commit_visibility, YB_RELAXED_READ_AFTER_COMMIT_VISIBILITY, NotSupported, + SCHECK_EQ( + yb_read_after_commit_visibility, YB_STRICT_READ_AFTER_COMMIT_VISIBILITY, NotSupported, "Cannot set both 'transaction snapshot' and 'yb_read_after_commit_visibility' in the same " "transaction."); SCHECK(!IsDdlMode(), NotSupported, "Cannot run DDL with exported/imported snapshot."); diff --git a/src/yb/yql/pggate/pg_txn_manager.h b/src/yb/yql/pggate/pg_txn_manager.h index e8ad924f132c..6806f8ceab00 100644 --- a/src/yb/yql/pggate/pg_txn_manager.h +++ b/src/yb/yql/pggate/pg_txn_manager.h @@ -202,7 +202,7 @@ class PgTxnManager : public 
RefCountedThreadSafe { // On a transaction conflict error we want to recreate the transaction with the same priority as // the last transaction. This avoids the case where the current transaction gets a higher priority // and cancels the other transaction. - uint64_t priority_ = 0; + std::optional priority_; SavePriority use_saved_priority_ = SavePriority::kFalse; int64_t pg_txn_start_us_ = 0; bool snapshot_read_time_is_used_ = false; diff --git a/src/yb/yql/pggate/pggate_flags.cc b/src/yb/yql/pggate/pggate_flags.cc index 6096a7b98fae..f6b54550b86c 100644 --- a/src/yb/yql/pggate/pggate_flags.cc +++ b/src/yb/yql/pggate/pggate_flags.cc @@ -17,8 +17,12 @@ #include "yb/util/flag_validators.h" #include "yb/util/flags.h" +#include "yb/util/size_literals.h" + #include "yb/yql/pggate/pggate_flags.h" +using namespace yb::size_literals; + DEPRECATE_FLAG(int32, pgsql_rpc_keepalive_time_ms, "02_2024"); DEFINE_UNKNOWN_int32(pggate_rpc_timeout_secs, 60, @@ -65,7 +69,13 @@ DEFINE_test_flag(int64, inject_delay_between_prepare_ybctid_execute_batch_ybctid DEFINE_test_flag(bool, index_read_multiple_partitions, false, "Test flag used to simulate tablet spliting by joining tables' partitions."); -DEFINE_NON_RUNTIME_int32(ysql_output_buffer_size, 262144, +#if defined(__APPLE__) +constexpr int32_t kDefaultYsqlOutputBufferSize = 256_KB; +#else +constexpr int32_t kDefaultYsqlOutputBufferSize = 1_MB; +#endif + +DEFINE_NON_RUNTIME_int32(ysql_output_buffer_size, kDefaultYsqlOutputBufferSize, "Size of postgres-level output buffer, in bytes. 
" "While fetched data resides within this buffer and hasn't been flushed to client yet, " "we're free to transparently restart operation in case of restart read error."); diff --git a/src/yb/yql/pggate/test/pggate_test_select.cc b/src/yb/yql/pggate/test/pggate_test_select.cc index d5236ff18e9c..98aab95446cd 100644 --- a/src/yb/yql/pggate/test/pggate_test_select.cc +++ b/src/yb/yql/pggate/test/pggate_test_select.cc @@ -530,8 +530,7 @@ TEST_F_EX(PggateTestSelect, GetColocatedTableKeyRanges, PggateTestSelectWithYsql ASSERT_OK(cluster_->WaitForAllIntentsApplied(30s * kTimeMultiplier)); for (size_t ts_idx = 0; ts_idx < cluster_->num_tablet_servers(); ++ts_idx) { - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - cluster_->tablet_server(ts_idx), {}, tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(ts_idx, {})); } std::vector> min_max_keys; diff --git a/src/yb/yql/pggate/util/ybc_guc.cc b/src/yb/yql/pggate/util/ybc_guc.cc index 719bc1e6309d..857dbc1bc875 100644 --- a/src/yb/yql/pggate/util/ybc_guc.cc +++ b/src/yb/yql/pggate/util/ybc_guc.cc @@ -124,3 +124,6 @@ bool yb_mixed_mode_expression_pushdown = true; bool yb_debug_log_catcache_events = false; bool yb_mixed_mode_saop_pushdown = false; + +// Internal GUC to help a backend identify that the connection is from the Auto-Analyze service. +bool yb_use_internal_auto_analyze_service_conn = false; diff --git a/src/yb/yql/pggate/util/ybc_guc.h b/src/yb/yql/pggate/util/ybc_guc.h index eb78aecdbe1e..3be59e061a37 100644 --- a/src/yb/yql/pggate/util/ybc_guc.h +++ b/src/yb/yql/pggate/util/ybc_guc.h @@ -237,20 +237,21 @@ extern int yb_explicit_row_locking_batch_size; * Ease transition to YSQL by reducing read restart errors for new apps. * * This option doesn't affect SERIALIZABLE isolation level since - * SERIALIZABLE can't face read restart errors anyway. + * SERIALIZABLE can't face read restart errors anyway. Also, does not affect + * fast path writes. 
* * See the help text for yb_read_after_commit_visibility GUC for more * information. * * XXX: This GUC is meant as a workaround only by relaxing the - * read-after-commit-visibility guarantee. Ideally, - * (a) Users should fix their apps to handle read restart errors, or - * (b) TODO(#22317): YB should use very accurate clocks to avoid read restart - * errors altogether. + * read-after-commit-visibility guarantee. Ideally, user should + * (a) Fix their apps to handle read restart errors + * (b) Or use accurate clocks provided by time_source=clockbound */ typedef enum { YB_STRICT_READ_AFTER_COMMIT_VISIBILITY = 0, YB_RELAXED_READ_AFTER_COMMIT_VISIBILITY = 1, + YB_DEFERRED_READ_AFTER_COMMIT_VISIBILITY = 2, } YbcReadAfterCommitVisibilityEnum; /* GUC for the enum above. */ @@ -276,6 +277,8 @@ extern bool yb_mixed_mode_expression_pushdown; extern bool yb_mixed_mode_saop_pushdown; +extern bool yb_use_internal_auto_analyze_service_conn; + // Should be in sync with YsqlSamplingAlgorithm protobuf. typedef enum { YB_SAMPLING_ALGORITHM_FULL_TABLE_SCAN = 0, diff --git a/src/yb/yql/pggate/ybc_pggate.cc b/src/yb/yql/pggate/ybc_pggate.cc index a2c846838746..02fb2711b4b6 100644 --- a/src/yb/yql/pggate/ybc_pggate.cc +++ b/src/yb/yql/pggate/ybc_pggate.cc @@ -163,6 +163,16 @@ DEFINE_RUNTIME_PREVIEW_bool( "Enables the support for synchronizing snapshots across transactions, using pg_export_snapshot " "and SET TRANSACTION SNAPSHOT"); +DEFINE_RUNTIME_PG_FLAG( + bool, yb_force_early_ddl_serialization, true, + "If object locking is off (i.e., TEST_enable_object_locking_for_table_locks=false), concurrent " + "DDLs might face a conflict error on the catalog version increment at the end after doing all " + "the work. Setting this flag enables a fail-fast strategy by locking the catalog version at " + "the start of DDLs, causing conflict errors to occur before useful work is done. This flag is " + "only applicable without object locking. 
If object locking is enabled, it ensures that " + "concurrent DDLs block on each other for serialization. Also, this flag is valid only if " + "ysql_enable_db_catalog_version_mode and yb_enable_invalidation_messages are enabled."); + DECLARE_bool(TEST_ash_debug_aux); DECLARE_bool(TEST_generate_ybrowid_sequentially); DECLARE_bool(TEST_ysql_log_perdb_allocated_new_objectid); @@ -504,7 +514,7 @@ static Result GetYbLsnTypeString( case tserver::PGReplicationSlotLsnType::ReplicationSlotLsnTypePg_HYBRID_TIME: return YBC_LSN_TYPE_HYBRID_TIME; default: - LOG(ERROR) << "Received unexpected LSN type " << yb_lsn_type << " for stream " << stream_id; + LOG(DFATAL) << "Received unexpected LSN type " << yb_lsn_type << " for stream " << stream_id; return STATUS_FORMAT( InternalError, "Received unexpected LSN type $0 for stream $1", yb_lsn_type, stream_id); } @@ -2690,7 +2700,7 @@ void YBCStoreTServerAshSamples( acquire_cb_lock_fn(true /* exclusive */); if (!result.ok()) { // We don't return error status to avoid a restart loop of the ASH collector - LOG(ERROR) << result.status(); + LOG(WARNING) << result.status(); } else { AshCopyTServerSamples(get_cb_slot_fn, result->tserver_wait_states(), sample_time); AshCopyTServerSamples(get_cb_slot_fn, result->cql_wait_states(), sample_time); diff --git a/src/yb/yql/pggate/ysql_bench_metrics_handler/ybc_ysql_bench_metrics_handler.cc b/src/yb/yql/pggate/ysql_bench_metrics_handler/ybc_ysql_bench_metrics_handler.cc index 1f3dd2f0fcd4..a911aa9a18d3 100644 --- a/src/yb/yql/pggate/ysql_bench_metrics_handler/ybc_ysql_bench_metrics_handler.cc +++ b/src/yb/yql/pggate/ysql_bench_metrics_handler/ybc_ysql_bench_metrics_handler.cc @@ -110,7 +110,7 @@ int StartWebserver(WebserverWrapper *webserver_wrapper) { "/prometheus-metrics", "Metrics", PgPrometheusMetricsHandler, false, false); auto status = WithMaskedYsqlSignals([webserver]() { return webserver->Start(); }); if (!status.ok()) { - LOG(ERROR) << "Error starting webserver: " << status.ToString(); + 
LOG(DFATAL) << "Error starting webserver: " << status.ToString(); return 1; } diff --git a/src/yb/yql/pgwrapper/CMakeLists.txt b/src/yb/yql/pgwrapper/CMakeLists.txt index cb1c45169c34..f301e3ed2b90 100644 --- a/src/yb/yql/pgwrapper/CMakeLists.txt +++ b/src/yb/yql/pgwrapper/CMakeLists.txt @@ -171,6 +171,7 @@ ADD_YB_TEST(pg_tablet_split-test) ADD_YB_TEST(pg_type-test) ADD_YB_TEST(pg_txn-test) ADD_YB_TEST(pg_txn_status-test) +ADD_YB_TEST(pg_vector_index-itest) ADD_YB_TEST(pg_vector_index-test) ADD_YB_TEST(pg_wait_on_conflict-test) ADD_YB_TEST(pg_wrapper-test) diff --git a/src/yb/yql/pgwrapper/pg_ash-test.cc b/src/yb/yql/pgwrapper/pg_ash-test.cc index a54bfe2484fa..9d31a28b2f4e 100644 --- a/src/yb/yql/pgwrapper/pg_ash-test.cc +++ b/src/yb/yql/pgwrapper/pg_ash-test.cc @@ -527,8 +527,7 @@ TEST_F_EX(PgWaitEventAuxTest, YB_DISABLE_TEST_IN_TSAN(TabletSplitRPCs), PgTablet ASSERT_OK(conn_->ExecuteFormat( "INSERT INTO $0 SELECT i, i FROM generate_series(1, 100) AS i", kTableName)); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(cluster_->tablet_server(0), {tablet_id}, - tserver::FlushTabletsRequestPB_Operation::FlushTabletsRequestPB_Operation_FLUSH)); + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(0, {tablet_id})); // keep running selects until GetTablePartitionList RPC is found ASSERT_OK(WaitFor([this]() -> Result { diff --git a/src/yb/yql/pgwrapper/pg_auto_analyze-test.cc b/src/yb/yql/pgwrapper/pg_auto_analyze-test.cc index 02e364bd34ff..769f46750946 100644 --- a/src/yb/yql/pgwrapper/pg_auto_analyze-test.cc +++ b/src/yb/yql/pgwrapper/pg_auto_analyze-test.cc @@ -36,6 +36,7 @@ #include "yb/util/logging_test_util.h" #include "yb/util/string_case.h" #include "yb/util/test_macros.h" +#include "yb/util/test_thread_holder.h" #include "yb/util/tostring.h" #include "yb/yql/cql/ql/util/statement_result.h" @@ -53,6 +54,8 @@ DECLARE_uint32(ysql_auto_analyze_batch_size); DECLARE_bool(TEST_sort_auto_analyze_target_table_ids); DECLARE_int32(TEST_simulate_analyze_deleted_table_secs); 
DECLARE_string(vmodule); +DECLARE_int64(TEST_delay_after_table_analyze_ms); +DECLARE_bool(TEST_enable_object_locking_for_table_locks); using namespace std::chrono_literals; @@ -75,8 +78,8 @@ class PgAutoAnalyzeTest : public PgMiniTestBase { ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_enable_table_mutation_counter) = true; // Set low values for the node level mutation reporting and the cluster level persisting - // intervals ensures that the aggregate mutations are frequently applied to the underlying YCQL - // table, hence capping the test time low. + // intervals. This ensures that the aggregate mutations are frequently applied to the underlying + // YCQL table, hence capping the test time low. ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_node_level_mutation_reporting_interval_ms) = 10; ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_cluster_level_mutation_persist_interval_ms) = 10; google::SetVLOGLevel("pg_auto_analyze_service", 2); @@ -985,5 +988,56 @@ TEST_F(PgAutoAnalyzeTest, MutationsCleanupForDeletedAnalyzeTargetTable) { ASSERT_OK(WaitForTableMutationsCleanUp({table_id})); } +TEST_F(PgAutoAnalyzeTest, DDLsInParallelWithAutoAnalyze) { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_auto_analyze_threshold) = 1; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_auto_analyze_scale_factor) = 0.01; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_delay_after_table_analyze_ms) = 10; + // Explicitly disable object locking. With object locking, concurrent DDLs will be handled + // without relying on catalog version increments. 
+ ANNOTATE_UNPROTECTED_WRITE(FLAGS_TEST_enable_object_locking_for_table_locks) = false; + + auto conn = ASSERT_RESULT(Connect()); + auto db_name = "abc"; + ASSERT_OK(conn.ExecuteFormat("CREATE DATABASE $0", db_name)); + conn = ASSERT_RESULT(ConnectToDB(db_name)); + const std::string table_name = "test_tbl"; + const std::string table2_name = "test_tbl2"; + ASSERT_OK(conn.ExecuteFormat( + "CREATE TABLE $0 (h1 INT, v1 INT DEFAULT 5, PRIMARY KEY(h1))", table_name)); + ASSERT_OK(conn.ExecuteFormat( + "CREATE TABLE $0 (h1 INT, v1 INT DEFAULT 5, PRIMARY KEY(h1))", table2_name)); + + TestThreadHolder thread_holder; + thread_holder.AddThreadFunctor([this, db_name, table_name, &stop = thread_holder.stop_flag()] { + auto conn = ASSERT_RESULT(ConnectToDB(db_name)); + auto num_inserts = 0; + while (!stop.load(std::memory_order_acquire)) { + auto status = conn.ExecuteFormat("INSERT INTO $0 (h1) VALUES ($1)", table_name, num_inserts); + if (status.ToString().find("schema version mismatch") == std::string::npos) { + ASSERT_OK(status); + num_inserts++; + } + } + ASSERT_OK(WaitFor([&conn, table_name, num_inserts]() -> Result { + const std::string format_query = "SELECT reltuples FROM pg_class WHERE relname = '$0'"; + auto res = VERIFY_RESULT(conn.FetchFormat(format_query, table_name)); + auto tuples = VERIFY_RESULT(GetValue(res.get(), 0, 0)); + LOG(INFO) << "Saw " << tuples << " reltuples"; + return num_inserts == tuples; + }, 10s * kTimeMultiplier, + Format("Check expected reltuples vs actual reltuples (%0)", num_inserts))); + }); + + // Perform DDLs on another table to avoid read restart errors. 
+ ASSERT_OK(conn.Execute("SET yb_max_query_layer_retries = 0")); + ASSERT_OK(conn.ExecuteFormat("CREATE INDEX idx ON $0 (v1)", table2_name)); + ASSERT_OK(conn.ExecuteFormat("DROP INDEX idx")); + ASSERT_OK(conn.ExecuteFormat("ALTER TABLE $0 ADD COLUMN v2 INT", table2_name)); + ASSERT_OK(conn.ExecuteFormat("ALTER TABLE $0 DROP COLUMN v2", table2_name)); + + thread_holder.Stop(); + thread_holder.JoinAll(); +} + } // namespace pgwrapper } // namespace yb diff --git a/src/yb/yql/pgwrapper/pg_catalog_version-test.cc b/src/yb/yql/pgwrapper/pg_catalog_version-test.cc index 5789e15c732e..55207a991e2d 100644 --- a/src/yb/yql/pgwrapper/pg_catalog_version-test.cc +++ b/src/yb/yql/pgwrapper/pg_catalog_version-test.cc @@ -14,6 +14,7 @@ #include "yb/gutil/strings/util.h" #include "yb/tserver/tserver_service.proxy.h" #include "yb/tserver/tserver_shared_mem.h" +#include "yb/util/env_util.h" #include "yb/util/path_util.h" #include "yb/util/scope_exit.h" #include "yb/util/string_util.h" @@ -579,6 +580,21 @@ class PgCatalogVersionTest : public LibPqTestBase { ASSERT_EQ(count, 2); } + // This function is extracted and adapted from ysql_upgrade.cc. 
+ std::string ReadMigrationFile(const string& migration_file) { + const char* kStaticDataParentDir = "share"; + const char* kMigrationsDir = "ysql_migrations"; + const std::string search_for_dir = JoinPathSegments(kStaticDataParentDir, kMigrationsDir); + const std::string root_dir = env_util::GetRootDir(search_for_dir); + CHECK(!root_dir.empty()); + const std::string migrations_dir = + JoinPathSegments(root_dir, kStaticDataParentDir, kMigrationsDir); + faststring migration_content; + CHECK_OK(ReadFileToString(Env::Default(), + JoinPathSegments(migrations_dir, migration_file), + &migration_content)); + return migration_content.ToString(); + } }; TEST_F(PgCatalogVersionTest, DBCatalogVersion) { @@ -912,6 +928,11 @@ TEST_F(PgCatalogVersionTest, FixCatalogVersionTable) { ASSERT_TRUE(ASSERT_RESULT( VerifyCatalogVersionTableDbOids(&conn_yugabyte, true /* single_row */))); + // Do not force early serialization for DDLs since the pg_yb_catalog_version table is in global + // catalog version mode and early serialization requires taking a lock on the per-db catalog + // version row. + ASSERT_OK(conn_yugabyte.Execute("SET yb_force_early_ddl_serialization=false")); + // At this time, an existing connection is still in per-db catalog version mode // but the table pg_yb_catalog_version has only one row for template1 and is out // of sync with pg_database. Note that once a connection is in per-db catalog @@ -2626,5 +2647,277 @@ SET yb_non_ddl_txn_for_sys_tables_allowed=1; ASSERT_EQ(expected, result); } +// Test YSQL upgrade where we can directly write to catalog tables using DML +// statements under the GUC yb_non_ddl_txn_for_sys_tables_allowed=1. These +// DML statements do generate invalidation messages. We make the COMMIT statement +// in a YSQL migrate script to be a DDL so that we can capture the messages +// generated by these DML statements. 
+TEST_F(PgCatalogVersionTest, InvalMessageYsqlUpgradeCommit1) { + RestartClusterWithInvalMessageEnabled(); + auto conn_yugabyte = ASSERT_RESULT(Connect()); + ASSERT_OK(conn_yugabyte.Execute("SET log_min_messages = DEBUG1")); + // Use snapshot isolation mode during YSQL upgrade. This is needed as a simple work + // around so that we do not start subtransactions during YSQL upgrade. Otherwise the + // COMMIT will only capture the invalidation messages generated by the last DML statement + // preceding the COMMIT statement. + ASSERT_OK(conn_yugabyte.Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); + auto v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + string migrate_sql = "SET yb_non_ddl_txn_for_sys_tables_allowed=1;\n"; + // We directly make an update to pg_class that will generate 1 invalidation message. + // Write for a random number of times, and verify we have captured the same number + // of messages by the COMMIT statement. + const auto inval_message_count = RandomUniformInt(1, 100); + LOG(INFO) << "inval_message_count: " << inval_message_count; + for (int i = 0; i < inval_message_count; ++i) { + // The nested BEGIN; does not have any effect other than causing a warning messages + // WARNING: there is already a transaction in progress + // However if we allow YSQL upgrade to run in read committed isolation, then + // each statement will start a subtransaction which prevents the final COMMIT + // statement to catpure all the invalidation messages. For now we disallow YSQL + // upgrade to run in read committed isolation to avoid that. + migrate_sql += "BEGIN;\nUPDATE pg_class SET relam = 2 WHERE oid = 8010;\n"; + } + migrate_sql += "COMMIT;\n"; + ASSERT_OK(conn_yugabyte.Execute("SET ysql_upgrade_mode TO true")); + ASSERT_OK(conn_yugabyte.Execute(migrate_sql)); + // The migrate sql is run under YSQL upgrade mode. Therefore the COMMIT is + // considered as a DDL and causes catalog version to increment. 
+ v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 2); + const auto count = ASSERT_RESULT(conn_yugabyte.FetchRow( + "SELECT COUNT(*) FROM pg_yb_invalidation_messages")); + ASSERT_EQ(count, 1); + auto query = "SELECT encode(messages, 'hex') FROM pg_yb_invalidation_messages " + "WHERE current_version=$0"s; + auto result2 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 2))); + // Each invalidation messages is 24 bytes, in hex is 48 bytes. + ASSERT_EQ(result2.size(), inval_message_count * 48U); + // Make sure we only have simple usage of COMMIT in a migration script. PG allows + // COMMIT inside a an anonymous code block, in YSQL upgrade we do not allow. + migrate_sql = + R"#( +DO $$ +BEGIN + UPDATE pg_class SET relam = 2 WHERE oid = 8010; + COMMIT; +END$$; + )#"; + auto status = conn_yugabyte.Execute(migrate_sql); + ASSERT_TRUE(status.IsNetworkError()) << status; + ASSERT_STR_CONTAINS(status.ToString(), "invalid transaction termination"); + ASSERT_OK(conn_yugabyte.Execute("ROLLBACK")); + // PG also allows COMMIT inside a procedure that is invoked via CALL statement. + // In YSQL upgrade we do not allow. 
+ migrate_sql = + R"#( +CREATE OR REPLACE PROCEDURE myproc() AS +$$ +BEGIN + UPDATE pg_class SET relam = 2 WHERE oid = 8010; + COMMIT; +END $$ LANGUAGE 'plpgsql'; +CALL myproc(); + )#"; + status = conn_yugabyte.Execute(migrate_sql); + ASSERT_TRUE(status.IsNetworkError()) << status; + ASSERT_STR_CONTAINS(status.ToString(), "invalid transaction termination"); +} + +TEST_F(PgCatalogVersionTest, InvalMessageYsqlUpgradeCommit2) { + RestartClusterWithInvalMessageEnabled(); + // Prepare the test setup by reverting + // V75__26335__pg_set_relation_stats.sql + auto conn_yugabyte = ASSERT_RESULT(Connect()); + ASSERT_OK(conn_yugabyte.Execute("SET log_min_messages = DEBUG1")); + ASSERT_OK(conn_yugabyte.Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); + auto v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + const string setup_sql = + R"#( +BEGIN; +SET LOCAL yb_non_ddl_txn_for_sys_tables_allowed TO true; +DELETE FROM pg_catalog.pg_proc WHERE oid in (8091, 8092, 8093, 8094); +DELETE FROM pg_catalog.pg_description WHERE objoid in (8091, 8092, 8093, 8094) + AND classoid = 1255 AND objsubid = 0; +COMMIT; + )#"; + ASSERT_OK(conn_yugabyte.Execute(setup_sql)); + // The setup sql is not run under YSQL upgrade mode. Therefore its COMMIT is + // considered as a DML and does not cause catalog version to increment. + v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + + // Now run the migrate sql under YSQL upgrade mode: + // V75__26335__pg_set_relation_stats.sql + const string migrate_sql = + ReadMigrationFile("V75__26335__pg_set_relation_stats.sql"); + ASSERT_OK(conn_yugabyte.Execute("SET ysql_upgrade_mode TO true")); + ASSERT_OK(conn_yugabyte.Execute(migrate_sql)); + // The migrate sql is run under YSQL upgrade mode. Therefore each COMMIT is + // considered as a DDL and causes catalog version to increment. 
+ v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 5); + const auto count = ASSERT_RESULT(conn_yugabyte.FetchRow( + "SELECT COUNT(*) FROM pg_yb_invalidation_messages")); + ASSERT_EQ(count, 4); + auto query = "SELECT encode(messages, 'hex') FROM pg_yb_invalidation_messages " + "WHERE current_version=$0"s; + + // version 2 messages. + auto result2 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 2))); + ASSERT_EQ(result2.size(), 144U); + + // version 3 messages. + auto result3 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 3))); + ASSERT_EQ(result3.size(), 144U); + + // version 4 messages. + auto result4 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 4))); + ASSERT_EQ(result4.size(), 144U); + + // version 5 messages. + auto result5 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 5))); + ASSERT_EQ(result5.size(), 144U); +} + +TEST_F(PgCatalogVersionTest, InvalMessageYsqlUpgradeCommit3) { + RestartClusterWithInvalMessageEnabled(); + auto conn_yugabyte = ASSERT_RESULT(Connect()); + ASSERT_OK(conn_yugabyte.Execute("SET log_min_messages = DEBUG1")); + ASSERT_OK(conn_yugabyte.Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); + auto v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + + // Now run the migrate sql under YSQL upgrade mode. + // V77__26590__query_id_yb_terminated_queries_view.sql + const string migrate_sql = + ReadMigrationFile("V77__26590__query_id_yb_terminated_queries_view.sql"); + ASSERT_OK(conn_yugabyte.Execute("SET ysql_upgrade_mode TO true")); + ASSERT_OK(conn_yugabyte.Execute(migrate_sql)); + // The migrate sql is run under YSQL upgrade mode. Therefore its COMMIT is + // considered as a DDL. There are two COMMIT statements. The first COMMIT + // has got invalidation messages so it causes catalog version to increment + // from 1 to 2. 
Then the DROP VIEW statement causes catalog version to + // increment from 2 to 3, the next CREATE OR REPLACE VIEW statement causes + // catalog version to increment from 3 to 4. The last COMMIT statement got + // 1 invalidation message because even though there are no catalog table + // writes between the CREATE OR REPLACE VIEW and the last COMMIT, the call + // to increment catalog version does generate one message that is not + // captured by the call itself. Therefore the last COMMIT still causes + // catalog version to increment. + v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 5); + const auto count = ASSERT_RESULT(conn_yugabyte.FetchRow( + "SELECT COUNT(*) FROM pg_yb_invalidation_messages")); + ASSERT_EQ(count, 4); + auto query = "SELECT encode(messages, 'hex') FROM pg_yb_invalidation_messages " + "WHERE current_version=$0"s; + + // version 2 messages. + auto result2 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 2))); + ASSERT_EQ(result2.size(), 144U); + + // version 3 messages. + auto result3 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 3))); + ASSERT_EQ(result3.size(), 1248U); + + // version 4 messages. + auto result4 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 4))); + ASSERT_EQ(result4.size(), 1344U); + + // version 5 messages.
+ auto result5 = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(Format(query, 5))); + ASSERT_EQ(result5.size(), 48U); +} + +TEST_F(PgCatalogVersionTest, InvalMessageYsqlUpgradeCommit4) { + RestartClusterWithInvalMessageEnabled(); + // Prepare the test setup by reverting + // V78__26645__yb_binary_upgrade_set_next_pg_enum_sortorder.sql + auto conn_yugabyte = ASSERT_RESULT(Connect()); + ASSERT_OK(conn_yugabyte.Execute("SET log_min_messages = DEBUG1")); + ASSERT_OK(conn_yugabyte.Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); + auto v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + const string setup_sql = + R"#( +BEGIN; +SET LOCAL yb_non_ddl_txn_for_sys_tables_allowed TO true; +DELETE FROM pg_catalog.pg_proc WHERE oid = 8095; +DELETE FROM pg_catalog.pg_description WHERE objoid = 8095 AND classoid = 1255 AND objsubid = 0; +COMMIT; + )#"; + ASSERT_OK(conn_yugabyte.Execute(setup_sql)); + // The setup sql is not run under YSQL upgrade mode. Therefore its COMMIT is + // considered as a DML and does not cause catalog version to increment. + v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 1); + + // Now run the migrate sql under YSQL upgrade mode: + // V78__26645__yb_binary_upgrade_set_next_pg_enum_sortorder.sql + const string migrate_sql = + ReadMigrationFile("V78__26645__yb_binary_upgrade_set_next_pg_enum_sortorder.sql"); + ASSERT_OK(conn_yugabyte.Execute("SET ysql_upgrade_mode TO true")); + ASSERT_OK(conn_yugabyte.Execute(migrate_sql)); + // The migrate sql is run under YSQL upgrade mode. Therefore its COMMIT is + // considered as a DDL and causes catalog version to increment. 
+ v = ASSERT_RESULT(GetCatalogVersion(&conn_yugabyte)); + ASSERT_EQ(v, 2); + auto query = "SELECT encode(messages, 'hex') FROM pg_yb_invalidation_messages"s; + auto result = ASSERT_RESULT(conn_yugabyte.FetchAllAsString(query)); + // The migrate script has generated 3 messages: + // 1 SharedInvalCatcacheMsg for PROCNAMEARGSNSP + // 1 SharedInvalCatcacheMsg for PROCOID + // 1 SharedInvalSnapshotMsg for pg_description + // each message is 24 raw bytes and 48 bytes in 'hex' (48 * 3 = 144). + ASSERT_EQ(result.size(), 144U); +} + +// https://github.com/yugabyte/yugabyte-db/issues/27170 +TEST_F(PgCatalogVersionTest, InvalMessageDuplicateVersion) { + RestartClusterWithInvalMessageEnabled( + { "--check_lagging_catalog_versions_interval_secs=1" }); + // Make two connections on two different nodes. + pg_ts = cluster_->tablet_server(0); + auto conn1 = ASSERT_RESULT(Connect()); + pg_ts = cluster_->tablet_server(1); + auto conn2 = ASSERT_RESULT(Connect()); + // Let two concurrent DDLs operate on two tables to avoid any concurrent DDL related + // errors to interfere and prevent the case that we are trying to contrive. + ASSERT_OK(conn1.Execute("CREATE TABLE foo(id INT)")); + ASSERT_OK(conn1.Execute("CREATE TABLE bar(id INT)")); + ASSERT_OK(conn1.Execute("SET yb_test_delay_set_local_tserver_inval_message_ms = 3000")); + TestThreadHolder thread_holder; + auto ddl1 = "ALTER TABLE foo ADD COLUMN val TEXT"s; + auto ddl2 = "ALTER TABLE bar ADD COLUMN val TEXT"s; + thread_holder.AddThreadFunctor([&conn2, &ddl2] { + // Delay 1s so that conn1's ddl1 is executed first. + SleepFor(1s); + // Statement ddl2 leads to version 3. + ASSERT_OK(conn2.Execute(ddl2)); + }); + + // Execute ddl1 on conn1 that increments the catalog version.
The 3-second delay caused by + // SET yb_test_delay_set_local_tserver_inval_message_ms = 3000 will be long enough for ddl2 + // on conn2 to complete, and heartbeat should happen to propagate the new version of ddl1 + // and the new version of ddl2 to the local tserver. + // Statement ddl1 leads to version 2. + ASSERT_OK(conn1.Execute(ddl1)); + + // This wait is needed to reproduce the bug 27170 so that we don't jump to the next + // query right away which will trigger calling TabletServer::GetTserverCatalogMessageLists + // that also detects the duplication of version 2, causing tserver to FATAL differently + // from what we expect to see as in GHI 27170. + SleepFor(5s); + + // In pg_yb_invalidation_messages we should see two rows for DB yugabyte: version 2 and + // version 3 because version 2 has not expired yet when version 3 was inserted. + const auto count = ASSERT_RESULT(conn2.FetchRow( + "SELECT COUNT(*) FROM pg_yb_invalidation_messages")); + ASSERT_EQ(count, 2); + thread_holder.Stop(); +} + } // namespace pgwrapper } // namespace yb diff --git a/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc b/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc index 60bcc92be3f5..bf1b3f4bfb59 100644 --- a/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc +++ b/src/yb/yql/pgwrapper/pg_get_lock_status-test.cc @@ -38,6 +38,9 @@ DECLARE_bool(enable_wait_queues); DECLARE_bool(TEST_skip_returning_old_transactions); DECLARE_uint64(force_single_shard_waiter_retry_ms); DECLARE_int32(tserver_unresponsive_timeout_ms); +DECLARE_double(leader_failure_max_missed_heartbeat_periods); +DECLARE_int32(raft_heartbeat_interval_ms); +DECLARE_int32(leader_lease_duration_ms); using namespace std::literals; using std::string; @@ -47,6 +50,9 @@ namespace pgwrapper { YB_STRONGLY_TYPED_BOOL(RequestSpecifiedTxnIds); +constexpr auto kPgLocksDistTxnsQuery = + "SELECT COUNT(DISTINCT(ybdetails->>'transactionid')) FROM pg_locks"; + struct TestTxnLockInfo { TestTxnLockInfo() {} explicit TestTxnLockInfo(int 
num_locks) : num_locks(num_locks) {} @@ -240,8 +246,7 @@ TEST_F(PgGetLockStatusTest, TestLocksFromWaitQueue) { // Assert that locks corresponding to the waiter txn as well are returned in pg_locks; SleepFor(MonoDelta::FromSeconds(2 * kTimeMultiplier)); - auto num_txns = ASSERT_RESULT(session.conn->FetchRow( - "SELECT COUNT(DISTINCT(ybdetails->>'transactionid')) FROM pg_locks")); + auto num_txns = ASSERT_RESULT(session.conn->FetchRow(kPgLocksDistTxnsQuery)); ASSERT_EQ(num_txns, 2); ASSERT_OK(session.conn->Execute("COMMIT")); @@ -887,8 +892,7 @@ TEST_F(PgGetLockStatusTest, TestLockStatusRespHasHostNodeSet) { constexpr int kMinTxnAgeMs = 1; // All distributed txns returned as part of pg_locks should have the host node uuid set. const auto kPgLocksQuery = - Format("SELECT COUNT(DISTINCT(ybdetails->>'transactionid')) FROM pg_locks " - "WHERE NOT fastpath AND ybdetails->>'node' IS NULL"); + Format("$0 WHERE NOT fastpath AND ybdetails->>'node' IS NULL", kPgLocksDistTxnsQuery); const auto table = "foo"; const auto key = "1"; @@ -1148,8 +1152,6 @@ TEST_F(PgGetLockStatusTest, TestPgLocksOutputAfterNodeOperations) { // tombstoned amidst the two passes. 
#ifndef NDEBUG TEST_F(PgGetLockStatusTest, FetchLocksAmidstTransactionCommit) { - const auto kPgLocksQuery = "SELECT COUNT(DISTINCT(ybdetails->>'transactionid')) FROM pg_locks"; - auto setup_conn = ASSERT_RESULT(Connect()); ASSERT_OK(setup_conn.Execute("CREATE TABLE foo(k INT PRIMARY KEY, v INT) SPLIT INTO 1 TABLETS")); ASSERT_OK(setup_conn.Execute("INSERT INTO foo SELECT generate_series(1, 10), 0")); @@ -1171,7 +1173,7 @@ TEST_F(PgGetLockStatusTest, FetchLocksAmidstTransactionCommit) { auto result_future = std::async(std::launch::async, [&]() -> Result { auto conn = VERIFY_RESULT(Connect()); - return conn.FetchRow(kPgLocksQuery); + return conn.FetchRow(kPgLocksDistTxnsQuery); }); // Wait for the lock status request to scan the transaction reverse index section and store @@ -1187,5 +1189,47 @@ TEST_F(PgGetLockStatusTest, FetchLocksAmidstTransactionCommit) { } #endif // NDEBUG +class PgGetLockStatusTestFastElection : public PgGetLockStatusTestRF3 { + protected: + void SetUp() override { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_leader_failure_max_missed_heartbeat_periods) = 4; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_raft_heartbeat_interval_ms) = 100; + ANNOTATE_UNPROTECTED_WRITE(FLAGS_leader_lease_duration_ms) = 400; + PgGetLockStatusTestRF3::SetUp(); + } +}; + +TEST_F_EX( + PgGetLockStatusTestRF3, TestPgLocksAfterTserverShutdown, PgGetLockStatusTestFastElection) { + const auto kTable = "foo"; + auto setup_conn = ASSERT_RESULT(Connect()); + ASSERT_OK(setup_conn.ExecuteFormat("CREATE TABLE $0(k INT, v INT) SPLIT INTO 1 TABLETS", kTable)); + ASSERT_OK(setup_conn.ExecuteFormat("INSERT INTO $0 SELECT generate_series(1, 10), 0", kTable)); + + auto conn = ASSERT_RESULT(Connect()); + ASSERT_OK(conn.StartTransaction(IsolationLevel::READ_COMMITTED)); + ASSERT_OK(conn.ExecuteFormat("UPDATE $0 SET v=v+1 WHERE k=1", kTable)); + SleepFor(FLAGS_heartbeat_interval_ms * 2ms * kTimeMultiplier); + ASSERT_EQ(ASSERT_RESULT(setup_conn.FetchRow(kPgLocksDistTxnsQuery)), 1); + + const auto table_id = 
ASSERT_RESULT(GetTableIDFromTableName(kTable)); + auto leader_peers = ListTableActiveTabletLeadersPeers(cluster_.get(), table_id); + ASSERT_EQ(leader_peers.size(), 1); + auto& leader_peer = leader_peers[0]; + + auto* leader_ts = cluster_->find_tablet_server(leader_peer->permanent_uuid()); + if (leader_ts == cluster_->mini_tablet_server(kPgTsIndex)) { + leader_ts = cluster_->mini_tablet_server((kPgTsIndex + 1) % cluster_->num_tablet_servers()); + ASSERT_OK(StepDown(leader_peer, leader_ts->server()->permanent_uuid(), ForceStepDown::kTrue)); + } + ASSERT_NE(leader_ts, cluster_->mini_tablet_server(kPgTsIndex)); + leader_ts->Shutdown(); + ASSERT_OK(WaitForTableLeaders( + cluster_.get(), ASSERT_RESULT(GetTableIDFromTableName("transactions")), + 5s * kTimeMultiplier)); + ASSERT_OK(WaitForTableLeaders(cluster_.get(), table_id, 5s * kTimeMultiplier)); + ASSERT_EQ(ASSERT_RESULT(setup_conn.FetchRow(kPgLocksDistTxnsQuery)), 1); +} + } // namespace pgwrapper } // namespace yb diff --git a/src/yb/yql/pgwrapper/pg_heap_snapshot-test.cc b/src/yb/yql/pgwrapper/pg_heap_snapshot-test.cc index dfddcc16c263..1dcd177f15f9 100644 --- a/src/yb/yql/pgwrapper/pg_heap_snapshot-test.cc +++ b/src/yb/yql/pgwrapper/pg_heap_snapshot-test.cc @@ -31,6 +31,16 @@ using namespace std::chrono_literals; namespace yb::pgwrapper { class PgHeapSnapshotTest : public PgMiniTestBase { + public: + void SetUp() override { + if (CURRENT_TEST_CASE_AND_TEST_NAME_STR() == "PgHeapSnapshotTest.TestYsqlHeapSnapshotSimple") { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_yb_enable_invalidation_messages) = false; + } + LOG(INFO) << "FLAGS_ysql_yb_enable_invalidation_messages: " + << FLAGS_ysql_yb_enable_invalidation_messages; + PgMiniTestBase::SetUp(); + } + protected: auto PgConnect(const std::string& username) { auto settings = MakeConnSettings(); @@ -40,7 +50,6 @@ class PgHeapSnapshotTest : public PgMiniTestBase { }; TEST_F(PgHeapSnapshotTest, YB_DISABLE_TEST_IN_SANITIZERS(TestYsqlHeapSnapshotSimple)) { - 
ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_yb_enable_invalidation_messages) = false; auto conn1 = ASSERT_RESULT(Connect()); auto conn2 = ASSERT_RESULT(Connect()); diff --git a/src/yb/yql/pgwrapper/pg_hint_table-test.cc b/src/yb/yql/pgwrapper/pg_hint_table-test.cc index d339fb8649f4..739b97ce7870 100644 --- a/src/yb/yql/pgwrapper/pg_hint_table-test.cc +++ b/src/yb/yql/pgwrapper/pg_hint_table-test.cc @@ -39,9 +39,9 @@ class PgHintTableTest : public LibPqTestBase { } static Result ExecuteExplainAndGetJoinType( - PGConn& conn, + PGConn *conn, const std::string& explain_query) { - auto explain_str = VERIFY_RESULT(conn.FetchRow(explain_query)); + auto explain_str = VERIFY_RESULT(conn->FetchRow(explain_query)); return GetJoinType(explain_str); } @@ -62,6 +62,45 @@ class PgHintTableTest : public LibPqTestBase { RETURN_NOT_OK(conn.Execute("SET pg_hint_plan.yb_use_query_id_for_hinting TO on")); return conn; } + + Result> InsertHintsAndRunQueries() { + // ------------------------------------------------------------------------------------------ + // 1. Setup connections + // ------------------------------------------------------------------------------------------ + auto conn_query = VERIFY_RESULT(ConnectWithHintTable()); + auto conn_hint = VERIFY_RESULT(ConnectWithHintTable()); + + // ------------------------------------------------------------------------------------------ + // 2. 
Insert hints and run queries to force hint table lookups + // ------------------------------------------------------------------------------------------ + const int num_queries = 100; + for (int i = 0; i < num_queries; i++) { + std::string whitespace(i * 1000, ' '); + auto hint_value = Format("YbBatchedNL(pg_class $0 pg_attribute)", whitespace); + + RETURN_NOT_OK(conn_hint.ExecuteFormat( + "INSERT INTO hint_plan.hints (norm_query_string, application_name, hints) " + "VALUES ('$0', '', '$1')", + query_id + i, hint_value)); + + // Execute the query to force hint cache lookups and refreshes + auto join_type = VERIFY_RESULT( + ExecuteExplainAndGetJoinType(&conn_query, "EXPLAIN (ANALYZE, FORMAT JSON) " + query)); + SCHECK_EQ(std::string("YB Batched Nested Loop"), join_type, IllegalState, + Format("Unexpected join type: $0", join_type)); + } + LOG(INFO) << "Completed " << num_queries << " queries"; + return std::make_pair(std::move(conn_query), std::move(conn_hint)); + } +}; + +class PgHintTableTestWithoutHintCache : public PgHintTableTest { + public: + void UpdateMiniClusterOptions(ExternalMiniClusterOptions* options) override { + PgHintTableTest::UpdateMiniClusterOptions(options); + options->extra_tserver_flags.push_back( + "--ysql_pg_conf_csv=pg_hint_plan.yb_enable_hint_table_cache=off"); + } }; const std::string PgHintTableTest::query = @@ -108,7 +147,7 @@ TEST_F(PgHintTableTest, ForceBatchedNestedLoop) { // 4. Re-run the query on the first connection and verify it uses a Batched Nested Loop now // ---------------------------------------------------------------------------------------------- ASSERT_STR_EQ("YB Batched Nested Loop", - ASSERT_RESULT(ExecuteExplainAndGetJoinType(conn1, explain_query))); + ASSERT_RESULT(ExecuteExplainAndGetJoinType(&conn1, explain_query))); // ---------------------------------------------------------------------------------------------- // 5.
Delete the hint from the hint table @@ -122,7 +161,7 @@ TEST_F(PgHintTableTest, ForceBatchedNestedLoop) { // ---------------------------------------------------------------------------------------------- // 6. Re-run the query on the first connection and verify it's back to the original plan // ---------------------------------------------------------------------------------------------- - ASSERT_STR_EQ("Hash Join", ASSERT_RESULT(ExecuteExplainAndGetJoinType(conn1, explain_query))); + ASSERT_STR_EQ("Hash Join", ASSERT_RESULT(ExecuteExplainAndGetJoinType(&conn1, explain_query))); } TEST_F(PgHintTableTest, SimpleConcurrencyTest) { @@ -171,7 +210,7 @@ TEST_F(PgHintTableTest, SimpleConcurrencyTest) { &iterations]() { while (!stop_threads) { std::string join_type = ASSERT_RESULT( - ExecuteExplainAndGetJoinType(conn_explain, explain_query)); + ExecuteExplainAndGetJoinType(&conn_explain, explain_query)); LOG(INFO) << "Observed join type: " << join_type; { @@ -212,6 +251,21 @@ TEST_F(PgHintTableTest, SimpleConcurrencyTest) { } } +void FailIfNotConcurrentDDLErrors(const Status& status) { + // Expect a catalog version mismatch error during concurrent operations + if (!status.ok()) { + std::string error_msg = status.ToString(); + if (error_msg.find("pgsql error 40001") != std::string::npos || + error_msg.find("Catalog Version Mismatch") != std::string::npos || + error_msg.find("Restart read required") != std::string::npos || + error_msg.find("schema version mismatch") != std::string::npos) { + LOG(INFO) << "Expected error: " << error_msg; + } else { + FAIL() << "Unexpected error: " << error_msg; + } + } +} + // Test that hints work correctly when ANALYZE is running concurrently TEST_F(PgHintTableTest, HintWithConcurrentAnalyze) { // ---------------------------------------------------------------------------------------------- @@ -224,42 +278,35 @@ TEST_F(PgHintTableTest, HintWithConcurrentAnalyze) { // Thread to run ANALYZE auto conn_analyze = 
ASSERT_RESULT(ConnectWithHintTable()); + // Thread to insert the hint + auto conn_hint = ASSERT_RESULT(ConnectWithHintTable()); + threads.AddThreadFunctor([&stop_threads, &conn_analyze]() { LOG(INFO) << "Starting ANALYZE thread"; while (!stop_threads.load()) { auto status = conn_analyze.Execute("ANALYZE VERBOSE"); - // Expect a catalog version mismatch error during concurrent operations - if (!status.ok()) { - std::string error_msg = status.ToString(); - if (error_msg.find("pgsql error 40001") != std::string::npos || - error_msg.find("Catalog Version Mismatch") != std::string::npos || - error_msg.find("Restart read required") != std::string::npos || - error_msg.find("schema version mismatch") != std::string::npos) { - // These errors are expected during concurrent operations - LOG(INFO) << "Expected error during ANALYZE: " << error_msg; - } else { - FAIL() << "Unexpected error during ANALYZE: " << error_msg; - } - } + FailIfNotConcurrentDDLErrors(status); } LOG(INFO) << "ANALYZE completed"; }); - // Thread to insert the hint - auto conn_hint = ASSERT_RESULT(ConnectWithHintTable()); threads.AddThreadFunctor([&stop_threads, &hint_num, &conn_hint]() { LOG(INFO) << "Starting hint insertion thread"; while (!stop_threads.load()) { - ASSERT_OK(conn_hint.ExecuteFormat( + auto status = conn_hint.ExecuteFormat( "INSERT INTO hint_plan.hints (norm_query_string, application_name, hints) " "VALUES ('$0', '', 'MergeJoin(pg_class pg_attribute)') " "ON CONFLICT (norm_query_string, application_name) " "DO UPDATE SET hints = 'MergeJoin(pg_class pg_attribute)'", - hint_num++)); + hint_num); + FailIfNotConcurrentDDLErrors(status); + if (status.ok()) { + hint_num++; + } } LOG(INFO) << "Successfully inserted " << hint_num << " hints"; }); @@ -328,7 +375,7 @@ TEST_F(PgHintTableTest, PreparedStatementHintCacheRefresh) { int64_t custom_plan_refreshes = 0; for (int i = 0; i < 6; i++) { auto join_type = ASSERT_RESULT(ExecuteExplainAndGetJoinType( - conn_pstmt, Format("EXPLAIN (ANALYZE, FORMAT 
JSON) EXECUTE test_stmt($0)", i))); + &conn_pstmt, Format("EXPLAIN (ANALYZE, FORMAT JSON) EXECUTE test_stmt($0)", i))); ASSERT_STR_EQ("YB Batched Nested Loop", join_type); // Verify that we got 5 misses and no hits auto metrics_custom_plan = GetPrometheusMetrics(); @@ -352,7 +399,7 @@ TEST_F(PgHintTableTest, PreparedStatementHintCacheRefresh) { int64_t generic_plan_refreshes = 0; for (int i = 7; i < 12; i++) { auto join_type = ASSERT_RESULT(ExecuteExplainAndGetJoinType( - conn_pstmt, Format("EXPLAIN (ANALYZE, FORMAT JSON) EXECUTE test_stmt($0)", i))); + &conn_pstmt, Format("EXPLAIN (ANALYZE, FORMAT JSON) EXECUTE test_stmt($0)", i))); ASSERT_STR_EQ("YB Batched Nested Loop", join_type); // Verify that we got no additional hits/misses @@ -400,7 +447,7 @@ TEST_F(PgHintTableTest, PreparedStatementHintCacheRefresh) { // used // ---------------------------------------------------------------------------------------------- auto join_type = ASSERT_RESULT(ExecuteExplainAndGetJoinType( - conn_pstmt, "EXPLAIN (ANALYZE, FORMAT JSON) EXECUTE test_stmt(10)")); + &conn_pstmt, "EXPLAIN (ANALYZE, FORMAT JSON) EXECUTE test_stmt(10)")); ASSERT_STR_EQ("Merge Join", join_type); auto new_hint_metrics = GetPrometheusMetrics(); @@ -427,7 +474,7 @@ TEST_F(PgHintTableTest, InvalidHint) { // Execute the prepared statement once to establish a baseline auto initial_join_type = ASSERT_RESULT(ExecuteExplainAndGetJoinType( - conn_explain, "EXPLAIN (ANALYZE, FORMAT JSON) " + query)); + &conn_explain, "EXPLAIN (ANALYZE, FORMAT JSON) " + query)); LOG(INFO) << "Initial join type: " << initial_join_type; // ---------------------------------------------------------------------------------------------- @@ -450,7 +497,7 @@ TEST_F(PgHintTableTest, InvalidHint) { // ---------------------------------------------------------------------------------------------- for (int i = 0; i < 10; i++) { auto join_type_after_invalid = ASSERT_RESULT(ExecuteExplainAndGetJoinType( - conn_explain, "EXPLAIN (ANALYZE, FORMAT 
JSON) " + query)); + &conn_explain, "EXPLAIN (ANALYZE, FORMAT JSON) " + query)); // The join type should remain the same as the initial one // since the invalid hint should be ignored ASSERT_STR_EQ(initial_join_type, join_type_after_invalid); @@ -463,36 +510,12 @@ TEST_F(PgHintTableTest, InvalidHint) { */ TEST_F(PgHintTableTest, HintCacheMemoryLeakTest) { // ---------------------------------------------------------------------------------------------- - // 1. Setup connections + // 1. Setup connections, add hints and check that the hints are being followed // ---------------------------------------------------------------------------------------------- - auto conn_query = ASSERT_RESULT(ConnectWithHintTable()); - auto conn_hint = ASSERT_RESULT(ConnectWithHintTable()); - - ASSERT_OK(conn_hint.Execute("SET yb_tcmalloc_sample_period = 1")); - ASSERT_OK(conn_query.Execute("SET yb_tcmalloc_sample_period = 1")); + auto [conn_query, conn_hint] = ASSERT_RESULT(InsertHintsAndRunQueries()); // ---------------------------------------------------------------------------------------------- - // 2. 
Insert hints and run queries to force hint cache lookups & refreshes - // ---------------------------------------------------------------------------------------------- - const int num_queries = 100; - for (int i = 0; i < num_queries; i++) { - std::string whitespace(i * 1000, ' '); - auto hint_value = Format("YbBatchedNL(pg_class $0 pg_attribute)", whitespace); - - ASSERT_OK(conn_hint.ExecuteFormat( - "INSERT INTO hint_plan.hints (norm_query_string, application_name, hints) " - "VALUES ('$0', '', '$1')", - query_id + i, hint_value)); - - // Execute the query to force hint cache lookups and refreshes - auto join_type = ASSERT_RESULT( - ExecuteExplainAndGetJoinType(conn_query, "EXPLAIN (ANALYZE, FORMAT JSON) " + query)); - ASSERT_STR_EQ("YB Batched Nested Loop", join_type); - } - LOG(INFO) << "Completed " << num_queries << " queries"; - - // ---------------------------------------------------------------------------------------------- - // 3. Check size of YbHintCacheContext and the total size of all memory contexts + // 2. Check size of YbHintCacheContext and the total size of all memory contexts // ---------------------------------------------------------------------------------------------- for (PGConn* conn : {&conn_query, &conn_hint}) { ASSERT_TRUE(ASSERT_RESULT( @@ -521,4 +544,24 @@ TEST_F(PgHintTableTest, HintCacheMemoryLeakTest) { } } +// Test that the hint cache is disabled when the yb_enable_hint_table_cache flag is set to off. +TEST_F(PgHintTableTestWithoutHintCache, HintCacheDisabled) { + // ---------------------------------------------------------------------------------------------- + // 1. Setup connections, add hints and check that the hints are being followed + // ---------------------------------------------------------------------------------------------- + auto [conn_query, conn_hint] = ASSERT_RESULT(InsertHintsAndRunQueries()); + + // ---------------------------------------------------------------------------------------------- + // 2. 
Check that the YbHintCacheContext does not exist on all connections + // ---------------------------------------------------------------------------------------------- + auto count_query = ASSERT_RESULT(conn_query.FetchRow( + "SELECT COUNT(*) FROM pg_backend_memory_contexts " + "WHERE name = 'YbHintCacheContext'")); + ASSERT_EQ(count_query, 0); + auto count_hint = ASSERT_RESULT(conn_hint.FetchRow( + "SELECT COUNT(*) FROM pg_backend_memory_contexts " + "WHERE name = 'YbHintCacheContext'")); + ASSERT_EQ(count_hint, 0); +} + } // namespace yb::pgwrapper diff --git a/src/yb/yql/pgwrapper/pg_index_backfill-test.cc b/src/yb/yql/pgwrapper/pg_index_backfill-test.cc index e87e3ff7d9c0..e3ad9de5dd35 100644 --- a/src/yb/yql/pgwrapper/pg_index_backfill-test.cc +++ b/src/yb/yql/pgwrapper/pg_index_backfill-test.cc @@ -29,7 +29,6 @@ #include "yb/master/master_error.h" #include "yb/tserver/tserver_service.pb.h" -#include "yb/tserver/tserver_service.proxy.h" #include "yb/util/async_util.h" #include "yb/util/backoff_waiter.h" @@ -65,7 +64,6 @@ const auto kPhase = "phase"s; const auto kPhaseBackfilling = "backfilling"s; const auto kPhaseInitializing = "initializing"s; const client::YBTableName kYBTableName(YQLDatabase::YQL_DATABASE_PGSQL, kDatabaseName, kTableName); -constexpr auto kBackfillSleepSec = 10 * kTimeMultiplier; } // namespace @@ -88,8 +86,6 @@ class PgIndexBackfillTest : public LibPqTestBase, public ::testing::WithParamInt options->extra_tserver_flags.push_back("--ysql_disable_index_backfill=false"); options->extra_tserver_flags.push_back( Format("--ysql_num_shards_per_tserver=$0", kTabletsPerServer)); - options->extra_tserver_flags.push_back( - Format("--TEST_sleep_before_vector_index_backfill_seconds=$0", kBackfillSleepSec)); if (EnableTableLocks()) { options->extra_master_flags.push_back("--TEST_enable_object_locking_for_table_locks=true"); @@ -913,6 +909,7 @@ TEST_P(PgIndexBackfillTestSimultaneously, CreateIndexSimultaneously) { // TODO (#19975): Enable read committed 
isolation PGConn create_conn = ASSERT_RESULT(SetDefaultTransactionIsolation( ConnectToDB(kDatabaseName), IsolationLevel::SNAPSHOT_ISOLATION)); + ASSERT_OK(create_conn.Execute("SET yb_force_early_ddl_serialization=false")); statuses[i] = MoveStatus(create_conn.ExecuteFormat( "CREATE INDEX $0 ON $1 (i)", kIndexName, kTableName)); @@ -1860,6 +1857,9 @@ TEST_P(PgIndexBackfillFastClientTimeout, DropWhileBackfilling) { thread_holder_.AddThreadFunctor([this] { LOG(INFO) << "Begin create thread"; PGConn create_conn = ASSERT_RESULT(ConnectToDB(kDatabaseName)); + // We don't want the DROP INDEX to face a serialization error when acquiring the FOR UPDATE lock + // on the catalog version row. + ASSERT_OK(create_conn.Execute("SET yb_force_early_ddl_serialization=false")); Status status = create_conn.ExecuteFormat("CREATE INDEX $0 ON $1 (i)", kIndexName, kTableName); // Expect timeout because // DROP INDEX is currently not online and removes the index info from the indexed table @@ -2440,6 +2440,9 @@ TEST_P(PgIndexBackfillReadCommittedBlockIndisliveBlockDoBackfill, CatVerBumps) { thread_holder_.AddThreadFunctor([this] { LOG(INFO) << "Begin create index thread"; auto create_idx_conn = ASSERT_RESULT(ConnectToDB(kDatabaseName)); + // We don't want the catalog version increments to conflict with the FOR UPDATE lock on the + // catalog version row. 
+ ASSERT_OK(create_idx_conn.Execute("SET yb_force_early_ddl_serialization=false")); ASSERT_OK(create_idx_conn.ExecuteFormat("CREATE INDEX $0 ON $1 (i)", kIndexName, kTableName)); LOG(INFO) << "End create index thread"; }); @@ -2572,166 +2575,6 @@ TEST_P(PgIndexBackfill1kRowsPerSec, ConcurrentDelete) { thread_holder_.JoinAll(); } -struct VectorIndexWriter { - static constexpr int kBig = 100000000; - - std::atomic counter = 0; - std::atomic extra_values_counter = kBig * 2; - std::atomic last_write; - std::atomic max_time_without_inserts = MonoDelta::FromNanoseconds(0); - std::atomic failure = false; - - void Perform(PGConn& conn) { - std::vector values; - for (int i = RandomUniformInt(3, 6); i > 0; --i) { - values.push_back(++counter); - } - size_t keep_values = values.size(); - for (int i = RandomUniformInt(0, 2); i > 0; --i) { - values.push_back(++extra_values_counter); - } - bool use_2_steps = RandomUniformBool(); - - int offset = use_2_steps ? kBig : 0; - ASSERT_NO_FATALS(Insert(conn, values, offset)); - if (use_2_steps || keep_values != values.size()) { - ASSERT_NO_FATALS(UpdateAndDelete(conn, values, keep_values)); - } - } - - void Insert(PGConn& conn, const std::vector& values, int offset) { - for (;;) { - ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); - bool failed = false; - for (auto value : values) { - auto res = conn.ExecuteFormat( - "INSERT INTO test VALUES ($0, '[$1.0]')", value, value + offset); - if (!res.ok()) { - ASSERT_OK(conn.RollbackTransaction()); - LOG(INFO) << "Insert " << value << " failed: " << res; - ASSERT_STR_CONTAINS(res.message().ToBuffer(), "schema version mismatch"); - failed = true; - break; - } - } - if (!failed) { - ASSERT_OK(conn.CommitTransaction()); - auto now = CoarseMonoClock::Now(); - auto prev_last_write = last_write.exchange(now); - if (prev_last_write != CoarseTimePoint()) { - MonoDelta new_value(now - prev_last_write); - if (MakeAtLeast(max_time_without_inserts, new_value)) { - LOG(INFO) << "Update 
max time without inserts: " << new_value; - } - } - std::this_thread::sleep_for(100ms); - break; - } - } - } - - void UpdateAndDelete(PGConn& conn, const std::vector& values, size_t keep_values) { - for (;;) { - ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); - bool failed = false; - for (size_t i = 0; i != values.size(); ++i) { - auto value = values[i]; - Status res; - if (i < keep_values) { - res = conn.ExecuteFormat( - "UPDATE test SET embedding = '[$0.0]' WHERE id = $0", value); - } else { - res = conn.ExecuteFormat("DELETE FROM test WHERE id = $0", value); - } - if (!res.ok()) { - ASSERT_OK(conn.RollbackTransaction()); - LOG(INFO) << - (i < keep_values ? "Update " : "Delete " ) << value << " failed: " << res; - ASSERT_STR_CONTAINS(res.message().ToBuffer(), "schema version mismatch"); - failed = true; - break; - } - } - if (!failed) { - ASSERT_OK(conn.CommitTransaction()); - std::this_thread::sleep_for(100ms); - break; - } - } - } - - void WaitWritten(int num_rows) { - auto limit = counter.load() + num_rows; - while (counter.load() < limit && !failure) { - std::this_thread::sleep_for(10ms); - } - } - - void Verify(PGConn& conn) { - int num_bad_results = 0; - for (int i = 2; i < counter.load(); ++i) { - auto rows = ASSERT_RESULT(conn.FetchAllAsString(Format( - "SELECT id FROM test ORDER BY embedding <-> '[$0]' LIMIT 3", i * 1.0 - 0.01))); - auto expected = Format("$0; $1; $2", i, i - 1, i + 1); - if (rows != expected) { - LOG(INFO) << "Bad result: " << rows << " vs " << expected; - ++num_bad_results; - } - } - // Expect recall 98% or better. 
- ASSERT_LE(num_bad_results, counter.load() / 50); - } -}; - -TEST_P(PgIndexBackfillTest, VectorIndex) { - ASSERT_OK(conn_->Execute("CREATE EXTENSION vector")); - ASSERT_OK(conn_->ExecuteFormat( - "CREATE TABLE test (id INT PRIMARY KEY, embedding vector(1))")); - TestThreadHolder thread_holder; - VectorIndexWriter writer; - for (int i = 0; i != 8; ++i) { - thread_holder.AddThreadFunctor( - [this, &stop_flag = thread_holder.stop_flag(), &writer] { - bool done = false; - auto se = ScopeExit([&done, &writer] { - if (!done) { - writer.failure = true; - } - }); - auto conn = ASSERT_RESULT(Connect()); - while (!stop_flag.load()) { - ASSERT_NO_FATALS(writer.Perform(conn)); - } - done = true; - }); - } - writer.WaitWritten(32); - LOG(INFO) << "Started to create index"; - // TODO(vector_index) Switch to using CONCURRENT index creation when it will be ready. - ASSERT_OK(conn_->Execute( - "CREATE INDEX ON test USING ybhnsw (embedding vector_l2_ops)")); - LOG(INFO) << "Finished to create index"; - writer.WaitWritten(32); - thread_holder.Stop(); - LOG(INFO) << "Max time without inserts: " << writer.max_time_without_inserts; - ASSERT_LT(writer.max_time_without_inserts, 1s * kBackfillSleepSec); - SCOPED_TRACE(Format("Total rows: $0", writer.counter.load())); - - // VerifyVectorIndexes does not take intents into account, so could produce false failure. 
- ASSERT_OK(cluster_->WaitForAllIntentsApplied(30s * kTimeMultiplier)); - - for (size_t i = 0; i != cluster_->num_tablet_servers(); ++i) { - tserver::VerifyVectorIndexesRequestPB req; - tserver::VerifyVectorIndexesResponsePB resp; - rpc::RpcController controller; - controller.set_timeout(30s); - auto proxy = cluster_->tablet_server(i)->Proxy(); - ASSERT_OK(proxy->VerifyVectorIndexes(req, &resp, &controller)); - ASSERT_FALSE(resp.has_error()) << resp.ShortDebugString(); - } - writer.Verify(*conn_); -} - class PgSerializeBackfillTest : public PgIndexBackfillTest { public: void UpdateMiniClusterOptions(ExternalMiniClusterOptions* options) override { diff --git a/src/yb/yql/pgwrapper/pg_libpq-test.cc b/src/yb/yql/pgwrapper/pg_libpq-test.cc index 4c290c6a5269..484ef0b0ae0e 100644 --- a/src/yb/yql/pgwrapper/pg_libpq-test.cc +++ b/src/yb/yql/pgwrapper/pg_libpq-test.cc @@ -165,6 +165,8 @@ class PgLibPqTest : public LibPqTestBase { void TestSecondaryIndexInsertSelect(); + Status TestEmbeddedIndexScanOptimization(bool is_colocated_with_tablespaces); + void KillPostmasterProcessOnTservers(); Result GetPostmasterPidViaShell(pid_t backend_pid); @@ -947,6 +949,107 @@ TEST_F_EX(PgLibPqTest, SecondaryIndexInsertSelectWithSharedMem, PgLibPqWithShare TestSecondaryIndexInsertSelect(); } +Status PgLibPqTest::TestEmbeddedIndexScanOptimization(bool is_colocated_with_tablespaces) { + auto conn = VERIFY_RESULT(Connect()); + constexpr auto kPgCatalogOid = 11; + + // Secondary index scan on system table. + auto query = Format( + "EXPLAIN (ANALYZE, DIST, FORMAT JSON)" + " SELECT * FROM pg_class WHERE relname = 'pg_class' AND relnamespace = $0", + kPgCatalogOid); + // First run is to warm up the cache. + RETURN_NOT_OK(conn.FetchRow(query)); + // Second run is the real test. 
+ auto explain_str = VERIFY_RESULT(conn.FetchRow(query)); + rapidjson::Document explain_json; + explain_json.Parse(explain_str.c_str()); + auto scan_type = std::string(explain_json[0]["Plan"]["Node Type"].GetString()); + SCHECK_EQ(scan_type, "Index Scan", + IllegalState, + "Unexpected scan type"); + SCHECK_EQ(explain_json[0]["Catalog Read Requests"].GetDouble(), 1, + IllegalState, + "Unexpected number of catalog read requests"); + + // Secondary index scan on copartitioned table. + RETURN_NOT_OK(conn.Execute("CREATE EXTENSION vector")); + RETURN_NOT_OK(conn.Execute( + "CREATE TABLE vector_test (id int PRIMARY KEY, embedding vector(3)) SPLIT INTO 2 TABLETS")); + RETURN_NOT_OK(conn.Execute( + "CREATE INDEX ON vector_test USING ybhnsw (embedding vector_l2_ops)")); + RETURN_NOT_OK(conn.Execute("INSERT INTO vector_test VALUES (1, '[1, 2, 3]')")); + explain_str = VERIFY_RESULT(conn.FetchRow( + "EXPLAIN (ANALYZE, DIST, FORMAT JSON)" + " SELECT * FROM vector_test ORDER BY embedding <-> '[0, 0, 0]' LIMIT 1")); + explain_json.Parse(explain_str.c_str()); + scan_type = std::string(explain_json[0]["Plan"]["Node Type"].GetString()); + SCHECK_EQ(scan_type, "Limit", + IllegalState, + "Unexpected scan type"); + scan_type = std::string(explain_json[0]["Plan"]["Plans"][0]["Node Type"].GetString()); + SCHECK_EQ(scan_type, "Index Scan", + IllegalState, + "Unexpected scan type"); + SCHECK_EQ(explain_json[0]["Storage Read Requests"].GetDouble(), 1, + IllegalState, + "Unexpected number of storage read requests"); + + // Secondary index scan on colocated table and index. 
+ RETURN_NOT_OK(conn.Execute("CREATE DATABASE colodb WITH colocation = true")); + conn = VERIFY_RESULT(ConnectToDB("colodb")); + RETURN_NOT_OK(conn.Execute( + "CREATE TABLE colo_test (id int PRIMARY KEY, value TEXT)")); + RETURN_NOT_OK(conn.Execute("CREATE INDEX ON colo_test (value)")); + RETURN_NOT_OK(conn.Execute("INSERT INTO colo_test VALUES (1, 'hi')")); + query = "EXPLAIN (ANALYZE, DIST, FORMAT JSON) SELECT * FROM colo_test WHERE value = 'hi'"; + explain_str = VERIFY_RESULT(conn.FetchRow(query)); + explain_json.Parse(explain_str.c_str()); + scan_type = std::string(explain_json[0]["Plan"]["Node Type"].GetString()); + SCHECK_EQ(scan_type, "Index Scan", + IllegalState, + "Unexpected scan type"); + SCHECK_EQ(explain_json[0]["Storage Read Requests"].GetDouble(), 1, + IllegalState, + "Unexpected number of storage read requests"); + + // Secondary index scan on colocated table and index on different tablespaces. + if (is_colocated_with_tablespaces) { + RETURN_NOT_OK(conn.Execute("DROP INDEX colo_test_value_idx")); + RETURN_NOT_OK(conn.Execute("CREATE TABLESPACE spc LOCATION '/dne'")); + RETURN_NOT_OK(conn.Execute("CREATE INDEX ON colo_test (value) TABLESPACE spc")); + explain_str = VERIFY_RESULT(conn.FetchRow(query)); + explain_json.Parse(explain_str.c_str()); + scan_type = std::string(explain_json[0]["Plan"]["Node Type"].GetString()); + SCHECK_EQ(scan_type, "Index Scan", + IllegalState, + "Unexpected scan type"); + SCHECK_GT(explain_json[0]["Storage Read Requests"].GetDouble(), 1, + IllegalState, + "Unexpected number of storage read requests"); + } + + return Status::OK(); +} + +TEST_F(PgLibPqTest, EmbeddedIndexScanOptimizationColocatedWithTablespacesFalse) { + ASSERT_OK(TestEmbeddedIndexScanOptimization(false)); +} + +class PgLibPqColocatedTablesWithTablespacesTest : public PgLibPqTest { + void UpdateMiniClusterOptions(ExternalMiniClusterOptions* options) override { + const auto flag = "--ysql_enable_colocated_tables_with_tablespaces=true"s; + 
options->extra_master_flags.push_back(flag); + options->extra_tserver_flags.push_back(flag); + } +}; + +TEST_F_EX(PgLibPqTest, + EmbeddedIndexScanOptimizationColocatedWithTablespacesTrue, + PgLibPqColocatedTablesWithTablespacesTest) { + ASSERT_OK(TestEmbeddedIndexScanOptimization(true)); +} + void AssertRows(PGConn *conn, int expected_num_rows) { auto res = ASSERT_RESULT(conn->Fetch("SELECT * FROM test")); ASSERT_EQ(PQntuples(res.get()), expected_num_rows); diff --git a/src/yb/yql/pgwrapper/pg_mini-test.cc b/src/yb/yql/pgwrapper/pg_mini-test.cc index bc46cecc5d3f..5e2e4fc489cd 100644 --- a/src/yb/yql/pgwrapper/pg_mini-test.cc +++ b/src/yb/yql/pgwrapper/pg_mini-test.cc @@ -89,6 +89,7 @@ DECLARE_bool(flush_rocksdb_on_shutdown); DECLARE_bool(pg_client_use_shared_memory); DECLARE_bool(rocksdb_disable_compactions); DECLARE_bool(use_bootstrap_intent_ht_filter); +DECLARE_bool(ysql_allow_duplicating_repeatable_read_queries); DECLARE_bool(ysql_yb_enable_ash); DECLARE_bool(ysql_yb_enable_replica_identity); @@ -906,6 +907,8 @@ TEST_F(PgMiniTest, TruncateColocatedBigTable) { } TEST_F_EX(PgMiniTest, BulkCopyWithRestart, PgMiniSmallWriteBufferTest) { + ANNOTATE_UNPROTECTED_WRITE(FLAGS_ysql_allow_duplicating_repeatable_read_queries) = true; + const std::string kTableName = "key_value"; auto conn = ASSERT_RESULT(Connect()); ASSERT_OK(conn.ExecuteFormat( diff --git a/src/yb/yql/pgwrapper/pg_object_locks-test.cc b/src/yb/yql/pgwrapper/pg_object_locks-test.cc index e5a655ed2678..720e45e95739 100644 --- a/src/yb/yql/pgwrapper/pg_object_locks-test.cc +++ b/src/yb/yql/pgwrapper/pg_object_locks-test.cc @@ -107,8 +107,9 @@ class PgObjectLocksTestRF1 : public PgMiniTestBase { ASSERT_OK(conn1.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); ASSERT_OK(conn1.Execute(lock_stmt_1)); - // In sync point ObjectLockedBatchEntry::Lock, the lock is in waiting state. 
- SyncPoint::GetInstance()->LoadDependency({{"WaitingLock", "ObjectLockedBatchEntry::Lock"}}); + // In sync point ObjectLockManagerImpl::DoLockSingleEntry, the lock is in waiting state. + SyncPoint::GetInstance()->LoadDependency( + {{"WaitingLock", "ObjectLockManagerImpl::DoLockSingleEntry"}}); SyncPoint::GetInstance()->ClearTrace(); SyncPoint::GetInstance()->EnableProcessing(); @@ -427,13 +428,16 @@ TEST_F(PgObjectLocksTestRF1, ExclusiveLocksRemovedAfterDocDBSchemaChange) { class PgObjectLocksTest : public LibPqTestBase { protected: void UpdateMiniClusterOptions(ExternalMiniClusterOptions* opts) override { - opts->extra_tserver_flags.emplace_back("--TEST_enable_object_locking_for_table_locks=true"); + const bool table_locks_enabled = EnableTableLocks(); + opts->extra_tserver_flags.emplace_back( + yb::Format("--TEST_enable_object_locking_for_table_locks=$0", table_locks_enabled)); opts->extra_tserver_flags.emplace_back("--enable_ysql_operation_lease=true"); opts->extra_tserver_flags.emplace_back("--TEST_tserver_enable_ysql_lease_refresh=true"); opts->extra_tserver_flags.emplace_back( Format("--ysql_lease_refresher_interval_ms=$0", kDefaultYSQLLeaseRefreshIntervalMilli)); - opts->extra_master_flags.emplace_back("--TEST_enable_object_locking_for_table_locks=true"); + opts->extra_master_flags.emplace_back( + yb::Format("--TEST_enable_object_locking_for_table_locks=$0", table_locks_enabled)); opts->extra_master_flags.emplace_back("--enable_ysql_operation_lease=true"); opts->extra_master_flags.emplace_back( Format("--master_ysql_operation_lease_ttl_ms=$0", kDefaultMasterYSQLLeaseTTLMilli)); @@ -442,8 +446,53 @@ class PgObjectLocksTest : public LibPqTestBase { int GetNumTabletServers() const override { return 3; } + + virtual bool EnableTableLocks() const { + return true; + } +}; + +class PgObjectLocksTestAbortTxns : public PgObjectLocksTest, + public ::testing::WithParamInterface { + protected: + void UpdateMiniClusterOptions(ExternalMiniClusterOptions* opts) override { 
+ PgObjectLocksTest::UpdateMiniClusterOptions(opts); + opts->extra_tserver_flags.emplace_back("--ysql_colocate_database_by_default=true"); + opts->extra_master_flags.emplace_back("--ysql_colocate_database_by_default=true"); + } + + bool EnableTableLocks() const override { + return GetParam(); + } }; +TEST_P(PgObjectLocksTestAbortTxns, TestDDLAbortsTxns) { + auto conn = ASSERT_RESULT(ConnectToDB("yugabyte")); + ASSERT_OK(conn.Execute("CREATE DATABASE testdb with colocation=true")); + + auto conn1 = ASSERT_RESULT(ConnectToDB("testdb")); + ASSERT_OK(conn1.Execute("CREATE TABLE test(k INT PRIMARY KEY, v INT) with (colocated=true)")); + ASSERT_OK(conn1.Execute("CREATE TABLE test2(k INT PRIMARY KEY, v INT) with (colocated=true)")); + + ASSERT_OK(conn1.Execute("BEGIN TRANSACTION")); + ASSERT_OK(conn1.Execute("INSERT INTO test SELECT generate_series(1,11), 0")); + + auto conn2 = ASSERT_RESULT(ConnectToDB("testdb")); + ASSERT_OK(conn2.Execute("BEGIN TRANSACTION")); + ASSERT_OK(conn2.Execute("ALTER TABLE test2 ADD COLUMN v1 INT")); + ASSERT_OK(conn2.Execute("COMMIT")); + + if (EnableTableLocks()) { + ASSERT_OK(conn1.Execute("COMMIT")); + } else { + ASSERT_NOK(conn1.Execute("COMMIT")); + } +} + +INSTANTIATE_TEST_CASE_P( + TableLocksEnabled, PgObjectLocksTestAbortTxns, ::testing::Bool(), + ::testing::PrintToStringParamName()); + TEST_F(PgObjectLocksTest, ExclusiveLockReleaseInvalidatesCatalogCache) { const auto ts1_idx = 1; const auto ts2_idx = 2; diff --git a/src/yb/yql/pgwrapper/pg_on_conflict-test.cc b/src/yb/yql/pgwrapper/pg_on_conflict-test.cc index 55aad8258f7d..5c95f77ae520 100644 --- a/src/yb/yql/pgwrapper/pg_on_conflict-test.cc +++ b/src/yb/yql/pgwrapper/pg_on_conflict-test.cc @@ -40,6 +40,9 @@ class PgFailOnConflictTest : public PgOnConflictTest { // TODO(wait-queues): https://github.com/yugabyte/yugabyte-db/issues/17871 opts->extra_tserver_flags.push_back("--enable_wait_queues=false"); opts->extra_tserver_flags.push_back("--yb_enable_read_committed_isolation=true"); 
+ // Disable the tserver's lease expiry check as the window between killing and resuming a master + // can exceed the lease TTL. + opts->extra_tserver_flags.push_back("--TEST_enable_ysql_operation_lease_expiry_check=false"); } }; diff --git a/src/yb/yql/pgwrapper/pg_packed_row-test.cc b/src/yb/yql/pgwrapper/pg_packed_row-test.cc index 2abe99d4605c..151358727c2a 100644 --- a/src/yb/yql/pgwrapper/pg_packed_row-test.cc +++ b/src/yb/yql/pgwrapper/pg_packed_row-test.cc @@ -309,7 +309,7 @@ TEST_P(PgPackedRowTest, Random) { continue; } std::unordered_set values; - peer->tablet()->TEST_DocDBDumpToContainer(tablet::IncludeIntents::kTrue, &values); + peer->tablet()->TEST_DocDBDumpToContainer(docdb::IncludeIntents::kTrue, &values); std::vector sorted_values(values.begin(), values.end()); std::sort(sorted_values.begin(), sorted_values.end()); for (const auto& line : sorted_values) { @@ -805,7 +805,7 @@ TEST_P(PgPackedRowTest, CleanupIntentDocHt) { if (!peer->tablet()->regular_db()) { continue; } - auto dump = peer->tablet()->TEST_DocDBDumpStr(tablet::IncludeIntents::kTrue); + auto dump = peer->tablet()->TEST_DocDBDumpStr(docdb::IncludeIntents::kTrue); LOG(INFO) << "Dump: " << dump; ASSERT_EQ(dump.find("intent doc ht"), std::string::npos); } diff --git a/src/yb/yql/pgwrapper/pg_read_after_commit_visibility-test.cc b/src/yb/yql/pgwrapper/pg_read_after_commit_visibility-test.cc index 4071deefad08..c5f913196456 100644 --- a/src/yb/yql/pgwrapper/pg_read_after_commit_visibility-test.cc +++ b/src/yb/yql/pgwrapper/pg_read_after_commit_visibility-test.cc @@ -43,6 +43,7 @@ using namespace std::literals; DECLARE_string(time_source); DECLARE_int32(replication_factor); +DECLARE_bool(yb_enable_read_committed_isolation); namespace yb::pgwrapper { @@ -81,6 +82,7 @@ class PgReadAfterCommitVisibilityTest : public PgMiniTestBase { public: void SetUp() override { server::SkewedClock::Register(); + ANNOTATE_UNPROTECTED_WRITE(FLAGS_yb_enable_read_committed_isolation) = true; 
ANNOTATE_UNPROTECTED_WRITE(FLAGS_time_source) = server::SkewedClock::kName; ANNOTATE_UNPROTECTED_WRITE(FLAGS_replication_factor) = 1; PgMiniTestBase::SetUp(); @@ -214,6 +216,12 @@ class PgReadAfterCommitVisibilityTest : public PgMiniTestBase { } } + enum class Visibility { + STRICT, + RELAXED, + DEFERRED, + }; + struct Config { bool same_node = false; bool same_conn = false; @@ -221,7 +229,7 @@ class PgReadAfterCommitVisibilityTest : public PgMiniTestBase { bool has_dup_key = false; bool wait_for_skew = false; bool is_hidden_dml = false; - std::string visibility = "relaxed"; + Visibility visibility = Visibility::RELAXED; }; // General framework to observe the behavior of reads in different scenarios @@ -292,14 +300,25 @@ class PgReadAfterCommitVisibilityTest : public PgMiniTestBase { } // Perform a select using the the relaxed yb_read_after_commit_visibility option. + auto visibility = [](auto visibility) { + switch (visibility) { + case Visibility::STRICT: + return "strict"; + case Visibility::RELAXED: + return "relaxed"; + case Visibility::DEFERRED: + return "deferred"; + } + return ""; // keep gcc happy + }(config.visibility); ASSERT_OK(readConn.ExecuteFormat( - "SET yb_read_after_commit_visibility = $0", config.visibility)); + "SET yb_read_after_commit_visibility = $0", visibility)); if (!config.is_dml) { auto rows = ASSERT_RESULT(readConn.FetchRows(query)); // Observe the recent insert despite the clock skew when on the same node. 
- if (config.same_node || config.wait_for_skew) { + if (config.visibility != Visibility::RELAXED || config.same_node || config.wait_for_skew) { ASSERT_EQ(rows.size(), 1); } else { ASSERT_EQ(rows.size(), 0); @@ -320,7 +339,9 @@ class PgReadAfterCommitVisibilityTest : public PgMiniTestBase { auto pg_err_ptr = status.ErrorData(PgsqlError::kCategory); ASSERT_NE(pg_err_ptr, nullptr); YBPgErrorCode error_code = PgsqlErrorTag::Decode(pg_err_ptr); - ASSERT_EQ(error_code, YBPgErrorCode::YB_PG_UNIQUE_VIOLATION); + ASSERT_TRUE( + error_code == YBPgErrorCode::YB_PG_UNIQUE_VIOLATION || + error_code == YBPgErrorCode::YB_PG_T_R_SERIALIZATION_FAILURE); } } } @@ -377,16 +398,28 @@ TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeBoundedStaleness) // Inserts should not miss other recent inserts to // avoid missing duplicate key violations. This is guaranteed because // we don't apply "relaxed" to non-read transactions. -TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeDuplicateInsertCheck) { +TEST_F(PgReadAfterCommitVisibilityTest, RelaxedModeIgnoredForInsert) { RunTest(Config{ .is_dml = true, .has_dup_key = true, }, "INSERT INTO kv(k) VALUES (1)"); } +// Ensure that relaxed mode doesn't apply to fast-path writes. +TEST_F(PgReadAfterCommitVisibilityTest, RelaxedModeIgnoredForFastPathUpdate) { + RunTest(Config{ + .is_dml = true, + }, "UPDATE kv SET v = 2 WHERE k = 1"); + + // Ensure that the update happened. + auto conn = ASSERT_RESULT(ConnectToIdx(host_idx_)); + auto row = ASSERT_RESULT(conn.FetchRow("SELECT v FROM kv")); + ASSERT_EQ(row, 2); +} + // Updates should not miss recent DMLs either. This is guaranteed // because we don't apply "relaxed" to non-read transactions. 
-TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeUpdateKeyCheck) { +TEST_F(PgReadAfterCommitVisibilityTest, RelaxedModeIgnoredForDistributedUpdateTxn) { RunTest(Config{ .is_dml = true, }, "UPDATE kv SET k = 2 WHERE k = 1"); @@ -401,7 +434,7 @@ TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeUpdateKeyCheck) { // Otherwise, DELETE FROM table would not delete all the rows. // This is guaranteed because we don't apply "relaxed" to non-read // transactions. -TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeDeleteKeyCheck) { +TEST_F(PgReadAfterCommitVisibilityTest, RelaxedModeIgnoredForFastPathDelete) { RunTest(Config{ .is_dml = true, }, "DELETE FROM kv WHERE k = 1"); @@ -416,7 +449,56 @@ TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeDeleteKeyCheck) { // a SELECT but there is an insert hiding underneath. // We are guaranteed read-after-commit-visibility in this case // since "relaxed" is not applied to non-read transactions. -TEST_F(PgReadAfterCommitVisibilityTest, SessionOnDifferentNodeDmlHidden) { +TEST_F(PgReadAfterCommitVisibilityTest, RelaxedModeIgnoredForHiddenWrite) { + RunTest( + Config{ + .is_dml = true, + .has_dup_key = true, + .is_hidden_dml = true, + }, + "WITH new_kv AS (" + "INSERT INTO kv(k) VALUES (1) RETURNING k" + ") SELECT k FROM new_kv" + ); +} + +TEST_F(PgReadAfterCommitVisibilityTest, DifferentNodeDeferredRead) { + RunTest(Config{ + .visibility = Visibility::DEFERRED, + }, "SELECT k FROM kv"); +} + +TEST_F(PgReadAfterCommitVisibilityTest, DeferredModeInsert) { + RunTest(Config{ + .is_dml = true, + .has_dup_key = true, + .visibility = Visibility::DEFERRED, + }, "INSERT INTO kv(k) VALUES (1)"); +} + +TEST_F(PgReadAfterCommitVisibilityTest, DeferredModeDistributedTxnUpdate) { + RunTest(Config{ + .is_dml = true, + }, "UPDATE kv SET k = 2 WHERE k = 1"); + + // Ensure that the update happened. 
+ auto conn = ASSERT_RESULT(ConnectToIdx(host_idx_)); + auto row = ASSERT_RESULT(conn.FetchRow("SELECT k FROM kv")); + ASSERT_EQ(row, 2); +} + +TEST_F(PgReadAfterCommitVisibilityTest, DeferredModeFastPathUpdate) { + RunTest(Config{ + .is_dml = true, + }, "UPDATE kv SET v = 2 WHERE k = 1"); + + // Ensure that the update happened. + auto conn = ASSERT_RESULT(ConnectToIdx(host_idx_)); + auto row = ASSERT_RESULT(conn.FetchRow("SELECT v FROM kv")); + ASSERT_EQ(row, 2); +} + +TEST_F(PgReadAfterCommitVisibilityTest, DeferredModeHiddenDml) { RunTest( Config{ .is_dml = true, diff --git a/src/yb/yql/pgwrapper/pg_read_time-test.cc b/src/yb/yql/pgwrapper/pg_read_time-test.cc index 95c788a24862..84092b4075b4 100644 --- a/src/yb/yql/pgwrapper/pg_read_time-test.cc +++ b/src/yb/yql/pgwrapper/pg_read_time-test.cc @@ -629,12 +629,12 @@ TEST_F(PgMiniTestBase, YB_DISABLE_TEST_IN_SANITIZERS(TestYSQLDumpAsOfTime)) { // // There are two primary effects of relaxed yb_read_after_commit_visibility: // - SELECTs now always pick their read time on local proxy. -// - The read time is clamped whenever it is picked this way (not relevant). +// - The read time is clamped whenever it is picked this way (not relevant for this test). // // This implies the following changes compared to the vanilla test // - Case 1: no pipeline, single operation in first batch, no distributed txn. -// Read time is picked on proxy for SELECTs and not DMLs. -// - Case 3: no pipeline, multiple operations to the same tablet in first batch, no distributed txn. +// Read time is picked on proxy for plain SELECTs. +// - Case 2: no pipeline, multiple operations to the same tablet in first batch, no distributed txn. // Read time is picked on proxy. TEST_F(PgReadTimeTest, CheckRelaxedReadAfterCommitVisibility) { auto conn = ASSERT_RESULT(Connect()); @@ -662,6 +662,7 @@ TEST_F(PgReadTimeTest, CheckRelaxedReadAfterCommitVisibility) { // Relax read-after-commit-visiblity guarantee. 
ASSERT_OK(conn.Execute("SET yb_read_after_commit_visibility TO relaxed")); + ASSERT_OK(conn.Execute("SET log_statement = 'all'")); // 1. no pipeline, single operation in first batch, no distributed txn // @@ -706,13 +707,142 @@ TEST_F(PgReadTimeTest, CheckRelaxedReadAfterCommitVisibility) { // a read time is picked in read_query.cc, but an extra picking is done in write_query.cc just // after conflict resolution is done (see DoTransactionalConflictsResolved()). // - // relaxed yb_read_after_commit_visibility does not affect FOR UDPATE queries. + // relaxed yb_read_after_commit_visibility does not affect FOR UPDATE queries. CheckReadTimePickedOnDocdb( [&conn, kTable]() { ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); ASSERT_OK(conn.FetchFormat("SELECT * FROM $0 WHERE k=1 FOR UPDATE", kTable)); ASSERT_OK(conn.CommitTransaction()); - }, 2 /* expected_num_picked_read_time_on_doc_db_metric */); + }, 2); + + // 5. no pipeline, multiple operations to various tablets in first batch, starts a distributed + // transation + ASSERT_OK(SetHighMaxBatchSize(&conn)); + CheckReadTimeProvidedToDocdb( + [&conn, kTable]() { + ASSERT_OK(conn.ExecuteFormat("CALL insert_rows_$0(101, 110)", kTable)); + }); + ASSERT_OK(ResetMaxBatchSize(&conn)); + + // 6. no pipeline, multiple operations to the same tablet in first batch, starts a distributed + // transation + ASSERT_OK(SetHighMaxBatchSize(&conn)); + CheckReadTimeProvidedToDocdb( + [&conn, kSingleTabletTable]() { + ASSERT_OK(conn.ExecuteFormat("CALL insert_rows_$0(101, 110)", kSingleTabletTable)); + }); + ASSERT_OK(ResetMaxBatchSize(&conn)); + + // 7. Pipeline, single operation in first batch, starts a distributed transation + ASSERT_OK(SetMaxBatchSize(&conn, 1)); + CheckReadTimeProvidedToDocdb( + [&conn, kTable]() { + ASSERT_OK(conn.ExecuteFormat("CALL insert_rows_$0(111, 120)", kTable)); + }); + ASSERT_OK(ResetMaxBatchSize(&conn)); + + // 8. 
Pipeline, multiple operations to various tablets in first batch, starts a distributed + // transation + ASSERT_OK(SetMaxBatchSize(&conn, 10)); + CheckReadTimeProvidedToDocdb( + [&conn, kTable]() { + ASSERT_OK(conn.ExecuteFormat("CALL insert_rows_$0(121, 150)", kTable)); + }); + ASSERT_OK(ResetMaxBatchSize(&conn)); + + // 9. Pipeline, multiple operations to the same tablet in first batch, starts a distributed + // transation + ASSERT_OK(SetMaxBatchSize(&conn, 10)); + CheckReadTimeProvidedToDocdb( + [&conn, kSingleTabletTable]() { + ASSERT_OK(conn.ExecuteFormat("CALL insert_rows_$0(121, 150)", kSingleTabletTable)); + }); + ASSERT_OK(ResetMaxBatchSize(&conn)); +} + +TEST_F(PgReadTimeTest, CheckDeferredReadAfterCommitVisibility) { + auto conn = ASSERT_RESULT(Connect()); + ASSERT_OK(conn.Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); + constexpr auto kTable = "test"sv; + constexpr auto kSingleTabletTable = "test_with_single_tablet"sv; + ASSERT_OK(conn.ExecuteFormat("CREATE TABLE $0 (k INT PRIMARY KEY, v INT)", kTable)); + ASSERT_OK(conn.ExecuteFormat( + "CREATE TABLE $0 (k INT PRIMARY KEY, v INT) SPLIT INTO 1 TABLETS", kSingleTabletTable)); + + for (const auto& table_name : {kTable, kSingleTabletTable}) { + ASSERT_OK(conn.ExecuteFormat( + "INSERT INTO $0 SELECT generate_series(1, 100), 0", table_name)); + ASSERT_OK(conn.ExecuteFormat( + "CREATE OR REPLACE PROCEDURE insert_rows_$0(first integer, last integer) " + "LANGUAGE plpgsql " + "as $$body$$ " + "BEGIN " + " FOR i in first..last LOOP " + " INSERT INTO $0 VALUES (i, i); " + " END LOOP; " + "END; " + "$$body$$", table_name)); + } + + // Defer read-after-commit-visiblity guarantee. + ASSERT_OK(conn.Execute("SET yb_read_after_commit_visibility TO deferred")); + ASSERT_OK(conn.Execute("SET log_statement = 'all'")); + + // 1. 
no pipeline, single operation in first batch, no distributed txn + for (const auto& table_name : {kTable, kSingleTabletTable}) { + CheckReadTimeProvidedToDocdb( + [&conn, table_name]() { + ASSERT_OK(conn.FetchFormat("SELECT * FROM $0 WHERE k=1", table_name)); + }); + + CheckReadTimePickedOnDocdb( + [&conn, table_name]() { + ASSERT_OK(conn.ExecuteFormat("UPDATE $0 SET v=1 WHERE k=1", table_name)); + }); + + CheckReadTimePickedOnDocdb( + [&conn, table_name]() { + ASSERT_OK(conn.ExecuteFormat("INSERT INTO $0 VALUES (1000, 1000)", table_name)); + }); + + CheckReadTimePickedOnDocdb( + [&conn, table_name]() { + ASSERT_OK(conn.ExecuteFormat("DELETE FROM $0 WHERE k=1000", table_name)); + }); + + // Not a fast path write. + CheckReadTimeProvidedToDocdb( + [&conn, table_name]() { + ASSERT_OK(conn.ExecuteFormat("UPDATE $0 SET k=2000 WHERE k=1", table_name)); + }); + } + + // 2. no pipeline, multiple operations to various tablets in first batch, no distributed txn + CheckReadTimeProvidedToDocdb( + [&conn, kTable]() { + ASSERT_OK(conn.FetchFormat("SELECT COUNT(*) FROM $0", kTable)); + }); + + // 3. no pipeline, multiple operations to the same tablet in first batch, no distributed txn + CheckReadTimeProvidedToDocdb( + [&conn, kSingleTabletTable]() { + ASSERT_OK(conn.FetchFormat("SELECT COUNT(*) FROM $0", kSingleTabletTable)); + }); + + // 4. no pipeline, single operation in first batch, starts a distributed transaction + // + // expected_num_picked_read_time_on_doc_db_metric is set because in case of a SELECT FOR UPDATE, + // a read time is picked in read_query.cc, but an extra picking is done in write_query.cc just + // after conflict resolution is done (see DoTransactionalConflictsResolved()). + // + // deferred yb_read_after_commit_visibility picks read time at pg client even + // for SELECT FOR UPDATE queries. 
+ CheckReadTimeProvidedToDocdb( + [&conn, kTable]() { + ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + ASSERT_OK(conn.FetchFormat("SELECT * FROM $0 WHERE k=1 FOR UPDATE", kTable)); + ASSERT_OK(conn.CommitTransaction()); + }); // 5. no pipeline, multiple operations to various tablets in first batch, starts a distributed // transaction diff --git a/src/yb/yql/pgwrapper/pg_server_restart-test.cc b/src/yb/yql/pgwrapper/pg_server_restart-test.cc index c045d2f944da..7b2932ff39e9 100644 --- a/src/yb/yql/pgwrapper/pg_server_restart-test.cc +++ b/src/yb/yql/pgwrapper/pg_server_restart-test.cc @@ -64,8 +64,7 @@ TEST_F(PgSingleServerRestartTest, GetSafeTimeBeforeConsensusStarted) { ASSERT_OK(itest::WaitForServersToAgree(10s, ts_map, tablet_id, /* minimum_index = */ 4)); SleepFor(1s); - ASSERT_OK(cluster_->FlushTabletsOnSingleTServer( - cluster_->tablet_server(0), {tablet_id}, tserver::FlushTabletsRequestPB::FLUSH)); + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(0, {tablet_id})); leader->Shutdown(SafeShutdown::kFalse); ASSERT_OK(cluster_->WaitForTSToCrash(leader, 10s)); diff --git a/src/yb/yql/pgwrapper/pg_vector_index-itest.cc b/src/yb/yql/pgwrapper/pg_vector_index-itest.cc new file mode 100644 index 000000000000..dc880858e4b4 --- /dev/null +++ b/src/yb/yql/pgwrapper/pg_vector_index-itest.cc @@ -0,0 +1,257 @@ +// Copyright (c) YugabyteDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations +// under the License. 
+// + +#include "yb/tserver/tserver_service.proxy.h" + +#include "yb/util/backoff_waiter.h" +#include "yb/util/scope_exit.h" +#include "yb/util/test_thread_holder.h" + +#include "yb/yql/pgwrapper/libpq_test_base.h" + +using namespace std::literals; + +namespace yb::pgwrapper { + +constexpr auto kBackfillSleepSec = 10 * kTimeMultiplier; + +class PgVectorIndexITest : public LibPqTestBase { + public: + Result ConnectAndInit(std::optional num_tablets = std::nullopt) { + auto conn = VERIFY_RESULT(Connect()); + RETURN_NOT_OK(conn.Execute("CREATE EXTENSION vector")); + std::string stmt = "CREATE TABLE test (id INT PRIMARY KEY, embedding vector(1))"; + if (num_tablets) { + stmt += Format(" SPLIT INTO $0 TABLETS", *num_tablets); + } + RETURN_NOT_OK(conn.ExecuteFormat(stmt)); + return conn; + } + + Status CreateIndex(PGConn& conn) { + // TODO(vector_index) Switch to using CONCURRENT index creation when it will be ready. + return conn.Execute( + "CREATE INDEX ON test USING ybhnsw (embedding vector_l2_ops)"); + } +}; + +struct VectorIndexWriter { + static constexpr int kBig = 100000000; + + std::atomic counter = 0; + std::atomic extra_values_counter = kBig * 2; + std::atomic last_write; + std::atomic max_time_without_inserts = MonoDelta::FromNanoseconds(0); + std::atomic failure = false; + + void Perform(PGConn& conn) { + std::vector values; + for (int i = RandomUniformInt(3, 6); i > 0; --i) { + values.push_back(++counter); + } + size_t keep_values = values.size(); + for (int i = RandomUniformInt(0, 2); i > 0; --i) { + values.push_back(++extra_values_counter); + } + bool use_2_steps = RandomUniformBool(); + + int offset = use_2_steps ? 
kBig : 0; + ASSERT_NO_FATALS(Insert(conn, values, offset)); + if (use_2_steps || keep_values != values.size()) { + ASSERT_NO_FATALS(UpdateAndDelete(conn, values, keep_values)); + } + } + + void Insert(PGConn& conn, const std::vector& values, int offset) { + for (;;) { + ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + bool failed = false; + for (auto value : values) { + auto res = conn.ExecuteFormat( + "INSERT INTO test VALUES ($0, '[$1.0]')", value, value + offset); + if (!res.ok()) { + ASSERT_OK(conn.RollbackTransaction()); + LOG(INFO) << "Insert " << value << " failed: " << res; + ASSERT_STR_CONTAINS(res.message().ToBuffer(), "schema version mismatch"); + failed = true; + break; + } + } + if (!failed) { + ASSERT_OK(conn.CommitTransaction()); + auto now = CoarseMonoClock::Now(); + auto prev_last_write = last_write.exchange(now); + if (prev_last_write != CoarseTimePoint()) { + MonoDelta new_value(now - prev_last_write); + if (MakeAtLeast(max_time_without_inserts, new_value)) { + LOG(INFO) << "Update max time without inserts: " << new_value; + } + } + std::this_thread::sleep_for(100ms); + break; + } + } + } + + void UpdateAndDelete(PGConn& conn, const std::vector& values, size_t keep_values) { + for (;;) { + ASSERT_OK(conn.StartTransaction(IsolationLevel::SNAPSHOT_ISOLATION)); + bool failed = false; + for (size_t i = 0; i != values.size(); ++i) { + auto value = values[i]; + Status res; + if (i < keep_values) { + res = conn.ExecuteFormat( + "UPDATE test SET embedding = '[$0.0]' WHERE id = $0", value); + } else { + res = conn.ExecuteFormat("DELETE FROM test WHERE id = $0", value); + } + if (!res.ok()) { + ASSERT_OK(conn.RollbackTransaction()); + LOG(INFO) << + (i < keep_values ? 
"Update " : "Delete " ) << value << " failed: " << res; + ASSERT_STR_CONTAINS(res.message().ToBuffer(), "schema version mismatch"); + failed = true; + break; + } + } + if (!failed) { + ASSERT_OK(conn.CommitTransaction()); + std::this_thread::sleep_for(100ms); + break; + } + } + } + + void WaitWritten(int num_rows) { + auto limit = counter.load() + num_rows; + while (counter.load() < limit && !failure) { + std::this_thread::sleep_for(10ms); + } + } + + void Verify(PGConn& conn) { + int num_bad_results = 0; + for (int i = 2; i < counter.load(); ++i) { + auto rows = ASSERT_RESULT(conn.FetchAllAsString(Format( + "SELECT id FROM test ORDER BY embedding <-> '[$0]' LIMIT 3", i * 1.0 - 0.01))); + auto expected = Format("$0; $1; $2", i, i - 1, i + 1); + if (rows != expected) { + LOG(INFO) << "Bad result: " << rows << " vs " << expected; + ++num_bad_results; + } + } + // Expect recall 98% or better. + ASSERT_LE(num_bad_results, counter.load() / 50); + } +}; + +class PgVectorIndexBackfillITest : public PgVectorIndexITest { + public: + void UpdateMiniClusterOptions(ExternalMiniClusterOptions* options) override { + PgVectorIndexITest::UpdateMiniClusterOptions(options); + options->extra_master_flags.push_back("--ysql_disable_index_backfill=false"); + options->extra_tserver_flags.push_back( + Format("--TEST_sleep_before_vector_index_backfill_seconds=$0", kBackfillSleepSec)); + } +}; + +TEST_F_EX(PgVectorIndexITest, Backfill, PgVectorIndexBackfillITest) { + auto conn = ASSERT_RESULT(ConnectAndInit()); + TestThreadHolder thread_holder; + VectorIndexWriter writer; + for (int i = 0; i != 8; ++i) { + thread_holder.AddThreadFunctor( + [this, &stop_flag = thread_holder.stop_flag(), &writer] { + auto se = CancelableScopeExit([&writer] { + writer.failure = true; + }); + auto conn = ASSERT_RESULT(Connect()); + while (!stop_flag.load()) { + ASSERT_NO_FATALS(writer.Perform(conn)); + } + se.Cancel(); + }); + } + writer.WaitWritten(32); + LOG(INFO) << "Started to create index"; + 
ASSERT_OK(CreateIndex(conn)); + LOG(INFO) << "Finished to create index"; + writer.WaitWritten(32); + thread_holder.Stop(); + LOG(INFO) << "Max time without inserts: " << writer.max_time_without_inserts; + ASSERT_LT(writer.max_time_without_inserts, 1s * kBackfillSleepSec); + SCOPED_TRACE(Format("Total rows: $0", writer.counter.load())); + + // VerifyVectorIndexes does not take intents into account, so could produce false failure. + ASSERT_OK(cluster_->WaitForAllIntentsApplied(30s * kTimeMultiplier)); + + for (size_t i = 0; i != cluster_->num_tablet_servers(); ++i) { + tserver::VerifyVectorIndexesRequestPB req; + tserver::VerifyVectorIndexesResponsePB resp; + rpc::RpcController controller; + controller.set_timeout(30s); + auto proxy = cluster_->tablet_server(i)->Proxy(); + ASSERT_OK(proxy->VerifyVectorIndexes(req, &resp, &controller)); + ASSERT_FALSE(resp.has_error()) << resp.ShortDebugString(); + } + writer.Verify(conn); +} + +class PgVectorIndexRBSITest : public PgVectorIndexITest { + public: + void UpdateMiniClusterOptions(ExternalMiniClusterOptions* options) override { + options->extra_tserver_flags.push_back("--log_min_seconds_to_retain=0"); + options->extra_tserver_flags.push_back("--xcluster_checkpoint_max_staleness_secs=0"); + } +}; + +TEST_F_EX(PgVectorIndexITest, CrashAfterRBSDownload, PgVectorIndexRBSITest) { + constexpr size_t kNumRows = 5; + constexpr size_t kTsIndex = 2; + + auto conn = ASSERT_RESULT(ConnectAndInit(1)); + ASSERT_OK(CreateIndex(conn)); + auto* lagging_ts = cluster_->tablet_server(kTsIndex); + lagging_ts->Shutdown(); + + auto tablets = ASSERT_RESULT(cluster_->ListTablets(cluster_->tablet_server(0))); + ASSERT_EQ(tablets.status_and_schema().size(), 1); + auto tablet_id = tablets.status_and_schema()[0].tablet_status().tablet_id(); + + for (size_t i = 1; i <= kNumRows; ++i) { + ASSERT_OK(conn.ExecuteFormat("INSERT INTO test VALUES ($0, '[$0.0]')", i)); + for (size_t j = 0; j != 2; ++j) { + ASSERT_OK(cluster_->FlushTabletsOnSingleTServer(j, 
{tablet_id})); + ASSERT_OK(cluster_->LogGCOnSingleTServer(j, {tablet_id}, true)); + } + } + + ASSERT_OK(lagging_ts->Start()); + + auto good_tablets = ASSERT_RESULT(cluster_->ListTablets(cluster_->tablet_server(0))); + LOG(INFO) << "Good tablets: " << AsString(good_tablets); + ASSERT_EQ(good_tablets.status_and_schema().size(), 1); + ASSERT_OK(WaitFor([this, &good_tablets, lagging_ts]() -> Result { + auto lagging_tablets = VERIFY_RESULT(cluster_->ListTablets(lagging_ts)); + LOG(INFO) << "Lagging tablets: " << AsString(lagging_tablets); + if (lagging_tablets.status_and_schema().size() != 1) { + return false; + } + EXPECT_EQ(lagging_tablets.status_and_schema().size(), 1); + return lagging_tablets.status_and_schema()[0].tablet_status().last_op_id().index() >= + good_tablets.status_and_schema()[0].tablet_status().last_op_id().index(); + }, 5s, "Wait lagging TS to catch up")); +} + +} // namespace yb::pgwrapper diff --git a/src/yb/yql/pgwrapper/pg_wrapper.cc b/src/yb/yql/pgwrapper/pg_wrapper.cc index 9ae2d7df9225..4a6536ef915b 100644 --- a/src/yb/yql/pgwrapper/pg_wrapper.cc +++ b/src/yb/yql/pgwrapper/pg_wrapper.cc @@ -807,9 +807,8 @@ Status PgWrapper::Start() { } } #else - if (FLAGS_yb_enable_valgrind) { - LOG(ERROR) << "yb_enable_valgrind is ON, but Yugabyte was not compiled with Valgrind support."; - } + LOG_IF(DFATAL, FLAGS_yb_enable_valgrind) + << "yb_enable_valgrind is ON, but Yugabyte was not compiled with Valgrind support."; #endif vector postgres_argv { @@ -1152,9 +1151,7 @@ Status PgWrapper::InitDbForYSQL( LOG(INFO) << "initdb took " << std::chrono::duration_cast(elapsed_time).count() << " ms"; - if (!initdb_status.ok()) { - LOG(ERROR) << "initdb failed: " << initdb_status; - } + ERROR_NOT_OK(initdb_status, "initdb failed"); return initdb_status; } @@ -1277,7 +1274,7 @@ Status PgWrapper::CleanupLockFileAndKillHungPg(const std::string& lock_file) { } if (postgres_pid == 0) { - LOG(ERROR) << strings::Substitute( + LOG(WARNING) << Format( "Error reading postgres 
process ID from lock file $0. $1 $2", lock_file, ErrnoToString(errno), errno); } else { @@ -1335,6 +1332,7 @@ PgSupervisor::PgSupervisor(PgProcessConf conf, PgWrapperContext* server) if (server_) { server_->RegisterCertificateReloader(std::bind(&PgSupervisor::ReloadConfig, this)); server_->RegisterPgProcessRestarter(std::bind(&PgSupervisor::Restart, this)); + server_->RegisterPgProcessKiller(std::bind(&PgSupervisor::Pause, this)); } } @@ -1466,15 +1464,15 @@ key_t PgSupervisor::GetYsqlConnManagerStatsShmkey() { if (shmid < 0) { switch (errno) { case EACCES: - LOG(ERROR) << "Unable to create shared memory segment, not authorised to create shared " - "memory segment"; + LOG(DFATAL) << "Unable to create shared memory segment, not authorised to create shared " + "memory segment"; return -1; case ENOSPC: - LOG(ERROR) + LOG(DFATAL) << "Unable to create shared memory segment, no space left."; return -1; case ENOMEM: - LOG(ERROR) + LOG(DFATAL) << "Unable to create shared memory segment, no memory left"; return -1; default: diff --git a/src/yb/yql/pgwrapper/pg_wrapper_context.h b/src/yb/yql/pgwrapper/pg_wrapper_context.h index 1f7ff38ff1e2..5b374c0b4b97 100644 --- a/src/yb/yql/pgwrapper/pg_wrapper_context.h +++ b/src/yb/yql/pgwrapper/pg_wrapper_context.h @@ -25,6 +25,7 @@ class PgWrapperContext { virtual ~PgWrapperContext() = default; virtual void RegisterCertificateReloader(tserver::CertificateReloader reloader) = 0; virtual void RegisterPgProcessRestarter(std::function restarter) = 0; + virtual void RegisterPgProcessKiller(std::function killer) = 0; virtual Status StartSharedMemoryNegotiation() = 0; virtual Status StopSharedMemoryNegotiation() = 0; virtual int SharedMemoryNegotiationFd() = 0; diff --git a/src/yb/yql/pgwrapper/ysql_upgrade.cc b/src/yb/yql/pgwrapper/ysql_upgrade.cc index f7e890e96ed8..7a3587bfd6d3 100644 --- a/src/yb/yql/pgwrapper/ysql_upgrade.cc +++ b/src/yb/yql/pgwrapper/ysql_upgrade.cc @@ -215,6 +215,11 @@ class YsqlUpgradeHelper::DatabaseEntry { Result> 
MakeConnection() { auto pgconn = std::make_shared(VERIFY_RESULT(conn_builder_.Connect())); RETURN_NOT_OK(pgconn->Execute("SET ysql_upgrade_mode TO true;")); + // Use snapshot isolation mode during YSQL upgrade. This is needed as a simple work + // around so that we do not start subtransactions during YSQL upgrade. Otherwise the + // COMMIT will only capture the invalidation messages generated by the last DML statement + // preceding the COMMIT statement. + RETURN_NOT_OK(pgconn->Execute("SET DEFAULT_TRANSACTION_ISOLATION TO \"REPEATABLE READ\"")); return pgconn; } diff --git a/src/yb/yql/redis/redisserver/redis_client.cc b/src/yb/yql/redis/redisserver/redis_client.cc index cf13730a111e..aa90b8925a9f 100644 --- a/src/yb/yql/redis/redisserver/redis_client.cc +++ b/src/yb/yql/redis/redisserver/redis_client.cc @@ -62,7 +62,7 @@ RedisReply CreateReply(redisReply* reply) { default: RedisReply result( RedisReplyType::kError, Format("Unsupported reply type: $0", reply->type)); - LOG(ERROR) << result.ToString(); + LOG(DFATAL) << result.ToString(); return result; } } diff --git a/src/yb/yql/redis/redisserver/redis_commands.cc b/src/yb/yql/redis/redisserver/redis_commands.cc index 32cd98690449..183751271395 100644 --- a/src/yb/yql/redis/redisserver/redis_commands.cc +++ b/src/yb/yql/redis/redisserver/redis_commands.cc @@ -322,7 +322,7 @@ void GetTabletLocations(LocalCommandData data, RedisArrayPB* array_response) { auto s = data.client()->GetTabletsAndUpdateCache( table_name, 0, &tablets, &partitions, &locations); if (!s.ok()) { - LOG(ERROR) << "Error getting tablets: " << s.message(); + LOG(DFATAL) << "Error getting tablets: " << s.message(); return; } vector response, ts_info; @@ -717,7 +717,7 @@ class RenameData : public std::enable_shared_from_this { session_->FlushAsync([retained_self = shared_from_this()](client::FlushStatus* flush_status) { const auto& s = flush_status->status; if (!s.ok()) { - LOG(ERROR) << "Reading from src during a Rename failed. 
" << s; + LOG(DFATAL) << "Reading from src during a Rename failed. " << s; retained_self->RespondWithError(s.message().ToBuffer()); } else { retained_self->BeginWriteDest(); @@ -824,7 +824,7 @@ class RenameData : public std::enable_shared_from_this { session_->FlushAsync([retained_self = shared_from_this()](client::FlushStatus* flush_status) { const auto& s = flush_status->status; if (!s.ok()) { - LOG(ERROR) << "Writing to dest during a Rename failed. " << s; + LOG(DFATAL) << "Writing to dest during a Rename failed. " << s; retained_self->RespondWithError(s.message().ToBuffer()); return; } @@ -844,7 +844,7 @@ class RenameData : public std::enable_shared_from_this { session_->FlushAsync([retained_self = shared_from_this()](client::FlushStatus* flush_status) { const auto& s = flush_status->status; if (!s.ok()) { - LOG(ERROR) << "Updating ttl for dest during a Rename failed. " << s; + LOG(DFATAL) << "Updating ttl for dest during a Rename failed. " << s; retained_self->RespondWithError(s.message().ToBuffer()); return; } @@ -858,7 +858,7 @@ class RenameData : public std::enable_shared_from_this { session_->FlushAsync([retained_self = shared_from_this()](client::FlushStatus* flush_status) { const auto& s = flush_status->status; if (!s.ok()) { - LOG(ERROR) << "Deleting src during a Rename failed. " << s; + LOG(DFATAL) << "Deleting src during a Rename failed. 
" << s; retained_self->RespondWithError(s.message().ToBuffer()); return; } diff --git a/src/yb/yql/redis/redisserver/redis_service.cc b/src/yb/yql/redis/redisserver/redis_service.cc index c5d1af07689c..b042faf35cc4 100644 --- a/src/yb/yql/redis/redisserver/redis_service.cc +++ b/src/yb/yql/redis/redisserver/redis_service.cc @@ -1219,7 +1219,7 @@ void RedisServiceImplData::ForwardToInterestedProxies( const string& channel, const string& message, const IntFunctor& f) { auto interested_servers = GetServerAddrsForChannel(channel); if (!interested_servers.ok()) { - LOG(ERROR) << "Could not get servers to forward to " << interested_servers.status(); + LOG(DFATAL) << "Could not get servers to forward to " << interested_servers.status(); return; } std::shared_ptr resp_handler = @@ -1413,7 +1413,7 @@ const RedisCommandInfo* RedisServiceImpl::Impl::FetchHandler(const RedisClientCo } auto iter = command_name_to_info_map_.find(Slice(lower_cmd, len)); if (iter == command_name_to_info_map_.end()) { - YB_LOG_EVERY_N_SECS(ERROR, 60) + YB_LOG_EVERY_N_SECS(WARNING, 60) << "Command " << cmd_name << " not yet supported. " << "Arguments: " << ToString(cmd_args) << ". " << "Raw: " << Slice(cmd_args[0].data(), cmd_args.back().end()).ToDebugString(); @@ -1498,13 +1498,13 @@ void RedisServiceImpl::Impl::Handle(rpc::InboundCallPtr call_ptr) { size_t passed_arguments = c.size() - 1; if (!exact_count && passed_arguments < arity) { // -X means that the command needs >= X arguments. - YB_LOG_EVERY_N_SECS(ERROR, 60) + YB_LOG_EVERY_N_SECS(WARNING, 60) << "Requested command " << c[0] << " does not have enough arguments." << " At least " << arity << " expected, but " << passed_arguments << " found."; RespondWithFailure(call, idx, "Too few arguments."); } else if (exact_count && passed_arguments != arity) { // X (> 0) means that the command needs exactly X arguments. 
- YB_LOG_EVERY_N_SECS(ERROR, 60) + YB_LOG_EVERY_N_SECS(WARNING, 60) << "Requested command " << c[0] << " has wrong number of arguments. " << arity << " expected, but " << passed_arguments << " found."; RespondWithFailure(call, idx, "Wrong number of arguments.");
Granted object locks)"<< caption << R"(
Lock Owner Object Id Num Holders