diff --git a/docs-main/docs.json b/docs-main/docs.json index 943fbc9a..de0a923b 100644 --- a/docs-main/docs.json +++ b/docs-main/docs.json @@ -2594,7 +2594,8 @@ "pages": [ "global-synchronizer/production-operations/disaster-recovery", "global-synchronizer/production-operations/pruning", - "global-synchronizer/production-operations/security-hardening" + "global-synchronizer/production-operations/security-hardening", + "global-synchronizer/production-operations/logical-synchronizer-upgrade" ] } ] @@ -2742,7 +2743,8 @@ "pages": [ "global-synchronizer/production-operations/disaster-recovery", "global-synchronizer/production-operations/pruning", - "global-synchronizer/production-operations/security-hardening" + "global-synchronizer/production-operations/security-hardening", + "global-synchronizer/production-operations/logical-synchronizer-upgrade" ] } ] @@ -2890,7 +2892,8 @@ "pages": [ "global-synchronizer/production-operations/disaster-recovery", "global-synchronizer/production-operations/pruning", - "global-synchronizer/production-operations/security-hardening" + "global-synchronizer/production-operations/security-hardening", + "global-synchronizer/production-operations/logical-synchronizer-upgrade" ] } ] @@ -2980,7 +2983,8 @@ "group": "Exchanges", "pages": [ "integrations/exchanges/sdk-download", - "integrations/exchanges/guidance" + "integrations/exchanges/guidance", + "integrations/exchanges/node-operations" ] }, { @@ -3017,7 +3021,8 @@ "group": "Exchanges", "pages": [ "integrations/exchanges/sdk-download", - "integrations/exchanges/guidance" + "integrations/exchanges/guidance", + "integrations/exchanges/node-operations" ] }, { @@ -3054,7 +3059,8 @@ "group": "Exchanges", "pages": [ "integrations/exchanges/sdk-download", - "integrations/exchanges/guidance" + "integrations/exchanges/guidance", + "integrations/exchanges/node-operations" ] }, { diff --git a/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx b/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx new file mode 100644 index 00000000..b4117ca7 --- /dev/null +++ b/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx @@ -0,0 +1,168 @@ +--- +title: "Logical Synchronizer Upgrades" +description: "Upgrade the protocol version of a Global Synchronizer with very limited network downtime through Logical Synchronizer Upgrades (LSU)" +--- + +{/* COPIED_START source="splice:docs/src/sv_operator/sv_logical_synchronizer_upgrade.rst" hash="0cf8ccb8" */} + + +This section was copied from existing reviewed documentation. +**Source:** `docs/src/sv_operator/sv_logical_synchronizer_upgrade.rst` +Reviewers: Skip this section. Remove markers after final approval. + + + +Logical Synchronizer Upgrades (LSU) are still in development and the instructions here are intended as a preview primarily targeted at Super Validators but will likely change in minor ways before the full release. + + + +Logical synchronizer upgrades (LSUs) allow upgrading the protocol version of a synchronizer +**with very limited network downtime** and no operational overhead for validator operators and app devs around upgrades. +Super validators still have to perform operational steps to deploy successor nodes and schedule the upgrade but those are done asynchronously before the actual upgrade happens. + +## High-Level Overview + +1. A new Canton release with an updated protocol version becomes available, along with a compatible Splice release. 
For testing purposes or in some disaster recovery scenarios this can also be the same version and/or protocol version.
   This release supports both the old and the new protocol version.
2. Validators and super validators upgrade to the new release, but continue running the original physical synchronizer with the old protocol version. This is a regular upgrade and can be done asynchronously,
   but it must be completed before the actual upgrade time.
3. A vote is created in the SV UI to schedule an LSU through the `nextScheduledLogicalSynchronizerUpgrade` field in `DsoRulesConfig`.
   The schedule includes:

   1. **topology freeze time**: after this time, no topology transactions can be sequenced until the upgrade time, so in particular no parties can be added and no Daml packages can be vetted
   2. **upgrade time**: at this time, Daml transactions on the original physical synchronizer will time out and new Daml transactions will run on the new physical synchronizer
   3. **new physical synchronizer serial**: usually just the old serial incremented by 1
   4. **new protocol version**: the protocol version of the successor synchronizer

4. All SVs deploy *successor* synchronizer nodes (sequencer, mediator, and optionally CometBFT if DABFT is not used) alongside their existing nodes. Note: there is no new participant; the participant is tied to the logical synchronizer, so it does not change during an LSU. As part of this step, SVs also [configure](#super-validator-deployment-changes) the successor synchronizer in their SV and scan config.
   This deployment should be completed before the freeze time.
5. At the scheduled **topology freeze time**, each SV's app automation transfers the topology state to the successor nodes and publishes the sequencer URL for the new sequencer in the topology state (this is the only topology transaction that can be published after the freeze time).
6. Between the topology freeze time and the upgrade time, SV app automation will periodically send special health check events on the new physical synchronizer to verify its health. Each super validator should use their metrics to validate that they observe at least one event from every other super validator in the `LSU Sequencing Test` dashboard, and that the BFT peer connections (CometBFT or DABFT) of the successor nodes are healthy.
+ +7. At the scheduled **upgrade time**, participants automatically connect to the successor synchronizer. + The SV automation transfers traffic control state from the current sequencer to the successor. + The successor physical synchronizer may be configured with a lower initial rate limit that will be + raised by the SV app after a configurable amount of time to avoid an initial traffic surge on the new synchronizer. + +

8. The successor physical synchronizer is now fully usable. Super Validators update their [configuration](#super-validator-deployment-changes) to mark the original synchronizer as legacy
   and the successor as the current synchronizer.
9. After 30 days, the super validators remove the old physical synchronizer node deployment. Super Validators update their [configuration](#super-validator-deployment-changes) to remove the
   legacy synchronizer configuration.

### LSU Cancellation

Between the topology freeze time and the upgrade time, the upgrade can be cancelled if the successor physical synchronizer is deemed unhealthy, e.g., because the health checks fail.
To do so, a threshold of super validators must send a `POST` request to the `/v0/admin/synchronizer/lsu/cancel` endpoint on the SV API.

### Disaster Recovery through Roll-Forward LSU

In case of a disaster that causes the current physical synchronizer to become unavailable, an LSU can be used as a roll-forward recovery mechanism.
The procedure is similar to a regular LSU, but because the current physical synchronizer is unusable, coordination through a vote and topology transactions is not possible; instead, validators and super validators need to manually initiate the upgrade.

This procedure can also be used for recovering from a failed LSU. There are two relevant cases:

1. The LSU did not get cancelled before the upgrade time, but no Daml or topology transactions could be sequenced on the successor physical synchronizer after the upgrade time. In this case, the original successor synchronizer can be thrown away and replaced
   by a new successor synchronizer with the serial incremented by 1 again (i.e., 2 higher than the serial of the original, non-successor synchronizer).
2. The LSU proceeded and some transactions did get sequenced on the successor physical synchronizer, but the successor then became unusable. The procedure is the same in this case, but
   the SVs should keep both the original synchronizer and the broken successor running (assuming it can still serve events, just not sequence new messages) to allow nodes to catch up first, and spin up a new successor synchronizer on the side,
   so they are running 3 synchronizer nodes for some period of time. Allowing nodes to catch up as much as possible limits the risk of desynchronization, which would surface as ACS commitment mismatches requiring manual resolution.

Concretely, the procedure is as follows:

1. The old physical synchronizer is deemed broken and the last sequenced message was at record time R.
2. Super validators configure R as the max sequencing time on the old sequencer to guarantee that nothing accidentally gets sequenced after that time. This is done by applying the following environment variable to the existing sequencer:

```yaml
- name: ADDITIONAL_CONFIG_SEQUENCER_LSU_MAX_SEQUENCING_TIME
  value: |
    canton.sequencers.sequencer.parameters.lsu-repair.global-max-sequencing-time-exclusive=MAX_SEQUENCING_TIME
```

3. Super validators deploy successor nodes. Depending on the issue, the successor nodes may be configured with older image and protocol versions if the issue is limited to the new version. The successor sequencer must be configured with two timestamps: `lower-bound-sequencing-time-exclusive` and `upgrade-time`. These correspond to the topology freeze time and the upgrade time in a regular LSU.
   In particular, after `lower-bound-sequencing-time-exclusive`, sequencing test messages can be submitted and observed in the `LSU Sequencing Test` dashboard. After `upgrade-time`, all Daml transactions can be submitted. The actual timestamps will be chosen through coordination with all SVs.
   The timestamps are applied through an environment variable on the successor sequencer:

```yaml
- name: ADDITIONAL_CONFIG_SEQUENCER_LSU_SEQUENCING_BOUNDS
  value: |
    canton.sequencers.sequencer.parameters.lsu-repair.lsu-sequencing-bounds-override.lower-bound-sequencing-time-exclusive=LOWER_BOUND_SEQUENCING_TIME_EXCLUSIVE
    canton.sequencers.sequencer.parameters.lsu-repair.lsu-sequencing-bounds-override.upgrade-time=UPGRADE_TIME
```

4. Super validators wait until ingestion has completed.

5. Super validators configure their SV app to transfer the topology and traffic state from the old physical synchronizer to the successor nodes.
   To do so, add the following helm values to the SV app:

```yaml
rollForwardLsu:
  newPhysicalSynchronizerSerial: NEW_PHYSICAL_SYNCHRONIZER_SERIAL # Must be agreed between SVs, usually existing (broken) synchronizer serial + 1
  newPhysicalSynchronizerProtocolVersion: NEW_PHYSICAL_SYNCHRONIZER_PROTOCOL_VERSION # Must be agreed between SVs, the protocol version of the successor synchronizer
  exportTimes:
    topologyExportTime: TOPOLOGY_EXPORT_TIME # Must be agreed between SVs
    trafficExportTime: TRAFFIC_EXPORT_TIME # Must be agreed between SVs
  upgradeTime: UPGRADE_TIME # Must be agreed between SVs
```

6. Validators initiate the *procedure* on their side.

#### Recovery from a failed LSU where nothing got sequenced

For the special case where an LSU was announced and not cancelled but failed, and nothing got sequenced on the successor synchronizer, there is a variant that avoids the need to manually check that ingestion has completed and does not require explicit interaction from validators.

To do so, use the following steps:

1. Super validators configure the manual LSU in their scan configuration:

```yaml
rollForwardLsu:
  enabled: true
  upgradeTime: UPGRADE_TIME # Must be agreed between SVs. Optional; if not specified, it is taken from the existing LSU announcement, which should usually be sufficient.
```

2. Validator app automation picks up that configuration and initiates a manual roll-forward LSU to the new synchronizer.

#### Resolving ACS mismatches

Note that depending on how exactly the old synchronizer failed, validators may desynchronize if some validators have observed a transaction before the failure while others have not. To recover from that, follow the instructions for *validators*.

## Super Validator Deployment Changes


LSU requires deployment changes for super validators. Concretely:

1. Participants are now preserved as part of LSUs. If you previously assumed that the participant, sequencer, and mediator always come as one unit per migration ID, you now need to treat the participant separately.
2. The `domain` value on the SV app helm chart should be replaced by `synchronizers`. `synchronizers.current` replaces the synchronizer previously configured through `domain`. `synchronizers.successor` should be configured to the successor physical synchronizer when that is deployed. After the upgrade, `synchronizers.current` becomes `synchronizers.legacy` and `synchronizers.successor` becomes `synchronizers.current`. The legacy configuration should be removed together with the old physical synchronizer after 30 days.
   The CometBFT configuration also moves under `synchronizers.(current|successor|legacy)`.
3. The `sequencerAddress` and `mediatorAddress` values in scan should be replaced by `synchronizers.current.sequencer` and `synchronizers.current.mediator`. The corresponding values under `synchronizers.successor` should be set together with the deployment of the successor physical synchronizer. After the upgrade, `successor` becomes `current` and `current` is removed.
4. When using DABFT as the successor node, further changes will be required. Most notably, the CometBFT node goes away, as DABFT runs as part of the sequencer pod. The sequencer pod and SV app will require some additional configuration. Details of this will be added later.

{/* COPIED_END */}
diff --git a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx
index f0d1b0cb..a5ad9130 100644
--- a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx
+++ b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx
@@ -7,17 +7,56 @@ Canton Network upgrades fall into two categories: minor upgrades that each node
 
 ## Minor Upgrades
 
-{/* COPIED_START source="splice:sv_operator/sv_upgrades.rst" hash="sv-minor-upgrades" */}
+### Validator nodes
+
+{/* COPIED_START source="splice:docs/src/validator_operator/validator_upgrades.rst" hash="1951360a" */}
 
-This section was adapted from existing reviewed documentation.
-**Source:** `sv_operator/sv_upgrades.rst`
+This section was copied from existing reviewed documentation.
+**Source:** `docs/src/validator_operator/validator_upgrades.rst`
+Reviewers: Skip this section. Remove markers after final approval.
+
+
+There are two types of upgrades:
+
+Version upgrades (this corresponds to an upgrade from `0.A.X` to `0.B.Y`)
+and protocol upgrades (the actual version can remain the same; only the protocol is upgraded, which requires no action from validator operators).
+
+Version upgrades can be done by each node independently and only require
+an upgrade of the docker-compose file or a `helm upgrade` for a
+Kubernetes deployment.
+You must not delete or uninstall any Postgres database, or change migration IDs or secrets, for a version upgrade.
+Make sure to read the [Release Notes](/global-synchronizer/release-notes/current-release) to learn
+about changes you may need to make as part of the upgrade.
+
+Note that for docker-compose you must update the full bundle including
+the docker compose file and the start.sh script and adjust
+`IMAGE_TAG`. Only updating `IMAGE_TAG` is insufficient as the old
+docker compose files might be incompatible with the new version.
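+
+As a rough illustration only (the bundle name and `start.sh` invocation below are placeholders, not the official procedure), a docker-compose version upgrade has this shape:
+
+```bash
+# Hypothetical sketch of a docker-compose version upgrade.
+# Unpack the FULL release bundle for the new version; this replaces
+# the docker compose files and the start.sh script.
+tar xzf splice-node-0.B.Y.tar.gz    # placeholder bundle name
+export IMAGE_TAG=0.B.Y              # point the deployment at the new images
+./start.sh <your-existing-flags>    # placeholder; reuse the flags of your current deployment
+```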
+ +{/* COPIED_END */} + +### Super Validator nodes + +{/* COPIED_START source="splice:docs/src/sv_operator/sv_upgrades.rst" hash="5405319d" */} + + +This section was copied from existing reviewed documentation. +**Source:** `docs/src/sv_operator/sv_upgrades.rst` Reviewers: Skip this section. Remove markers after final approval. -Minor upgrades (e.g., `0.5.8` to `0.5.9`) can be performed independently by each node operator. They require only a `helm upgrade` with the new chart version. +There are two types of upgrades: + +Version upgrades (this corresponds to an upgrade from `0.A.X` to `0.B.Y`) +and protocol upgrades (the actual version can remain the same, only the protocol is upgraded). + +Version upgrades can be done by each node independently and only require +a `helm upgrade`. Make sure to read the [Release Notes](/global-synchronizer/release-notes/current-release) to learn +about changes you may need to make as part of the upgrade. -Always read the [release notes](/global-synchronizer/release-notes/current-release) before upgrading to learn about required configuration changes. +Protocol upgrades are performed through [logical synchronizer upgrades](/global-synchronizer/production-operations/logical-synchronizer-upgrade), +which allow upgrading the protocol version with very limited network downtime. {/* COPIED_END */} diff --git a/docs-main/global-synchronizer/production-operations/validator-major-upgrade.mdx b/docs-main/global-synchronizer/production-operations/validator-major-upgrade.mdx index 98dba45c..a35bcef4 100644 --- a/docs-main/global-synchronizer/production-operations/validator-major-upgrade.mdx +++ b/docs-main/global-synchronizer/production-operations/validator-major-upgrade.mdx @@ -135,9 +135,9 @@ additional considerations may include the following: For a representative example runbook covering the migration of a specific integration use-case, see the [Rolling out Major Splice -Upgrades](https://docs.digitalasset.com/integrate/devnet/exchange-integration/node-operations.html#rolling-out-major-splice-upgrades) -section of the Digital Asset [Exchange Integration -Guide](https://docs.digitalasset.com/integrate/devnet/exchange-integration/). +Upgrades](/integrations/exchanges/node-operations#rolling-out-major-splice-upgrades) +section of the [Validator Node Operations](/integrations/exchanges/node-operations) +guide. ### Migration Dumps Migration dumps contain identity and transaction data from the validator diff --git a/docs-main/global-synchronizer/troubleshooting.mdx b/docs-main/global-synchronizer/troubleshooting.mdx index e06d7beb..388bf407 100644 --- a/docs-main/global-synchronizer/troubleshooting.mdx +++ b/docs-main/global-synchronizer/troubleshooting.mdx @@ -589,9 +589,7 @@ Always take snapshots/backups before upgrading. For Kubernetes: snapshot both Va **Solution:** 1. Check current network version at [canton.foundation/sv-network-status](https://canton.foundation/sv-network-status/) 2. Upgrade directly to the current network version (don't stop at intermediate versions) -3. Follow the upgrade guide for your deployment method: - - [Docker Compose Upgrade Guide](https://docs.sync.global/validator_operator/validator_compose.html#upgrading) - - [Kubernetes Upgrade Guide](https://docs.sync.global/validator_operator/validator_k8s.html#upgrading) +3. Follow the [validator upgrade guide](/global-synchronizer/production-operations/upgrade-procedures#validator-nodes) — applies to both Docker Compose (`docker-compose` bundle update) and Kubernetes (`helm upgrade`) deployments. 
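+
+   For orientation, a Kubernetes version upgrade is typically a single `helm upgrade` per installed chart. The release name, chart reference, and values file below are placeholders for the ones used by your existing deployment:
+
+   ```bash
+   # Hypothetical sketch: move a validator chart to the current network version.
+   helm upgrade my-validator <splice-validator-chart> \
+     --version <current-network-version> \
+     -f my-values.yaml   # keep your existing values; do not change migration IDs or delete databases
+   ```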

diff --git a/docs-main/images/exchange-integration/wallet_ui.png b/docs-main/images/exchange-integration/wallet_ui.png
new file mode 100644
index 00000000..6c287d20
Binary files /dev/null and b/docs-main/images/exchange-integration/wallet_ui.png differ
diff --git a/docs-main/integrations/exchanges/node-operations.mdx b/docs-main/integrations/exchanges/node-operations.mdx
new file mode 100644
index 00000000..2711357d
--- /dev/null
+++ b/docs-main/integrations/exchanges/node-operations.mdx
@@ -0,0 +1,241 @@
---
title: "Validator Node Operations"
description: "Operate an Exchange Validator Node: reward minting, traffic funding, exchange parties setup, Ledger API users, .dar file management, monitoring, and major Splice upgrades."
---

{/* COPIED_START source="docs-website:docs/replicated/splice-wallet-kernel/devnet/src/exchange-integration/node-operations.rst" hash="7a5fbddf" */}


This section was copied from existing reviewed documentation.
**Source:** `docs/replicated/splice-wallet-kernel/devnet/src/exchange-integration/node-operations.rst`
Reviewers: Skip this section. Remove markers after final approval.


## Reward Minting and Traffic Funding

As explained in *tokenomics-and-rewards*, your validator node will need traffic to submit
the transactions that execute withdrawals or accept multi-step deposits.
As also explained in that section, the network provides rewards that can be used to fund traffic.

Note also that every validator node has an associated **validator operator party** that represents
that validator node's administrator ([docs](https://docs.dev.sync.global/validator_operator/validator_compose.html#deployment)).
The validator node automatically mints rewards for that party.
It can further be configured to
[automatically purchase traffic](https://docs.dev.sync.global/validator_operator/validator_helm.html#configuring-automatic-traffic-purchases)
using that party's CC balance, which includes the minted rewards.

We thus recommend the following setup as a starting point to mint
rewards and automatically fund traffic:

1. Use the validator operator party as your featured `exchangeParty`.
   Follow the [featured exchange party setup](#setup-the-featured-exchange-party) to get it featured.
2. Follow the [treasury party setup](#setup-the-treasury-party) to create a `treasuryParty` with a transfer preapproval managed by your `exchangeParty`.
3. Set up [automatic traffic purchases in the validator app](https://docs.dev.sync.global/validator_operator/validator_helm.html#configuring-automatic-traffic-purchases).
4. Optional: set up [auto-sweep](https://docs.dev.sync.global/validator_operator/validator_helm.html#configuring-sweeps-and-auto-accepts-of-transfer-offers) from the `exchangeParty` to your `treasuryParty` to limit the funds managed directly by the validator node.

As a starting point for the automatic traffic purchase configuration, set `targetThroughput` to 2kB/s
and `minTopupInterval` to 1 minute, which should be sufficient to execute about one withdrawal or deposit acceptance every 10 seconds.
Please test this with your expected traffic pattern and adjust as needed.
See this [FAQ to measure the traffic spent on an individual transaction](https://docs.dev.sync.global/faq.html#term-How-do-I-determine-the-traffic-used-for-a-specific-transaction).
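
As a concrete starting point, the corresponding validator Helm values could look like the sketch below. Treat the `topup` key names as assumptions to verify against the automatic traffic purchase documentation linked above; only the 2kB/s and 1 minute values come from this guide:

```yaml
# Sketch of a starting configuration for automatic traffic purchases
# (verify the exact key names against the Splice validator chart docs).
topup:
  enabled: true
  targetThroughput: 2000   # bytes per second, i.e., the 2kB/s starting point above
  minTopupInterval: "1m"   # purchase traffic at most once per minute
```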

## Setup Exchange Parties

### Setup the featured exchange party

As explained above in [reward-minting-and-traffic-funding](#reward-minting-and-traffic-funding), we recommend using the validator operator party
as your featured `exchangeParty`. This party is automatically created when you
[deploy your validator node](https://docs.dev.sync.global/validator_operator/validator_compose.html#deployment).
Thus the only setup step is to get it featured by the SVs:

**On DevNet**, you can self-feature your validator operator party as follows:

1. [Log into the wallet UI for the validator user](https://docs.dev.sync.global/validator_operator/validator_helm.html#logging-into-the-wallet-ui), which presents itself as in this screenshot:

   ![image](/images/exchange-integration/wallet_ui.png)

2. Tap 20 \$ of CC to ensure that your validator operator party has enough funds to purchase traffic.

3. Click on the "Self-grant featured app rights" button.

4. The button is replaced with a star ⭐ icon once the `FeaturedAppRight` contract has been created for your validator operator party. This may take about 10 seconds.

That's all. Continue with [setting up your treasury party](#setup-the-treasury-party).

**On MainNet**, apply for featured status for your validator operator party as follows:

1. [Log into the wallet UI for the validator user](https://docs.dev.sync.global/validator_operator/validator_helm.html#logging-into-the-wallet-ui) on your MainNet validator node.
2. Copy the party ID of your validator operator party using the copy button to the right of the abbreviated `"google-oaut.."` party name in the screenshot above.
3. Apply for featured application status using this link: 

Wait until your application is approved.
The validator node will automatically pick up the featured status via the corresponding
`FeaturedAppRight` contract issued by the DSO party for its validator operator party.

**On TestNet** there is currently no official process, but you should be able to use the same procedure as the one for MainNet.

### Setup the treasury party

Set up the `treasuryParty` as follows, with a transfer preapproval managed by your `exchangeParty`:

1. Create the `treasuryParty` using the wallet SDK to *create-an-external-party* with a key managed in a system of your choice.

2. Copy the party ID of your `exchangeParty` from the Splice Wallet UI as explained above, or retrieve it
   by calling `/v0/validator-user` on the [Validator API](https://github.com/hyperledger-labs/splice/blob/36ed55ea1fbb9b0030000bb0d0265ba811101df5/apps/validator/src/main/openapi/validator-internal.yaml#L14C3-L14C21).

3. Call `/v2/commands/submit-and-wait` on the [Ledger API](https://github.com/digital-asset/canton/blob/eeb56bc5d9779a7f918893b7a6b15e0b312a044e/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml#L6C3-L6C31)
   to create a `#splice-wallet:Splice.Wallet.TransferPreapproval:TransferPreapprovalProposal`
   ([code](https://github.com/hyperledger-labs/splice/blob/edb2257410dfc3660314765c40e59f41e2381150/daml/splice-wallet/daml/Splice/Wallet/TransferPreapproval.daml#L9))
   directly with the `provider` set to your `exchangeParty`.

   Note that setting up this transfer preapproval requires the `exchangeParty` to pay a small fee of about 0.25 \$ worth of CC.
   The funds for this fee usually come from the validator liveness rewards that a validator node starts minting about 30 minutes after it is created.
On DevNet or LocalNet, you don't have to wait that long: just "Tap" the required funds from the built-in faucet.

### Testing the party setup

You can test the party setup on LocalNet or DevNet as follows:

1. Set up your `exchangeParty` and `treasuryParty` as explained above.
2. Set up an additional `testParty` representing a customer.
3. Transfer some CC from the `testParty` to the `treasuryParty` to simulate a deposit.
4. Observe the successful deposit by listing holdings of the `treasuryParty`.
5. Observe about 30 minutes later in the Splice Wallet UI of your validator operator user that the
   `exchangeParty` minted app rewards for this deposit. It takes about 30 minutes, as activity recording and
   rewards minting happen in different phases of a minting round.

## Setup Ledger API Users

Clients need to
[authenticate as a Ledger API user](https://docs.digitalasset.com/build/3.3/sdlc-howtos/applications/secure/authorization.html)
to access the Ledger API of your Exchange Validator Node.
You can manage Ledger API users and their rights using the
`/v2/users/...` [endpoints of the Ledger API](https://github.com/digital-asset/canton/blob/97b837d7b7e9a499963cba1d39a017648c46e8d7/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml#L1172).

You will need to authenticate as an existing user that has `participant_admin` rights
to create additional users and grant rights.
One option is to authenticate as the `ledger-api-user` that you
[configured when setting up authentication for your validator node](https://docs.dev.sync.global/validator_operator/validator_helm.html#oidc-provider-requirements).
Another option is to
[log in to your Splice Wallet UI for the validator operator party](https://docs.dev.sync.global/validator_operator/validator_helm.html#logging-into-the-wallet-ui)
and use the JWT token used by the UI.

We recommend that you set up one user per service that needs to access the Ledger API.
This way you can easily manage permissions and access rights for each service independently.
The [rights](https://docs.digitalasset.com/build/3.3/sdlc-howtos/applications/secure/authorization.html#access-tokens-and-rights)
required by the integration components are as follows:

| Component | Required Rights | Purpose |
|-----------|-----------------|---------|
| Tx History Ingestion | `canReadAs(treasuryParty)` | Read transactions and contracts for the `treasuryParty`. |
| Withdrawal Automation | `canActAs(treasuryParty)` | Prepare and execute transactions on behalf of the `treasuryParty`. |
| Multi-Step Deposit Automation | `canActAs(treasuryParty)` | Prepare and execute transactions on behalf of the `treasuryParty`. |
| Automated [exchange parties setup](#setup-exchange-parties) for *exchange-integration-testing* | `participant_admin` and `canActAs(treasuryParty)` | Create parties and use the `treasuryParty` to create its `TransferPreapprovalProposal`. Hint: grant `canActAs(treasuryParty)` to the user doing the setup after allocating the `treasuryParty`. |

*Required Ledger API User Rights*

## .dar File Management

`.dar` files define the Daml workflows used by the token admins for their tokens.
They must be uploaded to your Exchange Validator Node so that it can process
withdrawals and deposits for those tokens.

The `.dar` files for Canton Coin are managed by the Validator Node itself.

The `.dar` files for other tokens need to be uploaded by you using the `/v2/packages` endpoint of the
[Ledger API](https://github.com/digital-asset/canton/blob/eeb56bc5d9779a7f918893b7a6b15e0b312a044e/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml#L316).
See this [how-to guide](https://docs.digitalasset.com/build/3.3/sdlc-howtos/applications/develop/manage-daml-packages.html)
for more information.


Only upload `.dar` files from token admins that you trust. The uploaded `.dar` files define the choices available on active contracts. Uploading a malicious `.dar` file could result in granting an attacker an unintended delegation on your contracts, which could lead to loss of funds.


## Monitoring

See the Splice documentation for guidance on
[how to monitor your validator node](https://docs.dev.sync.global/deployment/observability/index.html).
Note in particular that it includes
[Grafana dashboards](https://docs.dev.sync.global/deployment/observability/metrics.html#grafana-dashboards)
for monitoring the traffic usage, balances of local parties (e.g., the `exchangeParty`),
and [many other metrics](https://docs.dev.sync.global/deployment/observability/metrics_reference.html).

## Rolling out Major Splice Upgrades

For major protocol changes, the Global Synchronizer undergoes a [Major
Upgrade Procedure](https://docs.dev.sync.global/validator_operator/validator_major_upgrades.html).
The schedule for these upgrades is published by the [Super Validators](https://docs.google.com/document/d/1QhLL5bL0u8temBL86y957VbWDtZJhH9udH-_C7nBlvc/edit?tab=t.0#heading=h.ripdn5ydglli)
and also announced in the `#validator-operations` Slack channel.

As part of this procedure, the old synchronizer is paused; all
validator operators create an export of their validator's state,
deploy a new validator connected to the new synchronizer, and
import their state again. For a more detailed overview, refer to the
[Splice docs](https://docs.dev.sync.global/validator_operator/validator_major_upgrades.html).

The procedure requires some experience to get right, so it is highly
recommended to run nodes on DevNet and TestNet so you can practice the
procedure before you encounter it on MainNet.

From an integration perspective, there are a few things to keep in mind:

1. A major upgrade only preserves the active contracts but not the
   update history. In particular, you will not be able to get
   transactions from before the major upgrade on the update service on
   the Ledger API of the newly deployed validator node.
2. Offsets on the upgraded validator node start from `0` again.
3. The update history will include special import transactions for the
   contracts imported from the old synchronizer. They all have record time
   `0001-01-01T00:00:00.000000Z`, and represent the creation of the imported
   contracts.

### Runbook

We recommend rolling out the upgrade as follows:

1. Wait for the synchronizer to be paused and your node to have
   written the migration dump as described in the [Splice
   docs](https://docs.dev.sync.global/validator_operator/validator_major_upgrades.html#catching-up-before-the-migration).
2. Open the migration dump and extract the `acs_timestamp` from it, e.g., using `jq .acs_timestamp < /domain-upgrade-dump/domain_migration_dump.json`.
   This is the timestamp at which the synchronizer was paused.
3. Wait for your Tx History Ingestion to have caught up to record time
   `acs_timestamp` or higher.
Note that you must consume *offset checkpoints* + to guarantee that your Tx History Ingestion advances past `acs_timestamp`. +4. Stop your Tx History Ingestion component. +5. Upgrade your validator and connect it to the new synchronizer following the + [Splice docs](https://docs.dev.sync.global/validator_operator/validator_major_upgrades.html#deploying-the-validator-app-and-participant-docker-compose). +6. Follow the shortened version below of the + [procedure for restoring a validator node from a backup](/global-synchronizer/production-operations/backup-and-recovery) + to determine the offset from which to restart your Tx History Ingestion: + + 1. Retrieve the `synchronizerId` of the last ingested transaction from the Canton Integration DB. + 2. Log into the [Canton Console of your validator node](https://docs.dev.sync.global/deployment/console_access.html) and query the offset `offRecovery` assigned to the ACS import transactions at time `0001-01-01T00:00:00.000000Z` using + + ```scala + def parseTimestamp(t: String) = { + val isoFormat = java.time.format.DateTimeFormatter.ISO_INSTANT.withZone(java.time.ZoneId.of("Z")) + isoFormat.parse(t, java.time.Instant.from(_)) + } + val synchronizerId = SynchronizerId.tryFromString("example::1220b1431ef217342db44d516bb9befde802be7d8899637d290895fa58880f19accc") // example + val tRecovery = parseTimestamp("0001-01-01T00:00:00.000000Z") + val offRecovery = participant.parties.find_highest_offset_by_timestamp(synchronizerId, tRecovery) + ``` + + Alternatively, you can use `grpcurl` to query the offset `offRecovery` from the command line as shown in the + example below: + + ```bash + grpcurl -plaintext -d \ + '{"synchronizerId" : "example::1220be58c29e65de40bf273be1dc2b266d43a9a002ea5b18955aeef7aac881bb471a", + "timestamp": "0001-01-01T00:00:00.000000Z"}' \ + localhost:5002 \ + com.digitalasset.canton.admin.participant.v30.PartyManagementService.GetHighestOffsetByTimestamp + ``` + + If you use authentication for the Canton Admin gRPC API, then you need to add the appropriate + authentication flags to the `grpcurl` command above. + + 3. Configure the Tx History Ingestion component to start ingesting from offset `offRecovery`. + 4. Restart the Tx History Ingestion component. + +Once you have completed these steps, the integration workflows will continue. + +{/* COPIED_END */} diff --git a/docs-main/integrations/wallet/guidance.mdx b/docs-main/integrations/wallet/guidance.mdx index c362fd11..2602c6a8 100644 --- a/docs-main/integrations/wallet/guidance.mdx +++ b/docs-main/integrations/wallet/guidance.mdx @@ -219,7 +219,7 @@ Links to the node deployment docs are below depending on the deployment choice a - Docker Compose DevNet docs - Kubernetes DevNet docs -The Wallet integration guide is tailored to work with a LocalNet setup ([https://docs.dev.sync.global/app_dev/testing/localnet.html](https://docs.dev.sync.global/app_dev/testing/localnet.html)) to make testing and verification easy. +The Wallet integration guide is tailored to work with a [LocalNet setup](/sdks-tools/development-tools/localnet) to make testing and verification easy. 
## Connecting to a Synchronizer diff --git a/docs-main/sdks-tools/sdks/wallet-sdk.mdx b/docs-main/sdks-tools/sdks/wallet-sdk.mdx index 28a57680..5fc737a2 100644 --- a/docs-main/sdks-tools/sdks/wallet-sdk.mdx +++ b/docs-main/sdks-tools/sdks/wallet-sdk.mdx @@ -21,7 +21,7 @@ A smaller variant, the dApp SDK, is also available for browser-based application ## Prerequisites - Node.js (v18+) -- A running Canton validator node (either self-hosted or via a node-as-a-service provider). Any network validator can be used -- [Splice LocalNet](https://docs.dev.sync.global/app_dev/testing/localnet.html) is convenient for development and testing, but DevNet, TestNet, or MainNet validators work as well. +- A running Canton validator node (either self-hosted or via a node-as-a-service provider). Any network validator can be used -- [Splice LocalNet](/sdks-tools/development-tools/localnet) is convenient for development and testing, but DevNet, TestNet, or MainNet validators work as well. ## Installation