From 5885acba53090f0eb57c6297ea58fcef1a8955d9 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Tue, 28 Apr 2026 20:47:35 +0200 Subject: [PATCH 1/6] Strip /docs-main/ prefix from internal URLs (site-wide 404 fix) Mintlify routes pages from docs.json slugs which do not include the docs-main/ directory prefix (e.g., the slug "appdev/get-started/ choose-your-path" maps to /appdev/get-started/choose-your-path, not /docs-main/appdev/get-started/choose-your-path). 425+ internal links across .mdx files used the broken /docs-main/ prefix and 404'd on the deployed site. This pass: - 535 link rewrites across 167 .mdx files: ](/docs-main/...) -> ](/...) and href="/docs-main/..." -> href="/...". - 8 fields rewritten in docs.json redirects (sources and destinations that referenced /docs-main/...). Verified zero remaining /docs-main/ occurrences in docs-main/ after the pass (mdx + docs.json). --- docs-main/appdev/app-rewards.mdx | 6 +-- .../deep-dives/composition-multi-party.mdx | 6 +-- .../appdev/deep-dives/decentralization.mdx | 6 +-- docs-main/appdev/deep-dives/multi-hosting.mdx | 4 +- docs-main/appdev/deep-dives/privacy-model.mdx | 6 +-- .../appdev/get-started/choose-your-path.mdx | 42 +++++++++---------- .../upgrading-from-previous-versions.mdx | 14 +++---- docs-main/appdev/get-started/whats-new.mdx | 8 ++-- .../appdev/modules/m1-development-stack.mdx | 4 +- docs-main/appdev/modules/m1-mental-models.mdx | 4 +- .../modules/m1-understanding-canton.mdx | 10 ++--- .../modules/m2-canton-for-ethereum-devs.mdx | 6 +-- .../appdev/modules/m2-concept-translation.mdx | 4 +- .../appdev/modules/m2-migration-checklist.mdx | 4 +- .../modules/m2-multi-party-workflows.mdx | 4 +- .../modules/m2-network-architecture.mdx | 4 +- .../appdev/modules/m2-privacy-differences.mdx | 4 +- .../modules/m2-smart-contract-paradigm.mdx | 4 +- .../appdev/modules/m3-building-packaging.mdx | 6 +-- docs-main/appdev/modules/m3-choices.mdx | 8 ++-- .../appdev/modules/m3-contract-templates.mdx | 2 +- 
.../appdev/modules/m3-design-patterns.mdx | 10 ++--- .../appdev/modules/m3-dev-environment.mdx | 4 +- docs-main/appdev/modules/m3-testing.mdx | 2 +- .../appdev/modules/m4-app-architecture.mdx | 6 +-- docs-main/appdev/modules/m4-backend-dev.mdx | 8 ++-- .../appdev/modules/m4-building-apps-intro.mdx | 12 +++--- docs-main/appdev/modules/m4-canton-coin.mdx | 4 +- .../m4-featured-app-activity-marker.mdx | 4 +- docs-main/appdev/modules/m4-frontend-dev.mdx | 10 ++--- docs-main/appdev/modules/m4-observability.mdx | 4 +- docs-main/appdev/modules/m4-sdks-apis.mdx | 6 +-- .../appdev/modules/m5-ci-cd-integration.mdx | 10 ++--- .../modules/m5-deployment-progression.mdx | 6 +-- .../modules/m5-environment-configuration.mdx | 4 +- .../modules/m5-localnet-development.mdx | 4 +- .../appdev/modules/m5-testing-strategies.mdx | 4 +- docs-main/appdev/modules/m6-deployment.mdx | 10 ++--- docs-main/appdev/modules/m6-limitations.mdx | 6 +-- docs-main/appdev/modules/m6-overview.mdx | 10 ++--- .../appdev/modules/m6-package-naming.mdx | 8 ++-- .../appdev/modules/m6-package-selection.mdx | 6 +-- .../appdev/modules/m6-testing-upgrades.mdx | 4 +- .../modules/m6-upgrade-compatibility.mdx | 6 +-- .../modules/m6-writing-first-upgrade.mdx | 12 +++--- docs-main/appdev/modules/m7-compliance.mdx | 10 ++--- .../appdev/modules/m7-error-handling.mdx | 8 ++-- .../appdev/modules/m7-package-management.mdx | 6 +-- docs-main/appdev/modules/m7-performance.mdx | 8 ++-- docs-main/appdev/modules/m7-security.mdx | 4 +- .../modules/m7-smart-contract-upgrades.mdx | 14 +++---- docs-main/appdev/quickstart/index.mdx | 6 +-- docs-main/appdev/quickstart/prerequisites.mdx | 4 +- .../appdev/quickstart/running-the-demo.mdx | 4 +- .../appdev/reference/admin-api-reference.mdx | 6 +-- .../reference/configuration-reference.mdx | 6 +-- docs-main/appdev/reference/error-codes.mdx | 2 +- docs-main/appdev/tooling/debugging-tools.mdx | 6 +-- .../tooling/development-tools-overview.mdx | 16 +++---- docs-main/appdev/tooling/ide-setup.mdx 
| 6 +-- .../common-questions.mdx | 6 +-- .../error-code-reference.mdx | 4 +- .../operational-issues.mdx | 2 +- docs-main/appdev/troubleshooting.mdx | 2 +- docs-main/docs.json | 16 +++---- .../canton-console/console-overview.mdx | 4 +- .../canton-console/debugging-workflows.mdx | 6 +-- .../canton-console/essential-commands.mdx | 4 +- .../deployment/authorization-setup.mdx | 4 +- .../deployment/bootstrap-network.mdx | 12 +++--- .../deployment/configuration.mdx | 4 +- .../deployment/deployment-options.mdx | 6 +-- .../deployment/installation.mdx | 8 ++-- .../deployment/kubernetes-deployment.mdx | 6 +-- .../deployment/onboarding-process.mdx | 6 +-- .../deployment/prerequisites.mdx | 4 +- .../deployment/super-validator-setup.mdx | 10 ++--- .../deployment/sv-operations.mdx | 2 +- .../deployment/upgrades.mdx | 4 +- .../extension-synchronizers/deployment.mdx | 2 +- .../hybrid-synchronizer-pattern.mdx | 2 +- .../other-private-synchronizers.mdx | 4 +- .../private-synchronizers.mdx | 6 +-- .../private-validators.mdx | 4 +- .../backup-and-recovery.mdx | 4 +- .../production-operations/key-metrics.mdx | 6 +-- .../production-operations/logging.mdx | 2 +- .../monitoring-setup.mdx | 4 +- .../production-operations/pruning.mdx | 2 +- .../security-operations.mdx | 6 +-- .../upgrade-procedures.mdx | 6 +-- .../reference/canton-admin-api.mdx | 12 +++--- .../reference/canton-console-reference.mdx | 2 +- .../reference/configuration-reference.mdx | 6 +-- .../reference/error-codes.mdx | 6 +-- .../reference/metrics-reference.mdx | 2 +- .../reference/splice-apis.mdx | 2 +- .../release-notes/current-release.mdx | 2 +- .../release-notes/minor-releases.mdx | 4 +- .../release-notes/release-history.mdx | 2 +- .../release-notes/release-notes.mdx | 12 +++--- .../release-notes/weekly-patch-releases.mdx | 4 +- .../common-questions.mdx | 2 +- .../error-code-reference.mdx | 4 +- .../troubleshooting-guide/runbooks.mdx | 2 +- .../troubleshooting-methodology.mdx | 6 +-- .../infrastructure-requirements.mdx | 
4 +- .../understand/introduction.mdx | 6 +-- .../understand/validator-roles.mdx | 6 +-- docs-main/integrations/apps/finding-apps.mdx | 4 +- docs-main/integrations/ecosystem.mdx | 8 ++-- .../integrations/exchanges/sdk-download.mdx | 2 +- .../integrations/integration-patterns.mdx | 4 +- docs-main/integrations/overview.mdx | 12 +++--- .../integrations/wallet/configuration.mdx | 4 +- .../integrations/wallet/sdk-download.mdx | 4 +- .../integrations/wallets/canton-vs-web3.mdx | 4 +- docs-main/integrations/wallets/for-users.mdx | 4 +- docs-main/overview/learn/architecture.mdx | 6 +-- .../overview/learn/how-transactions-work.mdx | 6 +-- docs-main/overview/learn/ledger-model.mdx | 8 ++-- docs-main/overview/learn/privacy-model.mdx | 6 +-- docs-main/overview/learn/trust-model.mdx | 6 +-- .../overview/learn/two-layer-consensus.mdx | 6 +-- .../reference/canton-coin-tokenomics.mdx | 4 +- .../canton-protocol-specification.mdx | 14 +++---- docs-main/overview/reference/cip-0056.mdx | 6 +-- docs-main/overview/reference/cip-index.mdx | 2 +- .../reference/cross-sync-dvp-example.mdx | 4 +- docs-main/overview/reference/gsf-policies.mdx | 6 +-- .../overview/reference/ordering-consensus.mdx | 2 +- .../reference/smart-contract-consensus.mdx | 6 +-- .../reference/splice-wallet-reference.mdx | 2 +- .../reference/super-validator-components.mdx | 4 +- .../reference/sv-governance-reference.mdx | 2 +- .../reference/transaction-lifecycle.mdx | 2 +- .../reference/validator-node-components.mdx | 12 +++--- .../overview/reference/what-are-cips.mdx | 6 +-- docs-main/overview/understand/canton-coin.mdx | 4 +- .../overview/understand/cantons-solution.mdx | 8 ++-- .../overview/understand/core-concepts.mdx | 8 ++-- .../understand/five-minute-overview.mdx | 8 ++-- .../understand/getting-app-featured.mdx | 2 +- .../understand/global-synchronizer.mdx | 6 +-- docs-main/overview/understand/the-problem.mdx | 4 +- docs-main/overview/understand/use-cases.mdx | 4 +- .../overview/understand/what-is-canton.mdx | 6 
+-- .../overview/understand/who-should-read.mdx | 26 ++++++------ .../sdks-tools/api-reference/admin-api.mdx | 14 +++---- .../sdks-tools/api-reference/json-api.mdx | 4 +- .../sdks-tools/api-reference/ledger-api.mdx | 4 +- .../sdks-tools/api-reference/splice-apis.mdx | 8 ++-- .../sdks-tools/cli-tools/canton-console.mdx | 10 ++--- .../sdks-tools/cli-tools/daml-script.mdx | 8 ++-- docs-main/sdks-tools/cli-tools/dpm.mdx | 8 ++-- .../development-tools/daml-studio.mdx | 6 +-- .../sdks-tools/development-tools/localnet.mdx | 8 ++-- .../sdks-tools/development-tools/pqs.mdx | 8 ++-- .../sdks-tools/development-tools/sandbox.mdx | 10 ++--- .../language-bindings/community.mdx | 6 +-- .../sdks-tools/language-bindings/java.mdx | 6 +-- .../sdks-tools/language-bindings/scala.mdx | 8 ++-- docs-main/sdks-tools/overview.mdx | 4 +- .../reference-projects/cn-quickstart.mdx | 12 +++--- .../splice-reference-apps.mdx | 8 ++-- docs-main/sdks-tools/sdks/daml-sdk.mdx | 10 ++--- docs-main/sdks-tools/sdks/exchange-sdk.mdx | 12 +++--- docs-main/shared/support-channels.mdx | 6 +-- 168 files changed, 528 insertions(+), 528 deletions(-) diff --git a/docs-main/appdev/app-rewards.mdx b/docs-main/appdev/app-rewards.mdx index 576a3e6fb..8f1bd42d6 100644 --- a/docs-main/appdev/app-rewards.mdx +++ b/docs-main/appdev/app-rewards.mdx @@ -64,6 +64,6 @@ Canton Network does not charge fees on CC transfers between parties. 
The only co ## Further Reading -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- How traffic credits and CC work for application developers -- [Getting Your App Featured](/docs-main/overview/understand/getting-app-featured) -- Promotional opportunities on Canton Network -- [Canton Coin overview](/docs-main/overview/understand/canton-coin) -- Tokenomics, validator rewards, and governance +- [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) -- How traffic credits and CC work for application developers +- [Getting Your App Featured](/overview/understand/getting-app-featured) -- Promotional opportunities on Canton Network +- [Canton Coin overview](/overview/understand/canton-coin) -- Tokenomics, validator rewards, and governance diff --git a/docs-main/appdev/deep-dives/composition-multi-party.mdx b/docs-main/appdev/deep-dives/composition-multi-party.mdx index 8f6dfe246..cd3beac44 100644 --- a/docs-main/appdev/deep-dives/composition-multi-party.mdx +++ b/docs-main/appdev/deep-dives/composition-multi-party.mdx @@ -15,7 +15,7 @@ Real-world Daml applications involve multiple parties with different roles, perm ## The Propose-Accept Pattern -The most fundamental multi-party pattern in Daml. Since no party can unilaterally create a contract that binds another party (the [signatory rule](/docs-main/appdev/modules/m3-authorization#signatories-and-authorization)), you need a two-step process: +The most fundamental multi-party pattern in Daml. 
Since no party can unilaterally create a contract that binds another party (the [signatory rule](/appdev/modules/m3-authorization#signatories-and-authorization)), you need a two-step process: @@ -79,5 +79,5 @@ When composing multi-party workflows: ## Next Steps -- [Decentralization](/docs-main/appdev/deep-dives/decentralization) — Strategies for decentralizing at each layer -- [Multi-Hosting](/docs-main/appdev/deep-dives/multi-hosting) — Distributing parties across validators for resilience +- [Decentralization](/appdev/deep-dives/decentralization) — Strategies for decentralizing at each layer +- [Multi-Hosting](/appdev/deep-dives/multi-hosting) — Distributing parties across validators for resilience diff --git a/docs-main/appdev/deep-dives/decentralization.mdx b/docs-main/appdev/deep-dives/decentralization.mdx index b042f67ad..05b7afbfb 100644 --- a/docs-main/appdev/deep-dives/decentralization.mdx +++ b/docs-main/appdev/deep-dives/decentralization.mdx @@ -49,7 +49,7 @@ Transactions involving the party may be processed by any of its hosting validato - Organizations that want geographic redundancy - Gradual migration between validators -See [Multi-Hosting](/docs-main/appdev/deep-dives/multi-hosting) for implementation details. +See [Multi-Hosting](/appdev/deep-dives/multi-hosting) for implementation details. 
## BFT Synchronizers @@ -87,5 +87,5 @@ You don't need to design for the highest level of decentralization from the star ## Next Steps -- [Multi-Hosting](/docs-main/appdev/deep-dives/multi-hosting) — Implementation details for distributing parties across validators -- [Composition and Multi-Party Workflows](/docs-main/appdev/deep-dives/composition-multi-party) — Daml patterns for multi-party interactions +- [Multi-Hosting](/appdev/deep-dives/multi-hosting) — Implementation details for distributing parties across validators +- [Composition and Multi-Party Workflows](/appdev/deep-dives/composition-multi-party) — Daml patterns for multi-party interactions diff --git a/docs-main/appdev/deep-dives/multi-hosting.mdx b/docs-main/appdev/deep-dives/multi-hosting.mdx index 5c3be8c4f..888b00e9f 100644 --- a/docs-main/appdev/deep-dives/multi-hosting.mdx +++ b/docs-main/appdev/deep-dives/multi-hosting.mdx @@ -114,5 +114,5 @@ Multi-hosting is one of several resilience approaches. Choose based on your requ ## Next Steps -- [Decentralization](/docs-main/appdev/deep-dives/decentralization) — How multi-hosting fits into Canton's decentralization spectrum -- [Composition and Multi-Party Workflows](/docs-main/appdev/deep-dives/composition-multi-party) — Daml patterns for multi-party interactions +- [Decentralization](/appdev/deep-dives/decentralization) — How multi-hosting fits into Canton's decentralization spectrum +- [Composition and Multi-Party Workflows](/appdev/deep-dives/composition-multi-party) — Daml patterns for multi-party interactions diff --git a/docs-main/appdev/deep-dives/privacy-model.mdx b/docs-main/appdev/deep-dives/privacy-model.mdx index 66a2cd060..8f109d5ba 100644 --- a/docs-main/appdev/deep-dives/privacy-model.mdx +++ b/docs-main/appdev/deep-dives/privacy-model.mdx @@ -97,6 +97,6 @@ The key principle: each party can audit everything they are entitled to see, and ## Next Steps -- [Privacy Model Explained](/docs-main/overview/learn/privacy-model) -- Technical 
overview of sub-transaction privacy at the protocol level -- [Privacy Differences from Ethereum](/docs-main/appdev/modules/m2-privacy-differences) -- How Canton's model compares to public blockchains -- [Design Patterns](/docs-main/appdev/modules/m3-design-patterns) -- Patterns for authorization and multi-party workflows +- [Privacy Model Explained](/overview/learn/privacy-model) -- Technical overview of sub-transaction privacy at the protocol level +- [Privacy Differences from Ethereum](/appdev/modules/m2-privacy-differences) -- How Canton's model compares to public blockchains +- [Design Patterns](/appdev/modules/m3-design-patterns) -- Patterns for authorization and multi-party workflows diff --git a/docs-main/appdev/get-started/choose-your-path.mdx b/docs-main/appdev/get-started/choose-your-path.mdx index 7ac2ab36f..5a97623e8 100644 --- a/docs-main/appdev/get-started/choose-your-path.mdx +++ b/docs-main/appdev/get-started/choose-your-path.mdx @@ -11,19 +11,19 @@ Whether you're new to blockchain or migrating from another platform, this guide **Recommended Path:** -1. [Five-Minute Overview](/docs-main/overview/understand/five-minute-overview) - Understand what Canton is -2. [Core Concepts](/docs-main/overview/understand/core-concepts) - Learn the fundamentals -3. [Module 1: Understanding Canton](/docs-main/appdev/modules/m1-understanding-canton) - Build mental models -4. [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment) - Start coding -5. [Module 4: Building Applications](/docs-main/appdev/modules/m4-building-apps-intro) - Hands-on practice with the example application +1. [Five-Minute Overview](/overview/understand/five-minute-overview) - Understand what Canton is +2. [Core Concepts](/overview/understand/core-concepts) - Learn the fundamentals +3. [Module 1: Understanding Canton](/appdev/modules/m1-understanding-canton) - Build mental models +4. [Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment) - Start coding +5. 
[Module 4: Building Applications](/appdev/modules/m4-building-apps-intro) - Hands-on practice with the example application **Recommended Path:** -1. [Canton for Ethereum Developers](/docs-main/appdev/modules/m2-canton-for-ethereum-devs) - Map your knowledge -2. [Privacy Model](/docs-main/overview/learn/privacy-model) - Understand the key difference -3. [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment) - Learn Daml syntax -4. [Module 4: Building Applications](/docs-main/appdev/modules/m4-building-apps-intro) - Hands-on practice building a full-stack Canton app +1. [Canton for Ethereum Developers](/appdev/modules/m2-canton-for-ethereum-devs) - Map your knowledge +2. [Privacy Model](/overview/learn/privacy-model) - Understand the key difference +3. [Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment) - Learn Daml syntax +4. [Module 4: Building Applications](/appdev/modules/m4-building-apps-intro) - Hands-on practice building a full-stack Canton app **Key differences to internalize:** - Immutable contracts (archive + create, not mutate) @@ -33,19 +33,19 @@ Whether you're new to blockchain or migrating from another platform, this guide **Recommended Path:** -1. [Five-Minute Overview](/docs-main/overview/understand/five-minute-overview) - Canton's approach -2. [Canton for Ethereum Developers](/docs-main/appdev/modules/m2-canton-for-ethereum-devs) - Concept mapping (still useful) -3. [Architecture Overview](/docs-main/overview/learn/architecture) - How components work -4. [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment) - Start coding +1. [Five-Minute Overview](/overview/understand/five-minute-overview) - Canton's approach +2. [Canton for Ethereum Developers](/appdev/modules/m2-canton-for-ethereum-devs) - Concept mapping (still useful) +3. [Architecture Overview](/overview/learn/architecture) - How components work +4. 
[Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment) - Start coding **Recommended Path:** -1. [Five-Minute Overview](/docs-main/overview/understand/five-minute-overview) -2. [The Problem Canton Solves](/docs-main/overview/understand/the-problem) -3. [Canton's Solution](/docs-main/overview/understand/cantons-solution) -4. [Use Cases](/docs-main/overview/understand/use-cases) -5. [Architecture Overview](/docs-main/overview/learn/architecture) +1. [Five-Minute Overview](/overview/understand/five-minute-overview) +2. [The Problem Canton Solves](/overview/understand/the-problem) +3. [Canton's Solution](/overview/understand/cantons-solution) +4. [Use Cases](/overview/understand/use-cases) +5. [Architecture Overview](/overview/learn/architecture) @@ -121,14 +121,14 @@ Before starting development: ## Hands-on Practice -Ready to build? [Module 4: Building Applications](/docs-main/appdev/modules/m4-building-apps-intro) walks you through a full-stack Canton Network application end-to-end — prerequisites, running the demo, backend and frontend development, the JSON Ledger API, and observability. +Ready to build? [Module 4: Building Applications](/appdev/modules/m4-building-apps-intro) walks you through a full-stack Canton Network application end-to-end — prerequisites, running the demo, backend and frontend development, the JSON Ledger API, and observability. ## Getting Help {/* TODO: Add Slack link once available */} - + #gsf-global-synchronizer-appdev channel @@ -136,7 +136,7 @@ Ready to build? 
[Module 4: Building Applications](/docs-main/appdev/modules/m4-b Technical discussions and Q&A - + Common questions answered diff --git a/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx b/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx index 8700d5da3..848cb9868 100644 --- a/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx +++ b/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx @@ -7,17 +7,17 @@ Per-version migration steps live with the component release notes. Use the links ## Within Canton 3.x -- **Splice and the Global Synchronizer** — [Current release](/docs-main/global-synchronizer/release-notes/current-release), [release history](/docs-main/global-synchronizer/release-notes/release-history), [weekly patch releases](/docs-main/global-synchronizer/release-notes/weekly-patch-releases) -- **Wallet SDK** — [Wallet SDK release notes](/docs-main/integrations/wallet/release-notes) +- **Splice and the Global Synchronizer** — [Current release](/global-synchronizer/release-notes/current-release), [release history](/global-synchronizer/release-notes/release-history), [weekly patch releases](/global-synchronizer/release-notes/weekly-patch-releases) +- **Wallet SDK** — [Wallet SDK release notes](/integrations/wallet/release-notes) - **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) and the upstream changelog at [docs.digitalasset.com](https://docs.digitalasset.com) ## From Canton 2.x to 3.x Canton 3.x is a major release with backward-incompatible changes across the SDK, APIs, and runtime. The high-level shape of the migration: -- **SDK tooling** — `daml` CLI is replaced by [`dpm`](/docs-main/sdks-tools/cli-tools/dpm). +- **SDK tooling** — `daml` CLI is replaced by [`dpm`](/sdks-tools/cli-tools/dpm). - **Ledger API** — services have been restructured. Regenerate gRPC stubs from the 3.x protobuf definitions, or update to a 3.x client library. 
-- **Package management** — the new [Smart Contract Upgrade (SCU)](/docs-main/appdev/modules/m6-overview) mechanism replaces the previous upgrade model. +- **Package management** — the new [Smart Contract Upgrade (SCU)](/appdev/modules/m6-overview) mechanism replaces the previous upgrade model. - **Synchronizer terminology** — what was previously called a "domain" is now called a **synchronizer** throughout the APIs and documentation. - **Daml exceptions** — deprecated and being removed. Replace with preconditions, encoded return types, or workflow patterns. @@ -25,6 +25,6 @@ For a worked migration path, build against the [cn-quickstart](https://github.co ## Related -- [Development Stack](/docs-main/appdev/modules/m1-development-stack) — the 3.x technology stack -- [Smart Contract Upgrades Overview](/docs-main/appdev/modules/m6-overview) — SCU model -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) — 3.x Ledger API usage patterns +- [Development Stack](/appdev/modules/m1-development-stack) — the 3.x technology stack +- [Smart Contract Upgrades Overview](/appdev/modules/m6-overview) — SCU model +- [Backend Development](/appdev/modules/m4-backend-dev) — 3.x Ledger API usage patterns diff --git a/docs-main/appdev/get-started/whats-new.mdx b/docs-main/appdev/get-started/whats-new.mdx index 540dda395..937a96102 100644 --- a/docs-main/appdev/get-started/whats-new.mdx +++ b/docs-main/appdev/get-started/whats-new.mdx @@ -5,20 +5,20 @@ description: "Pointers to release notes for Canton Network components" For changes shipping in each component, see the corresponding release notes: -- **Splice and the Global Synchronizer** — [Current release](/docs-main/global-synchronizer/release-notes/current-release), [release history](/docs-main/global-synchronizer/release-notes/release-history), [weekly patch releases](/docs-main/global-synchronizer/release-notes/weekly-patch-releases) -- **Wallet SDK** — [Wallet SDK release 
notes](/docs-main/integrations/wallet/release-notes) +- **Splice and the Global Synchronizer** — [Current release](/global-synchronizer/release-notes/current-release), [release history](/global-synchronizer/release-notes/release-history), [weekly patch releases](/global-synchronizer/release-notes/weekly-patch-releases) +- **Wallet SDK** — [Wallet SDK release notes](/integrations/wallet/release-notes) - **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) and the upstream documentation at [docs.digitalasset.com](https://docs.digitalasset.com) - **CIPs** — [Canton Improvement Proposals](https://github.com/global-synchronizer-foundation/cips) ## Version compatibility - + Compatible versions across Canton Network components. ## Upgrading from a previous version -See [Upgrading from Previous Versions](/docs-main/appdev/get-started/upgrading-from-previous-versions) for guidance on moving applications across SDK series. +See [Upgrading from Previous Versions](/appdev/get-started/upgrading-from-previous-versions) for guidance on moving applications across SDK series. ## Staying current diff --git a/docs-main/appdev/modules/m1-development-stack.mdx b/docs-main/appdev/modules/m1-development-stack.mdx index 0549583c0..68c9c0791 100644 --- a/docs-main/appdev/modules/m1-development-stack.mdx +++ b/docs-main/appdev/modules/m1-development-stack.mdx @@ -267,11 +267,11 @@ make start - + Run the example application. - + Start writing smart contracts. diff --git a/docs-main/appdev/modules/m1-mental-models.mdx b/docs-main/appdev/modules/m1-mental-models.mdx index 26d08b0cc..cffaa746c 100644 --- a/docs-main/appdev/modules/m1-mental-models.mdx +++ b/docs-main/appdev/modules/m1-mental-models.mdx @@ -209,11 +209,11 @@ When building on Canton, keep these models in mind: - + Understand the tools you'll use. - + See how components work together technically. 
diff --git a/docs-main/appdev/modules/m1-understanding-canton.mdx b/docs-main/appdev/modules/m1-understanding-canton.mdx index c618ce381..cdb070611 100644 --- a/docs-main/appdev/modules/m1-understanding-canton.mdx +++ b/docs-main/appdev/modules/m1-understanding-canton.mdx @@ -65,8 +65,8 @@ You declare what each party can do at compile time, and the protocol enforces it Before proceeding, you should: -- **Understand** what Canton is ([Five-Minute Overview](/docs-main/overview/understand/five-minute-overview)) -- **Know** the basic components ([Core Concepts](/docs-main/overview/understand/core-concepts)) +- **Understand** what Canton is ([Five-Minute Overview](/overview/understand/five-minute-overview)) +- **Know** the basic components ([Core Concepts](/overview/understand/core-concepts)) - **Have** programming experience (any language) No blockchain experience is required—and if you have it, be prepared to unlearn some things. @@ -84,16 +84,16 @@ By the end of this module, you'll understand: - + Build intuition for Canton's approach to distributed ledgers. - + Understand the tools and technologies you'll use. 
After completing this module, continue to: -- **[Module 2](/docs-main/appdev/modules/m2-canton-for-ethereum-devs)**: If you have Ethereum/blockchain experience +- **[Module 2](/appdev/modules/m2-canton-for-ethereum-devs)**: If you have Ethereum/blockchain experience - **[Daml Documentation](https://docs.daml.com)**: If you're ready to start writing Daml diff --git a/docs-main/appdev/modules/m2-canton-for-ethereum-devs.mdx b/docs-main/appdev/modules/m2-canton-for-ethereum-devs.mdx index 18fee9e76..7aad8c9e3 100644 --- a/docs-main/appdev/modules/m2-canton-for-ethereum-devs.mdx +++ b/docs-main/appdev/modules/m2-canton-for-ethereum-devs.mdx @@ -238,6 +238,6 @@ These are the most common mistakes blockchain developers make when first buildin ## Next Steps -- **[Architecture Overview](/docs-main/overview/learn/architecture)** - Deep dive into Canton's component model -- **[Privacy Model Explained](/docs-main/overview/learn/privacy-model)** - Understand sub-transaction privacy -- **[Developer Track Module 3: Daml Development](/docs-main/appdev/modules/m3-dev-environment)** - Start writing Daml code +- **[Architecture Overview](/overview/learn/architecture)** - Deep dive into Canton's component model +- **[Privacy Model Explained](/overview/learn/privacy-model)** - Understand sub-transaction privacy +- **[Developer Track Module 3: Daml Development](/appdev/modules/m3-dev-environment)** - Start writing Daml code diff --git a/docs-main/appdev/modules/m2-concept-translation.mdx b/docs-main/appdev/modules/m2-concept-translation.mdx index 5b41820f9..f3de7e408 100644 --- a/docs-main/appdev/modules/m2-concept-translation.mdx +++ b/docs-main/appdev/modules/m2-concept-translation.mdx @@ -204,11 +204,11 @@ Some Ethereum concepts have no direct Canton equivalent: - + Deep dive into Canton's privacy model vs. Ethereum. - + Understand the Daml vs. Solidity programming model. 
diff --git a/docs-main/appdev/modules/m2-migration-checklist.mdx b/docs-main/appdev/modules/m2-migration-checklist.mdx index 9beee1e46..3d13aa8ab 100644 --- a/docs-main/appdev/modules/m2-migration-checklist.mdx +++ b/docs-main/appdev/modules/m2-migration-checklist.mdx @@ -168,7 +168,7 @@ Canton has no equivalent to Ethereum's `eth_getLogs` or event filtering across a | Ethereum | Canton | Notes | |----------|--------|-------| | JSON-RPC | Ledger API (gRPC or JSON API) | Different protocol | -| Web3.js / ethers.js | Ledger API client or [dApp SDK](/docs-main/appdev/modules/m4-sdks-apis) | Language-specific clients | +| Web3.js / ethers.js | Ledger API client or [dApp SDK](/appdev/modules/m4-sdks-apis) | Language-specific clients | | Event logs | Transaction streams | Subscribe to party's transactions | | `eth_call` | Exercise non-consuming choice | Read-only operations | @@ -256,7 +256,7 @@ Avoid these common mistakes when migrating from Ethereum. - + Start building with Daml. diff --git a/docs-main/appdev/modules/m2-multi-party-workflows.mdx b/docs-main/appdev/modules/m2-multi-party-workflows.mdx index a016234c4..9e924526e 100644 --- a/docs-main/appdev/modules/m2-multi-party-workflows.mdx +++ b/docs-main/appdev/modules/m2-multi-party-workflows.mdx @@ -160,11 +160,11 @@ flowchart TB - + Practical checklist for migrating from Ethereum. - + Start writing Daml smart contracts. diff --git a/docs-main/appdev/modules/m2-network-architecture.mdx b/docs-main/appdev/modules/m2-network-architecture.mdx index af13ce887..c8ae22c79 100644 --- a/docs-main/appdev/modules/m2-network-architecture.mdx +++ b/docs-main/appdev/modules/m2-network-architecture.mdx @@ -238,11 +238,11 @@ When migrating from Ethereum to Canton: - + Detailed Canton architecture documentation. - + Begin writing Daml smart contracts. 
diff --git a/docs-main/appdev/modules/m2-privacy-differences.mdx b/docs-main/appdev/modules/m2-privacy-differences.mdx index b05094cb4..b57fa622c 100644 --- a/docs-main/appdev/modules/m2-privacy-differences.mdx +++ b/docs-main/appdev/modules/m2-privacy-differences.mdx @@ -287,11 +287,11 @@ When designing Canton applications: - + Understand Daml's immutable contract model vs. Solidity. - + Technical details of sub-transaction privacy. diff --git a/docs-main/appdev/modules/m2-smart-contract-paradigm.mdx b/docs-main/appdev/modules/m2-smart-contract-paradigm.mdx index dbacc3fff..2b5eaf398 100644 --- a/docs-main/appdev/modules/m2-smart-contract-paradigm.mdx +++ b/docs-main/appdev/modules/m2-smart-contract-paradigm.mdx @@ -206,11 +206,11 @@ contract MultiSig { - + Compare network architecture and topology. - + Start writing Daml smart contracts. diff --git a/docs-main/appdev/modules/m3-building-packaging.mdx b/docs-main/appdev/modules/m3-building-packaging.mdx index b7fc77f1c..df955d0f6 100644 --- a/docs-main/appdev/modules/m3-building-packaging.mdx +++ b/docs-main/appdev/modules/m3-building-packaging.mdx @@ -98,13 +98,13 @@ You'll notice a module `Test.Intro.Asset.TradeSetup`, which is almost a carbon c As you've seen here, identifiers depend on the package as a whole and packages always bring all their dependencies with them. Thus changing anything in a complex dependency graph can have significant repercussions. It is therefore advisable to keep dependency graphs simple, and to separate concerns which are likely to change at different rates into separate packages. -For example, in all our projects in this intro, including this chapter, our scripts are in the same project as our templates. In practice, that means changing a test changes all identifiers, which is not desirable. It's better for maintainability to separate tests from main templates. 
If we had done that in [Design Patterns](/docs-main/appdev/modules/m3-design-patterns), that would also have saved us from copying those modules. +For example, in all our projects in this intro, including this chapter, our scripts are in the same project as our templates. In practice, that means changing a test changes all identifiers, which is not desirable. It's better for maintainability to separate tests from main templates. If we had done that in [Design Patterns](/appdev/modules/m3-design-patterns), that would also have saved us from copying those modules. -Similarly, we included `Trade` in the same project as `Asset` in [Design Patterns](/docs-main/appdev/modules/m3-design-patterns), even though `Trade` is a pure extension to the core `Asset` model. If we expect `Trade` to need more frequent changes, it may be a good idea to split it out into a separate project from the start. +Similarly, we included `Trade` in the same project as `Asset` in [Design Patterns](/appdev/modules/m3-design-patterns), even though `Trade` is a pure extension to the core `Asset` model. If we expect `Trade` to need more frequent changes, it may be a good idea to split it out into a separate project from the start. ## Next up -The `MultiTrade` model has more complex control flow and data handling than previous models. In [Language Fundamentals](/docs-main/appdev/modules/m3-language-fundamentals) you'll learn how to write more advanced logic: control flow, folds, common typeclasses, custom functions, and the Daml standard library. We'll be using the same projects so don't delete your folders just yet. +The `MultiTrade` model has more complex control flow and data handling than previous models. In [Language Fundamentals](/appdev/modules/m3-language-fundamentals) you'll learn how to write more advanced logic: control flow, folds, common typeclasses, custom functions, and the Daml standard library. We'll be using the same projects so don't delete your folders just yet. 
{/* COPIED_END */} diff --git a/docs-main/appdev/modules/m3-choices.mdx b/docs-main/appdev/modules/m3-choices.mdx index 96f79966e..2c6436b67 100644 --- a/docs-main/appdev/modules/m3-choices.mdx +++ b/docs-main/appdev/modules/m3-choices.mdx @@ -26,7 +26,7 @@ You can load the code for this section by running `dpm new intro-choices --templ If you think of templates as classes and contracts as objects, where are the methods? -Take as an example a `Contact` contract on which the contact owner wants to be able to change the telephone number. Rather than requiring them to manually look up the contract, archive the old one, and create a new one as you saw in [Contract Templates](/docs-main/appdev/modules/m3-contract-templates), you can provide them a convenience method on `Contact`: +Take as an example a `Contact` contract on which the contact owner wants to be able to change the telephone number. Rather than requiring them to manually look up the contract, archive the old one, and create a new one as you saw in [Contract Templates](/appdev/modules/m3-contract-templates), you can provide them a convenience method on `Contact`: @@ -76,7 +76,7 @@ If you open the script view in the IDE, you will notice that Bob sees the `Conta ## Choices in the ledger model -In [Contract Templates](/docs-main/appdev/modules/m3-contract-templates#daml-ledger-basics) you learned about the high-level structure of a Daml ledger. With choices and the `exercise` function, you have the next important ingredient to understand the structure of the ledger and transactions. +In [Contract Templates](/appdev/modules/m3-contract-templates#daml-ledger-basics) you learned about the high-level structure of a Daml ledger. With choices and the `exercise` function, you have the next important ingredient to understand the structure of the ledger and transactions. A *transaction* is a list of *actions*, and there are three kinds of action: `create`, `exercise` and `fetch`. 
@@ -146,7 +146,7 @@ You may have noticed that there is no archive action. That's because `archive ci ## A simple cash model -With the power of choices, you can build your first interesting model: issuance of cash IOUs (I owe you). The model presented here is simpler than the one in [Language Fundamentals](/docs-main/appdev/modules/m3-language-fundamentals) as it's not concerned with the location of the physical cash, but merely with liabilities: +With the power of choices, you can build your first interesting model: issuance of cash IOUs (I owe you). The model presented here is simpler than the one in [Language Fundamentals](/appdev/modules/m3-language-fundamentals) as it's not concerned with the location of the physical cash, but merely with liabilities: @@ -156,6 +156,6 @@ The above model is fine as long as everyone trusts Dora. Dora could revoke the ` You can now store and transform data on the ledger, even giving other parties specific write access through choices. -In [Authorization Model](/docs-main/appdev/modules/m3-authorization), you will learn more about the authorization rules that govern who can create, exercise, and archive contracts. +In [Authorization Model](/appdev/modules/m3-authorization), you will learn more about the authorization rules that govern who can create, exercise, and archive contracts. {/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/appdev/modules/m3-contract-templates.mdx b/docs-main/appdev/modules/m3-contract-templates.mdx index 8378fdfdf..bc3314279 100644 --- a/docs-main/appdev/modules/m3-contract-templates.mdx +++ b/docs-main/appdev/modules/m3-contract-templates.mdx @@ -68,6 +68,6 @@ Furthermore, Daml ledgers *guarantee* that parties see all transactions where th ## Next up -In [Choices](/docs-main/appdev/modules/m3-choices), you'll learn how to add behavior to your contracts using choices. +In [Choices](/appdev/modules/m3-choices), you'll learn how to add behavior to your contracts using choices. 
{/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/appdev/modules/m3-design-patterns.mdx b/docs-main/appdev/modules/m3-design-patterns.mdx index 862ca15e8..3abeef07e 100644 --- a/docs-main/appdev/modules/m3-design-patterns.mdx +++ b/docs-main/appdev/modules/m3-design-patterns.mdx @@ -65,7 +65,7 @@ dependencies: You can generally set `name` and `version` freely to describe your project. `dependencies` does what the name suggests: it includes dependencies. You should always include `daml-prim` and `daml-stdlib`. The former contains internals of the compiler and the Daml Runtime, the latter gives access to the Daml standard library. `daml-script` contains the types and functions for Daml Script. -You compile a Daml project by running `dpm build` from the project root directory. This creates a DAR file in `.daml/dist/dist/${project_name}-${project_version}.dar`. A DAR file is Daml's equivalent of a JAR file in Java: it's the artifact that gets deployed to a ledger to load the package and its dependencies. `dar` files are fully self-contained in that they contain all dependencies of the main package. More on all of this in [Building and Packaging](/docs-main/appdev/modules/m3-building-packaging). +You compile a Daml project by running `dpm build` from the project root directory. This creates a DAR file in `.daml/dist/dist/${project_name}-${project_version}.dar`. A DAR file is Daml's equivalent of a JAR file in Java: it's the artifact that gets deployed to a ledger to load the package and its dependencies. `dar` files are fully self-contained in that they contain all dependencies of the main package. More on all of this in [Building and Packaging](/appdev/modules/m3-building-packaging). 
## Project structure @@ -110,7 +110,7 @@ If your module contains any Daml Scripts, you need to import the corresponding f ## Project overview -The project both changes and adds to the `Iou` model presented in [Authorization](/docs-main/appdev/modules/m3-authorization): +The project both changes and adds to the `Iou` model presented in [Authorization](/appdev/modules/m3-authorization): - Assets are fungible in the sense that they have `Merge` and `Split` choices that allow the `owner` to manage their holdings. @@ -122,7 +122,7 @@ The project both changes and adds to the `Iou` model presented in [Authorization ## Composed choices and scripts -This project showcases how you can put the `Update` and `Script` actions you learned about in [Authorization](/docs-main/appdev/modules/m3-authorization) to good use. For example, the `Merge` and `Split` choices each perform several actions in their consequences. +This project showcases how you can put the `Update` and `Script` actions you learned about in [Authorization](/appdev/modules/m3-authorization) to good use. For example, the `Merge` and `Split` choices each perform several actions in their consequences. - Two create actions in case of `Split` - One create and one archive action in case of `Merge` @@ -226,7 +226,7 @@ Interpretation Each command corresponds to one or more actions. During this step, the `Update` corresponding to each action is evaluated in the context of the ledger to calculate all consequences, including transitive ones (consequences of consequences, etc.). The result of this is a complete transaction. Together with its requestor, this is also known as a commit. Blinding -On ledgers with strong privacy, projections (see [Privacy Model](/docs-main/overview/learn/privacy-model)) for all involved parties are created. This is also called *projecting*. +On ledgers with strong privacy, projections (see [Privacy Model](/overview/learn/privacy-model)) for all involved parties are created. 
This is also called *projecting*. Transaction submission The transaction/commit is submitted to the network. @@ -314,7 +314,7 @@ Beyond the composition patterns above, this section covers common multi-party wo ### Propose-Accept -The most common way to get multiple parties to agree on a shared contract. One party creates a proposal contract that the other party can accept, reject, or let expire. The `IouProposal` [in the authorization module](/docs-main/appdev/modules/m3-authorization#use-propose-accept-workflow-for-one-off-authorization) is another example of this pattern. +The most common way to get multiple parties to agree on a shared contract. One party creates a proposal contract that the other party can accept, reject, or let expire. The `IouProposal` [in the authorization module](/appdev/modules/m3-authorization#use-propose-accept-workflow-for-one-off-authorization) is another example of this pattern. The issuer creates a `CoinMaster` contract, then uses it to invite an owner. The invitation is a proposal contract with the issuer as signatory and the owner as observer: diff --git a/docs-main/appdev/modules/m3-dev-environment.mdx b/docs-main/appdev/modules/m3-dev-environment.mdx index 347b1c852..66bc6643d 100644 --- a/docs-main/appdev/modules/m3-dev-environment.mdx +++ b/docs-main/appdev/modules/m3-dev-environment.mdx @@ -31,6 +31,6 @@ dpm new intro-choices --template daml-intro-choices ## Next Steps -Continue to [Contract Templates](/docs-main/appdev/modules/m3-contract-templates) to start building Daml smart contracts. +Continue to [Contract Templates](/appdev/modules/m3-contract-templates) to start building Daml smart contracts. -If you're new to functional programming or want a refresher on Daml's syntax (types, pattern matching, records, type classes), see [Language Fundamentals](/docs-main/appdev/modules/m3-language-fundamentals) first. If you have experience with Haskell or other ML-family languages, you can skip it and refer back as needed. 
\ No newline at end of file +If you're new to functional programming or want a refresher on Daml's syntax (types, pattern matching, records, type classes), see [Language Fundamentals](/appdev/modules/m3-language-fundamentals) first. If you have experience with Haskell or other ML-family languages, you can skip it and refer back as needed. \ No newline at end of file diff --git a/docs-main/appdev/modules/m3-testing.mdx b/docs-main/appdev/modules/m3-testing.mdx index b0e48a460..5935c288c 100644 --- a/docs-main/appdev/modules/m3-testing.mdx +++ b/docs-main/appdev/modules/m3-testing.mdx @@ -175,4 +175,4 @@ The [cn-quickstart](https://github.com/digital-asset/cn-quickstart) project incl - Splice API interactions (Scan, Validator APIs) - End-to-end application flows that involve the Global Synchronizer -See the [QuickStart walkthrough](/docs-main/appdev/quickstart/running-the-demo) for instructions on starting LocalNet. \ No newline at end of file +See the [QuickStart walkthrough](/appdev/quickstart/running-the-demo) for instructions on starting LocalNet. 
\ No newline at end of file diff --git a/docs-main/appdev/modules/m4-app-architecture.mdx b/docs-main/appdev/modules/m4-app-architecture.mdx index db243821b..620c8710b 100644 --- a/docs-main/appdev/modules/m4-app-architecture.mdx +++ b/docs-main/appdev/modules/m4-app-architecture.mdx @@ -75,6 +75,6 @@ Pick the fully mediated approach unless you have a specific reason to expose led ## Next Steps -- [SDKs and APIs](/docs-main/appdev/modules/m4-sdks-apis) -- The tools and interfaces available for each layer -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- Patterns for connecting to the Ledger API and PQS -- [Frontend Development](/docs-main/appdev/modules/m4-frontend-dev) -- Building a React UI against the backend API +- [SDKs and APIs](/appdev/modules/m4-sdks-apis) -- The tools and interfaces available for each layer +- [Backend Development](/appdev/modules/m4-backend-dev) -- Patterns for connecting to the Ledger API and PQS +- [Frontend Development](/appdev/modules/m4-frontend-dev) -- Building a React UI against the backend API diff --git a/docs-main/appdev/modules/m4-backend-dev.mdx b/docs-main/appdev/modules/m4-backend-dev.mdx index 25ffb6f83..dc34a9d3e 100644 --- a/docs-main/appdev/modules/m4-backend-dev.mdx +++ b/docs-main/appdev/modules/m4-backend-dev.mdx @@ -569,11 +569,11 @@ curl -s "http://localhost:8080/licenses/${LICENSE_CID}/comments" \ ## Next Steps -- [Frontend Development](/docs-main/appdev/modules/m4-frontend-dev) -- Build a React UI that consumes this backend's REST API, including a frontend exercise that adds comment UI on top of these backend endpoints -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- Understand traffic costs and wallet integration for payments +- [Frontend Development](/appdev/modules/m4-frontend-dev) -- Build a React UI that consumes this backend's REST API, including a frontend exercise that adds comment UI on top of these backend endpoints +- [Canton Coin and 
Traffic](/appdev/modules/m4-canton-coin) -- Understand traffic costs and wallet integration for payments - [cn-quickstart repository](https://github.com/digital-asset/cn-quickstart) -- Full working backend implementation ## Advanced Topics -- [Command Deduplication](/docs-main/appdev/deep-dives/command-deduplication) — Designing application command flows so an intended ledger change is executed exactly once, even under retries, crashes, and lost network messages. -- [Explicit Contract Disclosure](/docs-main/appdev/deep-dives/explicit-contract-disclosure) — Submitting commands that read a contract you do not stakeholder by passing it as a disclosed contract on the Ledger API. +- [Command Deduplication](/appdev/deep-dives/command-deduplication) — Designing application command flows so an intended ledger change is executed exactly once, even under retries, crashes, and lost network messages. +- [Explicit Contract Disclosure](/appdev/deep-dives/explicit-contract-disclosure) — Submitting commands that read a contract you do not stakeholder by passing it as a disclosed contract on the Ledger API. diff --git a/docs-main/appdev/modules/m4-building-apps-intro.mdx b/docs-main/appdev/modules/m4-building-apps-intro.mdx index 78b8b3adb..ff0768985 100644 --- a/docs-main/appdev/modules/m4-building-apps-intro.mdx +++ b/docs-main/appdev/modules/m4-building-apps-intro.mdx @@ -7,7 +7,7 @@ Module 4 bridges the gap between writing Daml contracts and shipping a complete ## Prerequisites -Before starting this module, you should have completed [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment). You need a working understanding of templates, choices, and authorization in Daml. Familiarity with Java or TypeScript is helpful but not required. +Before starting this module, you should have completed [Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment). You need a working understanding of templates, choices, and authorization in Daml. 
Familiarity with Java or TypeScript is helpful but not required. ## What You Will Learn @@ -21,23 +21,23 @@ Before starting this module, you should have completed [Module 3: Daml Smart Con - + Roles, layers, and how the pieces of a Canton application fit together. - + Code generation, Ledger API, JSON API, PQS, and the Wallet SDK. - + Connect to the ledger, submit commands, read transactions, and query PQS. - + Build a React UI with generated TypeScript bindings and wallet integration. - + Understand how CC buys traffic and how to manage transaction costs. diff --git a/docs-main/appdev/modules/m4-canton-coin.mdx b/docs-main/appdev/modules/m4-canton-coin.mdx index 72a170fdf..7aef81ca5 100644 --- a/docs-main/appdev/modules/m4-canton-coin.mdx +++ b/docs-main/appdev/modules/m4-canton-coin.mdx @@ -86,6 +86,6 @@ The wallet handles CC transfers, balance lookups, and payment confirmations. You ## Further Reading -- [Canton Coin overview](/docs-main/overview/understand/canton-coin) -- Deeper background on CC tokenomics, validator rewards, and governance -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- Handling transaction errors, including insufficient-traffic failures +- [Canton Coin overview](/overview/understand/canton-coin) -- Deeper background on CC tokenomics, validator rewards, and governance +- [Backend Development](/appdev/modules/m4-backend-dev) -- Handling transaction errors, including insufficient-traffic failures - [cn-quickstart](https://github.com/digital-asset/cn-quickstart) -- Working example of wallet integration in a Canton application diff --git a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx index 10e242076..d9df8d433 100644 --- a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx +++ b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx @@ -112,7 +112,7 @@ This local testing loop lets you validate your marker creation logic before depl 
## Further Reading -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- How CC rewards relate to traffic and transaction costs +- [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) -- How CC rewards relate to traffic and transaction costs - [CIP-0047](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0047/cip-0047.md) -- The specification for FeaturedAppActivityMarker - [CIP-0078](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0078/cip-0078.md) -- The change restricting rewards to featured apps -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) -- Moving from LocalNet to DevNet to MainNet +- [Deployment Progression](/appdev/modules/m5-deployment-progression) -- Moving from LocalNet to DevNet to MainNet diff --git a/docs-main/appdev/modules/m4-frontend-dev.mdx b/docs-main/appdev/modules/m4-frontend-dev.mdx index 986bffbf8..957822700 100644 --- a/docs-main/appdev/modules/m4-frontend-dev.mdx +++ b/docs-main/appdev/modules/m4-frontend-dev.mdx @@ -29,7 +29,7 @@ The frontend is the user-facing layer of your Canton application. This page uses ## Connecting to the Backend -In cn-quickstart, the frontend does not talk to the Ledger API directly — all ledger interactions go through the backend's REST endpoints. Canton does provide a [JSON API](/docs-main/sdks-tools/api-reference/json-api) that frontends can use for direct ledger access, but the cn-quickstart architecture routes everything through the backend for separation of concerns. +In cn-quickstart, the frontend does not talk to the Ledger API directly — all ledger interactions go through the backend's REST endpoints. Canton does provide a [JSON API](/sdks-tools/api-reference/json-api) that frontends can use for direct ledger access, but the cn-quickstart architecture routes everything through the backend for separation of concerns. 
The API client is configured in [`api.ts`](https://github.com/digital-asset/cn-quickstart/blob/main/quickstart/frontend/src/api.ts) using the `openapi-client-axios` library, which reads the OpenAPI schema and produces a typed HTTP client: @@ -56,7 +56,7 @@ Separately, `dpm codegen-js` generates TypeScript types from your compiled DAR f Whether you use DAR-generated types depends on your architecture: - **Fully mediated** (cn-quickstart default) -- The frontend uses OpenAPI-generated types from the backend's REST schema. The Daml-generated TypeScript types are not needed in the frontend because the backend translates between ledger concepts and REST DTOs. -- **Direct ledger access via JSON API** -- The frontend submits commands through the [JSON API](/docs-main/sdks-tools/api-reference/json-api) using the Daml-generated TypeScript bindings. This gives tighter integration with the ledger but requires the frontend to handle party IDs, contract IDs, and command submission directly. +- **Direct ledger access via JSON API** -- The frontend submits commands through the [JSON API](/sdks-tools/api-reference/json-api) using the Daml-generated TypeScript bindings. This gives tighter integration with the ledger but requires the frontend to handle party IDs, contract IDs, and command submission directly. For most applications, the fully mediated approach is simpler. The JSON API approach makes sense when you want a thin or no backend layer. @@ -127,7 +127,7 @@ The `ViteYaml` plugin allows importing the OpenAPI YAML file directly as a JavaS ## Exercise: Add License Comments UI -This exercise builds on the backend exercise in [Backend Development](/docs-main/appdev/modules/m4-backend-dev#exercise-add-license-comments). Complete that first — you need the `LicenseComment` Daml template, the OpenAPI endpoints, and the backend implementation before the frontend can use them. 
+This exercise builds on the backend exercise in [Backend Development](/appdev/modules/m4-backend-dev#exercise-add-license-comments). Complete that first — you need the `LicenseComment` Daml template, the OpenAPI endpoints, and the backend implementation before the frontend can use them. You'll add a comment list and comment form to the licenses view, following the same store/view patterns that cn-quickstart uses for licenses. @@ -182,6 +182,6 @@ Open the app, navigate to the Licenses page, click the "Comments" button on a li ## Next Steps -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- How CC and traffic affect your application -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- The backend that the frontend communicates with +- [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) -- How CC and traffic affect your application +- [Backend Development](/appdev/modules/m4-backend-dev) -- The backend that the frontend communicates with - [cn-quickstart frontend source](https://github.com/digital-asset/cn-quickstart/tree/main/quickstart/frontend) -- Full working frontend implementation diff --git a/docs-main/appdev/modules/m4-observability.mdx b/docs-main/appdev/modules/m4-observability.mdx index ee3f78e39..6a3fbe906 100644 --- a/docs-main/appdev/modules/m4-observability.mdx +++ b/docs-main/appdev/modules/m4-observability.mdx @@ -85,6 +85,6 @@ Set up alerts for conditions that affect your application's reliability: ## Further Reading -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- Error handling patterns that pair with observability -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- Monitoring your traffic budget +- [Backend Development](/appdev/modules/m4-backend-dev) -- Error handling patterns that pair with observability +- [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) -- Monitoring your traffic budget - [cn-quickstart 
repository](https://github.com/digital-asset/cn-quickstart) -- Pre-configured observability stack diff --git a/docs-main/appdev/modules/m4-sdks-apis.mdx b/docs-main/appdev/modules/m4-sdks-apis.mdx index 2b08c5841..c18f6e401 100644 --- a/docs-main/appdev/modules/m4-sdks-apis.mdx +++ b/docs-main/appdev/modules/m4-sdks-apis.mdx @@ -95,7 +95,7 @@ The Wallet SDK provides integration with Canton Coin (CC) wallets for applicatio In cn-quickstart, the licensing application uses wallet integration for license renewal payments. The `LicenseRenewalRequest` contract implements the Splice `AllocationRequest` interface, which the wallet system detects and processes as a payment. -For more on how CC and traffic work, see [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin). +For more on how CC and traffic work, see [Canton Coin and Traffic](/appdev/modules/m4-canton-coin). ## Working Examples @@ -110,5 +110,5 @@ Clone the repository and run `cd quickstart && make setup && make build && make ## Next Steps -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- Patterns for using the Ledger API and PQS in a Java backend -- [Frontend Development](/docs-main/appdev/modules/m4-frontend-dev) -- Building a React frontend against the backend API +- [Backend Development](/appdev/modules/m4-backend-dev) -- Patterns for using the Ledger API and PQS in a Java backend +- [Frontend Development](/appdev/modules/m4-frontend-dev) -- Building a React frontend against the backend API diff --git a/docs-main/appdev/modules/m5-ci-cd-integration.mdx b/docs-main/appdev/modules/m5-ci-cd-integration.mdx index 5b7ef82a8..e32fe689d 100644 --- a/docs-main/appdev/modules/m5-ci-cd-integration.mdx +++ b/docs-main/appdev/modules/m5-ci-cd-integration.mdx @@ -51,7 +51,7 @@ After Daml tests pass, run your backend's integration tests against a sandbox: -For tests that need a full multi-validator setup, start [LocalNet](/docs-main/appdev/modules/m5-localnet-development) with Docker 
Compose instead of the sandbox. This is heavier but covers cross-validator scenarios. +For tests that need a full multi-validator setup, start [LocalNet](/appdev/modules/m5-localnet-development) with Docker Compose instead of the sandbox. This is heavier but covers cross-validator scenarios. ## Package Stage @@ -96,7 +96,7 @@ Use your CI system's environment or stage abstractions to gate promotions. A com - A manual approval gate promotes from DevNet to TestNet - Another manual gate promotes from TestNet to MainNet -Each promotion runs the same deployment steps with different environment configuration (see [Environment Configuration](/docs-main/appdev/modules/m5-environment-configuration)). +Each promotion runs the same deployment steps with different environment configuration (see [Environment Configuration](/appdev/modules/m5-environment-configuration)). ## Example: GitHub Actions @@ -118,6 +118,6 @@ Regularly review logs during development and testing, such as by capturing logs ## Next Steps -- [Testing Strategies](/docs-main/appdev/modules/m5-testing-strategies) — Testing pyramid and approach details -- [Environment Configuration](/docs-main/appdev/modules/m5-environment-configuration) — Per-environment configuration management -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) — What to verify at each promotion stage +- [Testing Strategies](/appdev/modules/m5-testing-strategies) — Testing pyramid and approach details +- [Environment Configuration](/appdev/modules/m5-environment-configuration) — Per-environment configuration management +- [Deployment Progression](/appdev/modules/m5-deployment-progression) — What to verify at each promotion stage diff --git a/docs-main/appdev/modules/m5-deployment-progression.mdx b/docs-main/appdev/modules/m5-deployment-progression.mdx index 8e6a6f43f..05a83ff33 100644 --- a/docs-main/appdev/modules/m5-deployment-progression.mdx +++ b/docs-main/appdev/modules/m5-deployment-progression.mdx @@ -26,7 
+26,7 @@ The core architecture stays the same across all four environments: your applicat ## Upgrade Types on the Global Synchronizer -For detailed upgrade procedures, see the [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures) page. +For detailed upgrade procedures, see the [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures) page. {/* COPIED_START source="docs-website:replicated/quickstart/3.5/sdk/quickstart/upgrade/upgrades-on-global-sync.rst" hash="upgrade-types" */} @@ -101,5 +101,5 @@ Application providers should maintain nodes on DevNet, TestNet, and MainNet to g ## Next Steps -- [Environment Configuration](/docs-main/appdev/modules/m5-environment-configuration) — DPM configuration for each environment -- [CI/CD Integration](/docs-main/appdev/modules/m5-ci-cd-integration) — Automating the promotion pipeline +- [Environment Configuration](/appdev/modules/m5-environment-configuration) — DPM configuration for each environment +- [CI/CD Integration](/appdev/modules/m5-ci-cd-integration) — Automating the promotion pipeline diff --git a/docs-main/appdev/modules/m5-environment-configuration.mdx b/docs-main/appdev/modules/m5-environment-configuration.mdx index 87b69d5c5..47053d3f1 100644 --- a/docs-main/appdev/modules/m5-environment-configuration.mdx +++ b/docs-main/appdev/modules/m5-environment-configuration.mdx @@ -161,5 +161,5 @@ dpm install package ## Next Steps -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) — Environment differences and promotion checklist -- [CI/CD Integration](/docs-main/appdev/modules/m5-ci-cd-integration) — Using environment configuration in automated pipelines +- [Deployment Progression](/appdev/modules/m5-deployment-progression) — Environment differences and promotion checklist +- [CI/CD Integration](/appdev/modules/m5-ci-cd-integration) — Using environment configuration in automated pipelines diff --git 
a/docs-main/appdev/modules/m5-localnet-development.mdx b/docs-main/appdev/modules/m5-localnet-development.mdx index b7daa9ea4..4ed0b3382 100644 --- a/docs-main/appdev/modules/m5-localnet-development.mdx +++ b/docs-main/appdev/modules/m5-localnet-development.mdx @@ -211,5 +211,5 @@ Or with cn-quickstart: `make canton-console`. ## Next Steps -- [Testing Strategies](/docs-main/appdev/modules/m5-testing-strategies) — Testing pyramid and approaches for Canton applications -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) — Moving from LocalNet to DevNet, TestNet, and MainNet +- [Testing Strategies](/appdev/modules/m5-testing-strategies) — Testing pyramid and approaches for Canton applications +- [Deployment Progression](/appdev/modules/m5-deployment-progression) — Moving from LocalNet to DevNet, TestNet, and MainNet diff --git a/docs-main/appdev/modules/m5-testing-strategies.mdx b/docs-main/appdev/modules/m5-testing-strategies.mdx index 161abff80..67a6ca529 100644 --- a/docs-main/appdev/modules/m5-testing-strategies.mdx +++ b/docs-main/appdev/modules/m5-testing-strategies.mdx @@ -205,5 +205,5 @@ Performance testing for Canton applications should account for the distinction b ## Next Steps -- [LocalNet Development](/docs-main/appdev/modules/m5-localnet-development) — Set up and work with the cn-quickstart LocalNet environment -- [CI/CD Integration](/docs-main/appdev/modules/m5-ci-cd-integration) — Automate your test pipeline +- [LocalNet Development](/appdev/modules/m5-localnet-development) — Set up and work with the cn-quickstart LocalNet environment +- [CI/CD Integration](/appdev/modules/m5-ci-cd-integration) — Automate your test pipeline diff --git a/docs-main/appdev/modules/m6-deployment.mdx b/docs-main/appdev/modules/m6-deployment.mdx index cda2f54cd..d51cecaeb 100644 --- a/docs-main/appdev/modules/m6-deployment.mdx +++ b/docs-main/appdev/modules/m6-deployment.mdx @@ -9,7 +9,7 @@ In the following discussion, the current version is `v1` 
and the new version is ## Deployment Sequence -The deployment follows the asynchronous rollout model described in the [Overview](/docs-main/appdev/modules/m6-overview): +The deployment follows the asynchronous rollout model described in the [Overview](/appdev/modules/m6-overview): 1. **Upload v2 DAR to your own validator** — Upload the v2 package on your participant node. This doesn't affect other organizations yet. @@ -108,7 +108,7 @@ To avoid complex roll-forward rollbacks, consider splitting an upgrade into two ## Environment-by-Environment Rollout -Apply the standard promotion path from [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression): +Apply the standard promotion path from [Deployment Progression](/appdev/modules/m5-deployment-progression): - **LocalNet** — Test the full upgrade cycle locally: upload v1, create contracts, upload v2, verify cross-version behavior, run migration automation. - **DevNet** — Deploy the upgrade with a real counterparty. Verify that DAR distribution and mixed-version operation work across validators. 
@@ -117,6 +117,6 @@ Apply the standard promotion path from [Deployment Progression](/docs-main/appde ## Next Steps -- [Smart Contract Upgrades Overview](/docs-main/appdev/modules/m6-overview) — Return to the module overview -- [Testing Upgrades](/docs-main/appdev/modules/m6-testing-upgrades) — Verify upgrade paths before deployment -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) — General environment promotion strategy +- [Smart Contract Upgrades Overview](/appdev/modules/m6-overview) — Return to the module overview +- [Testing Upgrades](/appdev/modules/m6-testing-upgrades) — Verify upgrade paths before deployment +- [Deployment Progression](/appdev/modules/m5-deployment-progression) — General environment promotion strategy diff --git a/docs-main/appdev/modules/m6-limitations.mdx b/docs-main/appdev/modules/m6-limitations.mdx index adb50dc9f..3793cef78 100644 --- a/docs-main/appdev/modules/m6-limitations.mdx +++ b/docs-main/appdev/modules/m6-limitations.mdx @@ -101,6 +101,6 @@ This approach is explicit and requires active cooperation from contract stakehol ## Further Reading -- [Upgrade Compatibility](/docs-main/appdev/modules/m6-upgrade-compatibility) -- Full compatibility rules for SCU -- [Package Naming](/docs-main/appdev/modules/m6-package-naming) -- Naming conventions that account for breaking changes -- [Smart Contract Upgrades in Production](/docs-main/appdev/modules/m7-smart-contract-upgrades) -- Operational considerations for rollouts +- [Upgrade Compatibility](/appdev/modules/m6-upgrade-compatibility) -- Full compatibility rules for SCU +- [Package Naming](/appdev/modules/m6-package-naming) -- Naming conventions that account for breaking changes +- [Smart Contract Upgrades in Production](/appdev/modules/m7-smart-contract-upgrades) -- Operational considerations for rollouts diff --git a/docs-main/appdev/modules/m6-overview.mdx b/docs-main/appdev/modules/m6-overview.mdx index 46485e61a..281ace116 100644 --- 
a/docs-main/appdev/modules/m6-overview.mdx +++ b/docs-main/appdev/modules/m6-overview.mdx @@ -82,23 +82,23 @@ This module walks you through the upgrade process step by step: - + What changes are allowed and what breaks compatibility. - + Step-by-step tutorial for creating a v2 package. - + How the ledger resolves which package version to use. - + Verifying upgrade paths before deployment. - + Rolling out upgrades across environments and organizations. diff --git a/docs-main/appdev/modules/m6-package-naming.mdx b/docs-main/appdev/modules/m6-package-naming.mdx index 354ad9630..8ee0c9827 100644 --- a/docs-main/appdev/modules/m6-package-naming.mdx +++ b/docs-main/appdev/modules/m6-package-naming.mdx @@ -38,7 +38,7 @@ com-acme-asset-interfaces-v1 ``` The version marker refers to the **contract model version**, not the build version. Increment it only when you make breaking changes that require a new package (as described in -[Upgrade Limitations](/docs-main/appdev/modules/m6-limitations)). Non-breaking upgrades (adding optional fields, new choices) happen within the same package name -- SCU handles those transparently. +[Upgrade Limitations](/appdev/modules/m6-limitations)). Non-breaking upgrades (adding optional fields, new choices) happen within the same package name -- SCU handles those transparently. Do not include version numbers in template names. @@ -99,6 +99,6 @@ Each package can be versioned independently. The interface package changes rarel ## Further Reading -- [Upgrade Limitations](/docs-main/appdev/modules/m6-limitations) -- Constraints that drive package naming decisions -- [Upgrade Compatibility](/docs-main/appdev/modules/m6-upgrade-compatibility) -- Rules for what constitutes a breaking vs. 
non-breaking change -- [Building and Packaging](/docs-main/appdev/modules/m3-building-packaging) -- How to compile and package Daml code with `dpm build` +- [Upgrade Limitations](/appdev/modules/m6-limitations) -- Constraints that drive package naming decisions +- [Upgrade Compatibility](/appdev/modules/m6-upgrade-compatibility) -- Rules for what constitutes a breaking vs. non-breaking change +- [Building and Packaging](/appdev/modules/m3-building-packaging) -- How to compile and package Daml code with `dpm build` diff --git a/docs-main/appdev/modules/m6-package-selection.mdx b/docs-main/appdev/modules/m6-package-selection.mdx index f3e353ae9..b2df5cc3d 100644 --- a/docs-main/appdev/modules/m6-package-selection.mdx +++ b/docs-main/appdev/modules/m6-package-selection.mdx @@ -12,7 +12,7 @@ Every contract on the ledger is associated with the package version that created The resolution follows these rules: - **Creating contracts** — The runtime uses the package version your code references. If your backend imports v2, new contracts are created with v2. -- **Fetching contracts** — The runtime evaluates the contract's data against the version your code references. SCU and vetting rules determine whether the fetch succeeds (see [Upgrade Compatibility](/docs-main/appdev/modules/m6-upgrade-compatibility)). +- **Fetching contracts** — The runtime evaluates the contract's data against the version your code references. SCU and vetting rules determine whether the fetch succeeds (see [Upgrade Compatibility](/appdev/modules/m6-upgrade-compatibility)). - **Exercising choices** — The choice body from the version your code references is executed, not the version that created the contract. This means bug fixes in v2 choices apply to v1 contracts. 
## Symbolic Package References @@ -68,5 +68,5 @@ The system fails safely when versions are incompatible rather than silently drop ## Next Steps -- [Testing Upgrades](/docs-main/appdev/modules/m6-testing-upgrades) — Verify that version resolution works correctly for your upgrade -- [Deploying Upgrades](/docs-main/appdev/modules/m6-deployment) — Coordinate package uploads across validators +- [Testing Upgrades](/appdev/modules/m6-testing-upgrades) — Verify that version resolution works correctly for your upgrade +- [Deploying Upgrades](/appdev/modules/m6-deployment) — Coordinate package uploads across validators diff --git a/docs-main/appdev/modules/m6-testing-upgrades.mdx b/docs-main/appdev/modules/m6-testing-upgrades.mdx index f500508d4..2cd95d04b 100644 --- a/docs-main/appdev/modules/m6-testing-upgrades.mdx +++ b/docs-main/appdev/modules/m6-testing-upgrades.mdx @@ -120,5 +120,5 @@ kill $SANDBOX_PID ## Next Steps -- [Deploying Upgrades](/docs-main/appdev/modules/m6-deployment) — Rolling out tested upgrades across environments -- [Upgrade Compatibility](/docs-main/appdev/modules/m6-upgrade-compatibility) — Reference for what changes are allowed +- [Deploying Upgrades](/appdev/modules/m6-deployment) — Rolling out tested upgrades across environments +- [Upgrade Compatibility](/appdev/modules/m6-upgrade-compatibility) — Reference for what changes are allowed diff --git a/docs-main/appdev/modules/m6-upgrade-compatibility.mdx b/docs-main/appdev/modules/m6-upgrade-compatibility.mdx index 6d52b507d..4cead68ed 100644 --- a/docs-main/appdev/modules/m6-upgrade-compatibility.mdx +++ b/docs-main/appdev/modules/m6-upgrade-compatibility.mdx @@ -76,7 +76,7 @@ These changes are **not** backward-compatible and will be rejected by the compil - Removing interface instances from templates - Changing interface definitions -If you need to make a breaking change, create new templates with the desired structure and add `Upgrade` choices to the old templates that archive old contracts and 
create new ones. See [SCU compatibility rules](/docs-main/appdev/modules/m6-writing-first-upgrade#step-3-verify-compatibility) for details on what the compiler checks. +If you need to make a breaking change, create new templates with the desired structure and add `Upgrade` choices to the old templates that archive old contracts and create new ones. See [SCU compatibility rules](/appdev/modules/m6-writing-first-upgrade#step-3-verify-compatibility) for details on what the compiler checks. ## Backend Compatibility @@ -114,5 +114,5 @@ The Daml compiler validates upgrade compatibility between v1 and v2 at build tim ## Next Steps -- [Writing Your First Upgrade](/docs-main/appdev/modules/m6-writing-first-upgrade) — Step-by-step tutorial for creating a v2 package -- [Package Selection](/docs-main/appdev/modules/m6-package-selection) — How the ledger resolves package versions +- [Writing Your First Upgrade](/appdev/modules/m6-writing-first-upgrade) — Step-by-step tutorial for creating a v2 package +- [Package Selection](/appdev/modules/m6-package-selection) — How the ledger resolves package versions diff --git a/docs-main/appdev/modules/m6-writing-first-upgrade.mdx b/docs-main/appdev/modules/m6-writing-first-upgrade.mdx index b08c20de8..1aa0eea32 100644 --- a/docs-main/appdev/modules/m6-writing-first-upgrade.mdx +++ b/docs-main/appdev/modules/m6-writing-first-upgrade.mdx @@ -13,7 +13,7 @@ This tutorial walks you through creating a v2 version of a Daml package. You'll ## Prerequisites - A working `dpm` installation with the Daml SDK -- Familiarity with Daml templates and choices ([Module 3](/docs-main/appdev/modules/m3-contract-templates)) +- Familiarity with Daml templates and choices ([Module 3](/appdev/modules/m3-contract-templates)) - A text editor or Daml Studio ## Step 1: Create the v1 Package @@ -126,7 +126,7 @@ dpm test ``` -This test runs within a single package version, so it approximates rather than fully reproduces cross-version behavior. 
On a real ledger with both v1 and v2 DARs uploaded, the runtime handles the version resolution between actual v1 contracts and v2 code. See [Testing Upgrades](/docs-main/appdev/modules/m6-testing-upgrades) for strategies to test real cross-version scenarios. +This test runs within a single package version, so it approximates rather than fully reproduces cross-version behavior. On a real ledger with both v1 and v2 DARs uploaded, the runtime handles the version resolution between actual v1 contracts and v2 code. See [Testing Upgrades](/appdev/modules/m6-testing-upgrades) for strategies to test real cross-version scenarios. ## Step 5: Deploy Both Versions @@ -159,10 +159,10 @@ When a validator receives a v2 DAR: This design ensures that mixed-version operation is safe: no data is silently lost, and incompatible interactions fail explicitly rather than corrupting state. -See [Package Selection](/docs-main/appdev/modules/m6-package-selection) to learn how different versions are resolved at runtime. +See [Package Selection](/appdev/modules/m6-package-selection) to learn how different versions are resolved at runtime. 
## Next Steps -- [Upgrade Compatibility](/docs-main/appdev/modules/m6-upgrade-compatibility) — Full reference of allowed and disallowed changes -- [Testing Upgrades](/docs-main/appdev/modules/m6-testing-upgrades) — Comprehensive upgrade testing strategies -- [Deploying Upgrades](/docs-main/appdev/modules/m6-deployment) — Rolling out upgrades across environments +- [Upgrade Compatibility](/appdev/modules/m6-upgrade-compatibility) — Full reference of allowed and disallowed changes +- [Testing Upgrades](/appdev/modules/m6-testing-upgrades) — Comprehensive upgrade testing strategies +- [Deploying Upgrades](/appdev/modules/m6-deployment) — Rolling out upgrades across environments diff --git a/docs-main/appdev/modules/m7-compliance.mdx b/docs-main/appdev/modules/m7-compliance.mdx index b461ede9e..306c82865 100644 --- a/docs-main/appdev/modules/m7-compliance.mdx +++ b/docs-main/appdev/modules/m7-compliance.mdx @@ -24,7 +24,7 @@ This means: - The synchronizer (sequencer and mediator) processes encrypted messages and never sees plaintext transaction data - Validators store only the contracts and transactions that involve their hosted parties -For a detailed explanation of visibility rules, see [Privacy Model Explained](/docs-main/overview/learn/privacy-model). +For a detailed explanation of visibility rules, see [Privacy Model Explained](/overview/learn/privacy-model). ### No Global State Visibility @@ -46,7 +46,7 @@ PQS retains data for as long as the underlying PostgreSQL database is maintained ### Auditor Patterns in Daml -You can grant audit access at the smart contract level by adding an auditor party as an observer. The auditor sees the contract and all events that affect it, without being able to modify it. See the [Privacy Model](/docs-main/overview/learn/privacy-model) page for code examples of auditor patterns. +You can grant audit access at the smart contract level by adding an auditor party as an observer. 
The auditor sees the contract and all events that affect it, without being able to modify it. See the [Privacy Model](/overview/learn/privacy-model) page for code examples of auditor patterns. ## Data Residency @@ -82,6 +82,6 @@ These are technical mechanisms. Whether they satisfy specific regulatory require ## Further Reading -- [Privacy Model Explained](/docs-main/overview/learn/privacy-model) -- Full details on sub-transaction privacy -- [Security Best Practices](/docs-main/appdev/modules/m7-security) -- Securing your Canton application -- [Architecture Overview](/docs-main/overview/learn/architecture) -- How validators and synchronizers relate +- [Privacy Model Explained](/overview/learn/privacy-model) -- Full details on sub-transaction privacy +- [Security Best Practices](/appdev/modules/m7-security) -- Securing your Canton application +- [Architecture Overview](/overview/learn/architecture) -- How validators and synchronizers relate diff --git a/docs-main/appdev/modules/m7-error-handling.mdx b/docs-main/appdev/modules/m7-error-handling.mdx index e7f784eaf..b1b7e9f65 100644 --- a/docs-main/appdev/modules/m7-error-handling.mdx +++ b/docs-main/appdev/modules/m7-error-handling.mdx @@ -37,7 +37,7 @@ A timeout does **not** mean the command failed. It may have succeeded but the re If your validator's traffic budget is exhausted, command submissions fail with an error indicating insufficient traffic. This is not a transient error -- retrying will not help until the traffic budget is replenished (either manually or via auto-top-up). -See [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) for how to manage traffic credits. +See [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) for how to manage traffic credits. 
## Handling Contention @@ -103,6 +103,6 @@ Commands that fail after all retries should be logged with full context (command ## Further Reading -- [Backend Development](/docs-main/appdev/modules/m4-backend-dev) -- Ledger API client setup, including error handling examples -- [Canton Coin and Traffic](/docs-main/appdev/modules/m4-canton-coin) -- Managing traffic to avoid submission failures -- [Observability](/docs-main/appdev/modules/m4-observability) -- Logging and metrics for error tracking +- [Backend Development](/appdev/modules/m4-backend-dev) -- Ledger API client setup, including error handling examples +- [Canton Coin and Traffic](/appdev/modules/m4-canton-coin) -- Managing traffic to avoid submission failures +- [Observability](/appdev/modules/m4-observability) -- Logging and metrics for error tracking diff --git a/docs-main/appdev/modules/m7-package-management.mdx b/docs-main/appdev/modules/m7-package-management.mdx index 2e3884ef8..8ea9d5a88 100644 --- a/docs-main/appdev/modules/m7-package-management.mdx +++ b/docs-main/appdev/modules/m7-package-management.mdx @@ -81,7 +81,7 @@ Practical steps for distribution: - Publish DARs to a shared artifact repository that counterparties can access - Provide build instructions so counterparties can compile from source and verify the DAR matches - Include a changelog documenting what changed between versions -- Communicate upgrade timelines (see [Upgrade Deployment](/docs-main/appdev/modules/m6-deployment)) +- Communicate upgrade timelines (see [Upgrade Deployment](/appdev/modules/m6-deployment)) ## Package Modularization @@ -101,5 +101,5 @@ Reviewers: Skip this section. Remove markers after final approval. 
## Next Steps -- [Security Best Practices](/docs-main/appdev/modules/m7-security) — Securing your packages and deployment pipeline -- [Performance](/docs-main/appdev/modules/m7-performance) — Optimization strategies for Canton applications +- [Security Best Practices](/appdev/modules/m7-security) — Securing your packages and deployment pipeline +- [Performance](/appdev/modules/m7-performance) — Optimization strategies for Canton applications diff --git a/docs-main/appdev/modules/m7-performance.mdx b/docs-main/appdev/modules/m7-performance.mdx index 3b8cdb0e8..5dc1588fe 100644 --- a/docs-main/appdev/modules/m7-performance.mdx +++ b/docs-main/appdev/modules/m7-performance.mdx @@ -3,13 +3,13 @@ title: "Performance Best Practices" description: "Where to find Canton application performance guidance — moved to the consolidated Performance Optimization deep-dive." --- -The detailed performance guidance — on-ledger vs off-ledger trade-offs, contract design for performance, PQS query optimization, parallel command submission, traffic management, and the worked example for reducing contention — now lives with the rest of the performance material in [Performance Optimization](/docs-main/appdev/deep-dives/performance-optimization). +The detailed performance guidance — on-ledger vs off-ledger trade-offs, contract design for performance, PQS query optimization, parallel command submission, traffic management, and the worked example for reducing contention — now lives with the rest of the performance material in [Performance Optimization](/appdev/deep-dives/performance-optimization). For diagnosing performance issues in a running validator, see also: -- [Performance Issues](/docs-main/appdev/troubleshooting#performance-issues-2) and [Contention](/docs-main/appdev/troubleshooting#contention) in the troubleshooting cheat sheet. 
+- [Performance Issues](/appdev/troubleshooting#performance-issues-2) and [Contention](/appdev/troubleshooting#contention) in the troubleshooting cheat sheet. ## Next Steps -- [Security Best Practices](/docs-main/appdev/modules/m7-security) — Authorization and secure configuration -- [Package Management](/docs-main/appdev/modules/m7-package-management) — Efficient DAR management +- [Security Best Practices](/appdev/modules/m7-security) — Authorization and secure configuration +- [Package Management](/appdev/modules/m7-package-management) — Efficient DAR management diff --git a/docs-main/appdev/modules/m7-security.mdx b/docs-main/appdev/modules/m7-security.mdx index f2fa3b6db..3bdc0fac3 100644 --- a/docs-main/appdev/modules/m7-security.mdx +++ b/docs-main/appdev/modules/m7-security.mdx @@ -97,8 +97,8 @@ Validate all user input before it reaches the Ledger API. While Daml's type syst ## Next Steps -- [Package Management](/docs-main/appdev/modules/m7-package-management) — Securing DAR distribution and deployment -- [Performance](/docs-main/appdev/modules/m7-performance) — Optimization strategies for Canton applications +- [Package Management](/appdev/modules/m7-package-management) — Securing DAR distribution and deployment +- [Performance](/appdev/modules/m7-performance) — Optimization strategies for Canton applications {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/observe/open-tracing.rst" hash="03a5f726" */} diff --git a/docs-main/appdev/modules/m7-smart-contract-upgrades.mdx b/docs-main/appdev/modules/m7-smart-contract-upgrades.mdx index 68b403707..a2d5425d8 100644 --- a/docs-main/appdev/modules/m7-smart-contract-upgrades.mdx +++ b/docs-main/appdev/modules/m7-smart-contract-upgrades.mdx @@ -18,7 +18,7 @@ Before uploading a new package version to production: - Verify the upgrade passes `dpm build` and `dpm test` in your CI pipeline - Confirm the new package is SCU-compatible with the current version (run compatibility 
checks locally or in CI) -- Review the list of changes against the [upgrade limitations](/docs-main/appdev/modules/m6-limitations) to ensure no breaking modifications +- Review the list of changes against the [upgrade limitations](/appdev/modules/m6-limitations) to ensure no breaking modifications - Test the upgrade on DevNet or TestNet with realistic data volumes - Communicate with counterparties who hold contracts affected by the upgrade (see below) - Document the rollback procedure before starting @@ -109,9 +109,9 @@ Use this checklist for each production upgrade: ## Further Reading -- [Upgrade Limitations](/docs-main/appdev/modules/m6-limitations) -- Constraints that affect production rollouts -- [Testing Upgrades](/docs-main/appdev/modules/m6-testing-upgrades) -- Testing strategies before going to production -- [Error Handling](/docs-main/appdev/modules/m7-error-handling) -- Handling errors that may arise during mixed-version deployments -- [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression) -- The DevNet to TestNet to MainNet path -- [Smart Contract Upgrading Reference](/docs-main/appdev/deep-dives/smart-contract-upgrading-reference) — Detailed package validation and runtime upgrade rules. -- [Values in the Ledger API](/docs-main/appdev/deep-dives/values-in-the-ledger-api) — How the Ledger API validates and normalizes values during command submission and query. 
+- [Upgrade Limitations](/appdev/modules/m6-limitations) -- Constraints that affect production rollouts +- [Testing Upgrades](/appdev/modules/m6-testing-upgrades) -- Testing strategies before going to production +- [Error Handling](/appdev/modules/m7-error-handling) -- Handling errors that may arise during mixed-version deployments +- [Deployment Progression](/appdev/modules/m5-deployment-progression) -- The DevNet to TestNet to MainNet path +- [Smart Contract Upgrading Reference](/appdev/deep-dives/smart-contract-upgrading-reference) — Detailed package validation and runtime upgrade rules. +- [Values in the Ledger API](/appdev/deep-dives/values-in-the-ledger-api) — How the Ledger API validates and normalizes values during command submission and query. diff --git a/docs-main/appdev/quickstart/index.mdx b/docs-main/appdev/quickstart/index.mdx index 4e36e1b1e..d3c140b4d 100644 --- a/docs-main/appdev/quickstart/index.mdx +++ b/docs-main/appdev/quickstart/index.mdx @@ -22,13 +22,13 @@ The QuickStart sets up a local environment (called LocalNet) with: ## Pages in this section - + System requirements, dependencies, and step-by-step installation - + How the QuickStart project is organized and what each component does - + Start the application and walk through the licensing workflow diff --git a/docs-main/appdev/quickstart/prerequisites.mdx b/docs-main/appdev/quickstart/prerequisites.mdx index cb564f736..b796afc33 100644 --- a/docs-main/appdev/quickstart/prerequisites.mdx +++ b/docs-main/appdev/quickstart/prerequisites.mdx @@ -17,8 +17,8 @@ This guide walks through the installation and `LocalNet` deployment of the CN Qu ### Roadmap -- After installation, [explore the demo](/docs-main/appdev/quickstart/running-the-demo) to complete a business operation in the example application. -- For an overview of how the Quickstart project is structured, read the [project structure guide](/docs-main/appdev/quickstart/project-structure). 
+- After installation, [explore the demo](/appdev/quickstart/running-the-demo) to complete a business operation in the example application. +- For an overview of how the Quickstart project is structured, read the [project structure guide](/appdev/quickstart/project-structure). - Learn about debugging using lnav in the [Debugging and troubleshooting with lnav](https://docs.digitalasset.com/build/3.4/quickstart/operate/lnav-in-cn.html). - Additional debugging information is in the section in the observability and troubleshooting section of the [cn-quickstart repository](https://github.com/digital-asset/cn-quickstart). diff --git a/docs-main/appdev/quickstart/running-the-demo.mdx b/docs-main/appdev/quickstart/running-the-demo.mdx index 521adb473..17ca7c685 100644 --- a/docs-main/appdev/quickstart/running-the-demo.mdx +++ b/docs-main/appdev/quickstart/running-the-demo.mdx @@ -48,7 +48,7 @@ If you find errors, please contact your representative at Digital Asset. ## Prerequisites -Install the [CN App Quickstart](/docs-main/appdev/quickstart/prerequisites) before beginning this demonstration. +Install the [CN App Quickstart](/appdev/quickstart/prerequisites) before beginning this demonstration. ## Walkthrough @@ -411,7 +411,7 @@ The Validator Onboarding menu allows for the creation of validator onboarding se ## Next steps -You've completed a business operation in the CN App Quickstart and have been introduced to the basics of the Canton Console and Daml Shell. We encourage you to explore the CN App Quickstart codebase and modify it to meet your business needs. You might be interested in learning more about the [App Quickstart project structure](/docs-main/appdev/quickstart/project-structure) or the application development modules in [Module 4: Building Applications](/docs-main/appdev/modules/m4-building-apps-intro). +You've completed a business operation in the CN App Quickstart and have been introduced to the basics of the Canton Console and Daml Shell. 
We encourage you to explore the CN App Quickstart codebase and modify it to meet your business needs. You might be interested in learning more about the [App Quickstart project structure](/appdev/quickstart/project-structure) or the application development modules in [Module 4: Building Applications](/appdev/modules/m4-building-apps-intro). {/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/appdev/reference/admin-api-reference.mdx b/docs-main/appdev/reference/admin-api-reference.mdx index e814003c7..1a8b0f0cc 100644 --- a/docs-main/appdev/reference/admin-api-reference.mdx +++ b/docs-main/appdev/reference/admin-api-reference.mdx @@ -56,12 +56,12 @@ Repair operations can cause data inconsistencies if used incorrectly. Only use t ## Canton Console Access -The Canton Console provides a more ergonomic interface to Admin API operations. See [Essential Commands](/docs-main/global-synchronizer/canton-console/essential-commands) for commonly used console commands. +The Canton Console provides a more ergonomic interface to Admin API operations. See [Essential Commands](/global-synchronizer/canton-console/essential-commands) for commonly used console commands. 
## Next Steps - [Ledger API Reference](/api-reference) — Application-facing Ledger API -- [Canton Console](/docs-main/global-synchronizer/canton-console/console-overview) — Interactive console access -- [Canton Admin API (GS)](/docs-main/global-synchronizer/reference/canton-admin-api) — GS-focused Admin API reference +- [Canton Console](/global-synchronizer/canton-console/console-overview) — Interactive console access +- [Canton Admin API (GS)](/global-synchronizer/reference/canton-admin-api) — GS-focused Admin API reference diff --git a/docs-main/appdev/reference/configuration-reference.mdx b/docs-main/appdev/reference/configuration-reference.mdx index 8b2078eed..43cc5d969 100644 --- a/docs-main/appdev/reference/configuration-reference.mdx +++ b/docs-main/appdev/reference/configuration-reference.mdx @@ -8,7 +8,7 @@ import CantonAppdevReferenceConfigurationReferenceL42 from "/snippets/canton-doc import CantonAppdevReferenceConfigurationReferenceL95 from "/snippets/canton-docs/appdev_reference_configuration-reference_L95.mdx"; -Canton nodes use [HOCON](https://github.com/lightbend/config/blob/master/HOCON.md) (Human-Optimized Config Object Notation) for its static configuration files. Static configuration covers settings that must be known at process startup, such as storage backends, API ports, and node identities. Dynamic settings like party registration and synchronizer connections are managed at runtime through the [console](/docs-main/global-synchronizer/reference/canton-console-reference) or administration APIs. +Canton nodes use [HOCON](https://github.com/lightbend/config/blob/master/HOCON.md) (Human-Optimized Config Object Notation) for its static configuration files. Static configuration covers settings that must be known at process startup, such as storage backends, API ports, and node identities. 
Dynamic settings like party registration and synchronizer connections are managed at runtime through the [console](/global-synchronizer/reference/canton-console-reference) or administration APIs. ## HOCON Format Basics @@ -95,11 +95,11 @@ Each Daml package has a `daml.yaml` specifying the SDK version, package name, so ### multi-package.yaml (Multi-Package Projects) -For multi-package project configuration, see [Building and Packaging](/docs-main/appdev/modules/m3-building-packaging) which covers `multi-package.yaml` structure and dependency resolution. +For multi-package project configuration, see [Building and Packaging](/appdev/modules/m3-building-packaging) which covers `multi-package.yaml` structure and dependency resolution. ### DPM Environment Variables -For DPM environment variable details, see [Building and Packaging](/docs-main/appdev/modules/m3-building-packaging). +For DPM environment variable details, see [Building and Packaging](/appdev/modules/m3-building-packaging). ### DPM Global Configuration diff --git a/docs-main/appdev/reference/error-codes.mdx b/docs-main/appdev/reference/error-codes.mdx index df39d5d4d..eb077cbc7 100644 --- a/docs-main/appdev/reference/error-codes.mdx +++ b/docs-main/appdev/reference/error-codes.mdx @@ -82,6 +82,6 @@ These error codes are also surfaced through the JSON API (HTTP Ledger API) with ## Further reading -- [Troubleshooting cheat sheet](/docs-main/appdev/troubleshooting) -- quick-reference solutions for common issues including specific error codes +- [Troubleshooting cheat sheet](/appdev/troubleshooting) -- quick-reference solutions for common issues including specific error codes - [gRPC status codes](https://grpc.github.io/grpc/core/md_doc_statuscodes.html) -- official gRPC status code definitions - [gRPC rich error model](https://cloud.google.com/apis/design/errors#error_details) -- the error detail types used in Canton responses diff --git a/docs-main/appdev/tooling/debugging-tools.mdx 
b/docs-main/appdev/tooling/debugging-tools.mdx index 386a4161a..dc3779ac9 100644 --- a/docs-main/appdev/tooling/debugging-tools.mdx +++ b/docs-main/appdev/tooling/debugging-tools.mdx @@ -226,6 +226,6 @@ This is almost always a privacy question. Your party can see a contract only if ## Next Steps -- [Development Tools Overview](/docs-main/appdev/tooling/development-tools-overview) -- Summary of all available tools -- [Troubleshooting](/docs-main/appdev/troubleshooting) -- Solutions for common Canton development issues -- [Testing Daml Contracts](/docs-main/appdev/modules/m3-testing) -- Writing effective tests to catch issues early +- [Development Tools Overview](/appdev/tooling/development-tools-overview) -- Summary of all available tools +- [Troubleshooting](/appdev/troubleshooting) -- Solutions for common Canton development issues +- [Testing Daml Contracts](/appdev/modules/m3-testing) -- Writing effective tests to catch issues early diff --git a/docs-main/appdev/tooling/development-tools-overview.mdx b/docs-main/appdev/tooling/development-tools-overview.mdx index e3a8b35cf..79902e613 100644 --- a/docs-main/appdev/tooling/development-tools-overview.mdx +++ b/docs-main/appdev/tooling/development-tools-overview.mdx @@ -16,7 +16,7 @@ DPM is the primary command-line tool for Canton project management. It handles p -DPM replaces the older `daml` CLI and is the recommended entry point for all build and project tasks. See the [DPM reference](/docs-main/sdks-tools/cli-tools/dpm) for the full command set. +DPM replaces the older `daml` CLI and is the recommended entry point for all build and project tasks. See the [DPM reference](/sdks-tools/cli-tools/dpm) for the full command set. ## Daml Studio (VS Code Extension) @@ -29,7 +29,7 @@ Daml Studio is a VS Code extension that provides an IDE experience for writing D Install it by running `dpm studio` from a project directory, or search for "Daml" in the VS Code extensions marketplace. Requires VS Code 1.87 or later. 
-For full setup instructions, see [IDE Setup](/docs-main/appdev/tooling/ide-setup). +For full setup instructions, see [IDE Setup](/appdev/tooling/ide-setup). ## Canton Console @@ -53,7 +53,7 @@ Sandbox gives you a local validator with an in-memory ledger. It is fast to star Sandbox does not simulate multi-validator or multi-party-on-different-validator scenarios. For that, use LocalNet. -See [Sandbox](/docs-main/sdks-tools/development-tools/sandbox) for configuration options. +See [Sandbox](/sdks-tools/development-tools/sandbox) for configuration options. ## LocalNet @@ -63,7 +63,7 @@ LocalNet is a Docker Compose-based local environment that simulates the Global S LocalNet provides a local synchronizer, multiple validator nodes, test Canton Coin with faucet access, and a Splice wallet. It is the right environment for integration testing, multi-party workflows, and end-to-end application testing before deploying to DevNet. -See [LocalNet](/docs-main/sdks-tools/development-tools/localnet) for details. +See [LocalNet](/sdks-tools/development-tools/localnet) for details. ## PQS (Participant Query Store) @@ -78,7 +78,7 @@ PQS is useful when you need: PQS respects the same privacy boundaries as the Ledger API -- it contains only the data your party is entitled to see. -See [PQS](/docs-main/sdks-tools/development-tools/pqs) and the [PQS SQL Reference](/api-reference) for schema details and query examples. +See [PQS](/sdks-tools/development-tools/pqs) and the [PQS SQL Reference](/api-reference) for schema details and query examples. 
## Choosing the Right Tool @@ -91,6 +91,6 @@ See [PQS](/docs-main/sdks-tools/development-tools/pqs) and the [PQS SQL Referenc ## Next Steps -- [IDE Setup](/docs-main/appdev/tooling/ide-setup) -- Configure your editor for Canton development -- [Debugging Tools](/docs-main/appdev/tooling/debugging-tools) -- Troubleshoot transactions and contract state -- [The Canton Development Stack](/docs-main/appdev/modules/m1-development-stack) -- How all the pieces fit together +- [IDE Setup](/appdev/tooling/ide-setup) -- Configure your editor for Canton development +- [Debugging Tools](/appdev/tooling/debugging-tools) -- Troubleshoot transactions and contract state +- [The Canton Development Stack](/appdev/modules/m1-development-stack) -- How all the pieces fit together diff --git a/docs-main/appdev/tooling/ide-setup.mdx b/docs-main/appdev/tooling/ide-setup.mdx index b91854e3d..557d21ab0 100644 --- a/docs-main/appdev/tooling/ide-setup.mdx +++ b/docs-main/appdev/tooling/ide-setup.mdx @@ -79,6 +79,6 @@ You can also add a `.vscode/settings.json` with project-specific settings like f ## Next Steps -- [Development Tools Overview](/docs-main/appdev/tooling/development-tools-overview) -- Summary of all Canton development tools -- [Development Environment Setup](/docs-main/appdev/modules/m3-dev-environment) -- Getting started with Daml development -- [Debugging Tools](/docs-main/appdev/tooling/debugging-tools) -- Troubleshoot issues during development +- [Development Tools Overview](/appdev/tooling/development-tools-overview) -- Summary of all Canton development tools +- [Development Environment Setup](/appdev/modules/m3-dev-environment) -- Getting started with Daml development +- [Debugging Tools](/appdev/tooling/debugging-tools) -- Troubleshoot issues during development diff --git a/docs-main/appdev/troubleshooting-guide/common-questions.mdx b/docs-main/appdev/troubleshooting-guide/common-questions.mdx index 9127f9827..ed1258e10 100644 --- 
a/docs-main/appdev/troubleshooting-guide/common-questions.mdx +++ b/docs-main/appdev/troubleshooting-guide/common-questions.mdx @@ -8,7 +8,7 @@ import DamlAppdevTroubleshootingGuideCommonQuestionsL49 from "/snippets/daml-doc -Answers to questions that come up regularly in Canton Network application development. For validator operations questions, see the [Common Issues FAQ](/docs-main/appdev/faq). +Answers to questions that come up regularly in Canton Network application development. For validator operations questions, see the [Common Issues FAQ](/appdev/faq). ## Getting Started @@ -26,7 +26,7 @@ Daml models must be written in the Daml language. Your application's backend and - **JavaScript/TypeScript** can use the JSON API (HTTP) or gRPC client libraries. The cn-quickstart includes a TypeScript frontend as a reference - **Python and Go** can use gRPC client libraries generated from the Ledger API `.proto` files -There are also community-maintained bindings for some languages. See the [language bindings](/docs-main/sdks-tools/language-bindings/community) page. +There are also community-maintained bindings for some languages. See the [language bindings](/sdks-tools/language-bindings/community) page. ### Where do I find example code? @@ -121,7 +121,7 @@ curl "https://your-validator:5002/v2/packages" \ ### How do I get my app featured on Canton Network? -Applications can apply for featured status through the Canton Improvement Proposal (CIP) process. Start by reviewing the [CIP introduction](/docs-main/overview/understand/cips-introduction) and the [getting your app featured](/docs-main/overview/understand/getting-app-featured) guide. +Applications can apply for featured status through the Canton Improvement Proposal (CIP) process. Start by reviewing the [CIP introduction](/overview/understand/cips-introduction) and the [getting your app featured](/overview/understand/getting-app-featured) guide. 
The process involves submitting a proposal that describes your application, its value to the network, and how it uses Canton Network infrastructure. Featured apps gain visibility on canton.network and may receive support from the Global Synchronizer Foundation. diff --git a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx index 1a4a452ea..bc792e311 100644 --- a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx +++ b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx @@ -99,9 +99,9 @@ ABORTED: Interpretation error: ... contract not active **Cause:** A concurrent transaction consumed the contract between your read and your exercise. This is normal in concurrent environments. -**Fix:** Retry the operation with exponential backoff. See [Development Issues](/docs-main/appdev/troubleshooting-guide/development-issues) for a retry pattern. +**Fix:** Retry the operation with exponential backoff. See [Development Issues](/appdev/troubleshooting-guide/development-issues) for a retry pattern. ## Ledger API errors -Ledger API runtime error codes (AUTH_INVALID_TOKEN, PACKAGE_NOT_FOUND, PARTY_NOT_KNOWN, PERMISSION_DENIED, etc.) live on their own page: [Ledger API Errors](/docs-main/appdev/troubleshooting-guide/ledger-api-errors). +Ledger API runtime error codes (AUTH_INVALID_TOKEN, PACKAGE_NOT_FOUND, PARTY_NOT_KNOWN, PERMISSION_DENIED, etc.) live on their own page: [Ledger API Errors](/appdev/troubleshooting-guide/ledger-api-errors). 
diff --git a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx index 421fed450..24935cc84 100644 --- a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx +++ b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx @@ -9,7 +9,7 @@ import DamlDocsAppdevTroubleshootingGuideOperationalIssuesL66 from "/snippets/da import DamlDocsAppdevTroubleshootingGuideOperationalIssuesL103 from "/snippets/daml-docs/appdev_troubleshooting-guide_operational-issues_L103.mdx"; -This page covers problems that arise when your application runs against a live network: DevNet, TestNet, or MainNet. Local development issues are covered in [Development Issues](/docs-main/appdev/troubleshooting-guide/development-issues). +This page covers problems that arise when your application runs against a live network: DevNet, TestNet, or MainNet. Local development issues are covered in [Development Issues](/appdev/troubleshooting-guide/development-issues). ## Traffic Exhaustion diff --git a/docs-main/appdev/troubleshooting.mdx b/docs-main/appdev/troubleshooting.mdx index 7fd709061..f7a209fab 100644 --- a/docs-main/appdev/troubleshooting.mdx +++ b/docs-main/appdev/troubleshooting.mdx @@ -893,7 +893,7 @@ The following switches/steps should be taken to improve analyzing errors. Withou ## Reading log files -For Canton trace IDs, context extraction patterns, and detailed lnav workflows, see [Log Analysis](/docs-main/appdev/tooling/debugging-tools#log-analysis) in the debugging tools page. +For Canton trace IDs, context extraction patterns, and detailed lnav workflows, see [Log Analysis](/appdev/tooling/debugging-tools#log-analysis) in the debugging tools page. 
## Setup Issues diff --git a/docs-main/docs.json b/docs-main/docs.json index 20a75eef8..c7f5e4a5e 100644 --- a/docs-main/docs.json +++ b/docs-main/docs.json @@ -8980,23 +8980,23 @@ }, "redirects": [ { - "source": "/docs-main/appdev/deep-dives/manage-daml-packages", - "destination": "/docs-main/appdev/modules/m5-manage-daml-packages" + "source": "/appdev/deep-dives/manage-daml-packages", + "destination": "/appdev/modules/m5-manage-daml-packages" }, { - "source": "/docs-main/appdev/deep-dives/query-with-pqs", - "destination": "/docs-main/appdev/modules/m4-query-with-pqs" + "source": "/appdev/deep-dives/query-with-pqs", + "destination": "/appdev/modules/m4-query-with-pqs" }, { - "source": "/docs-main/sdks-tools/sdks/canton-sdk", - "destination": "/docs-main/sdks-tools/sdks/daml-sdk" + "source": "/sdks-tools/sdks/canton-sdk", + "destination": "/sdks-tools/sdks/daml-sdk" }, { - "source": "/docs-main/appdev/reference/ledger-api-reference", + "source": "/appdev/reference/ledger-api-reference", "destination": "/api-reference" }, { - "source": "/docs-main/appdev/reference/pqs-sql-reference", + "source": "/appdev/reference/pqs-sql-reference", "destination": "/api-reference" } ], diff --git a/docs-main/global-synchronizer/canton-console/console-overview.mdx b/docs-main/global-synchronizer/canton-console/console-overview.mdx index 3c85b405e..2542f212d 100644 --- a/docs-main/global-synchronizer/canton-console/console-overview.mdx +++ b/docs-main/global-synchronizer/canton-console/console-overview.mdx @@ -162,5 +162,5 @@ Use the built-in help to discover available commands: ## Next Steps -- [Essential Commands](/docs-main/global-synchronizer/canton-console/essential-commands) — Key commands for health checks, party and package management -- [Debugging Workflows](/docs-main/global-synchronizer/canton-console/debugging-workflows) — Step-by-step diagnostic procedures +- [Essential Commands](/global-synchronizer/canton-console/essential-commands) — Key commands for health checks, 
party and package management +- [Debugging Workflows](/global-synchronizer/canton-console/debugging-workflows) — Step-by-step diagnostic procedures diff --git a/docs-main/global-synchronizer/canton-console/debugging-workflows.mdx b/docs-main/global-synchronizer/canton-console/debugging-workflows.mdx index 81f71c3d9..25e51d120 100644 --- a/docs-main/global-synchronizer/canton-console/debugging-workflows.mdx +++ b/docs-main/global-synchronizer/canton-console/debugging-workflows.mdx @@ -88,7 +88,7 @@ If a multi-hosting setup isn't taking effect: -A pending proposal means the second participant hasn't signed yet. See [Multi-Hosting](/docs-main/appdev/deep-dives/multi-hosting) for the authorization procedure. +A pending proposal means the second participant hasn't signed yet. See [Multi-Hosting](/appdev/deep-dives/multi-hosting) for the authorization procedure. ## ACS Inspection @@ -113,5 +113,5 @@ When investigating any issue, work through this sequence: ## Next Steps -- [Essential Commands](/docs-main/global-synchronizer/canton-console/essential-commands) — Quick reference for all key commands -- [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview) — How to access the console in different environments +- [Essential Commands](/global-synchronizer/canton-console/essential-commands) — Quick reference for all key commands +- [Console Overview](/global-synchronizer/canton-console/console-overview) — How to access the console in different environments diff --git a/docs-main/global-synchronizer/canton-console/essential-commands.mdx b/docs-main/global-synchronizer/canton-console/essential-commands.mdx index c0e34d1d6..2f0e35017 100644 --- a/docs-main/global-synchronizer/canton-console/essential-commands.mdx +++ b/docs-main/global-synchronizer/canton-console/essential-commands.mdx @@ -111,5 +111,5 @@ The `testing.acs_search` command is intended for debugging, not production queri ## Next Steps -- [Debugging 
Workflows](/docs-main/global-synchronizer/canton-console/debugging-workflows) — Using these commands in diagnostic scenarios -- [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview) — How to start the console +- [Debugging Workflows](/global-synchronizer/canton-console/debugging-workflows) — Using these commands in diagnostic scenarios +- [Console Overview](/global-synchronizer/canton-console/console-overview) — How to start the console diff --git a/docs-main/global-synchronizer/deployment/authorization-setup.mdx b/docs-main/global-synchronizer/deployment/authorization-setup.mdx index 504511081..ec40fe5c7 100644 --- a/docs-main/global-synchronizer/deployment/authorization-setup.mdx +++ b/docs-main/global-synchronizer/deployment/authorization-setup.mdx @@ -126,11 +126,11 @@ Running without authentication is insecure. Any client that can reach the Ledger - + Review other configuration options. - + Plan for network upgrades. diff --git a/docs-main/global-synchronizer/deployment/bootstrap-network.mdx b/docs-main/global-synchronizer/deployment/bootstrap-network.mdx index 80737adc8..9e4832f86 100644 --- a/docs-main/global-synchronizer/deployment/bootstrap-network.mdx +++ b/docs-main/global-synchronizer/deployment/bootstrap-network.mdx @@ -17,7 +17,7 @@ Reviewers: Skip this section. Remove markers after final approval. An SV node involves several distinct identities across different layers: -- **SV identity** — An EC keypair (prime256v1) that identifies the SV operator. Stable across network resets and redeployments. Generated during initial setup (see [Kubernetes Deployment](/docs-main/global-synchronizer/deployment/kubernetes-deployment)). +- **SV identity** — An EC keypair (prime256v1) that identifies the SV operator. Stable across network resets and redeployments. Generated during initial setup (see [Kubernetes Deployment](/global-synchronizer/deployment/kubernetes-deployment)). 
- **Participant identities** — Automatically generated by the Canton participant node on first startup. These are specific to a particular deployment and are not reused across network resets. @@ -149,12 +149,12 @@ Separate Helm values files configure the CometBFT, participant, global domain (s ## Running a Hard Domain Migration (HDM) -In rare cases, the Global Synchronizer may need a hard synchronizer migration — replacing the existing synchronizer with a new one while preserving the application state. This corresponds to a Type 3 upgrade as described in [Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression). HDMs are triggered by protocol-level changes that cannot be applied through standard upgrade procedures. +In rare cases, the Global Synchronizer may need a hard synchronizer migration — replacing the existing synchronizer with a new one while preserving the application state. This corresponds to a Type 3 upgrade as described in [Deployment Progression](/appdev/modules/m5-deployment-progression). HDMs are triggered by protocol-level changes that cannot be applied through standard upgrade procedures. -The migration preserves the active contract set (ACS) while creating a new synchronizer with updated protocol parameters. HDMs require coordination among all SVs and increment the migration ID. See [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures) for the operational steps. +The migration preserves the active contract set (ACS) while creating a new synchronizer with updated protocol parameters. HDMs require coordination among all SVs and increment the migration ID. See [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures) for the operational steps. 
## Next Steps -- [Kubernetes Deployment](/docs-main/global-synchronizer/deployment/kubernetes-deployment) — Detailed SV node deployment guide -- [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures) — Upgrading SV nodes -- [Security Operations](/docs-main/global-synchronizer/production-operations/security-operations) — Hardening and key management +- [Kubernetes Deployment](/global-synchronizer/deployment/kubernetes-deployment) — Detailed SV node deployment guide +- [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures) — Upgrading SV nodes +- [Security Operations](/global-synchronizer/production-operations/security-operations) — Hardening and key management diff --git a/docs-main/global-synchronizer/deployment/configuration.mdx b/docs-main/global-synchronizer/deployment/configuration.mdx index ea2c6dcb3..95211aa25 100644 --- a/docs-main/global-synchronizer/deployment/configuration.mdx +++ b/docs-main/global-synchronizer/deployment/configuration.mdx @@ -224,11 +224,11 @@ Enable pruning in production to manage database growth. Configure the retention - + Configure JWT authentication for your validator. - + Plan for network upgrades. diff --git a/docs-main/global-synchronizer/deployment/deployment-options.mdx b/docs-main/global-synchronizer/deployment/deployment-options.mdx index b247101d7..5b0a456a6 100644 --- a/docs-main/global-synchronizer/deployment/deployment-options.mdx +++ b/docs-main/global-synchronizer/deployment/deployment-options.mdx @@ -93,17 +93,17 @@ You can start with Docker Compose on DevNet and migrate to Kubernetes for TestNe ## Super Validator deployments -Super Validators must use Kubernetes. The additional components (CometBFT node, sequencer, mediator, SV app, Scan service) require orchestration that Docker Compose does not support at production scale. See the [Super Validator Setup](/docs-main/global-synchronizer/deployment/super-validator-setup) guide. 
+Super Validators must use Kubernetes. The additional components (CometBFT node, sequencer, mediator, SV app, Scan service) require orchestration that Docker Compose does not support at production scale. See the [Super Validator Setup](/global-synchronizer/deployment/super-validator-setup) guide. ## Next steps - + System requirements for your chosen deployment method. - + Step-by-step installation for Docker Compose and Kubernetes. diff --git a/docs-main/global-synchronizer/deployment/installation.mdx b/docs-main/global-synchronizer/deployment/installation.mdx index a81fb06b4..4cadd85fb 100644 --- a/docs-main/global-synchronizer/deployment/installation.mdx +++ b/docs-main/global-synchronizer/deployment/installation.mdx @@ -24,7 +24,7 @@ This page walks through deploying a validator node on the Canton Network. Choose ## Before you begin -Confirm that you have completed the [onboarding process](/docs-main/global-synchronizer/deployment/onboarding-process) and have the following ready: +Confirm that you have completed the [onboarding process](/global-synchronizer/deployment/onboarding-process) and have the following ready: - Your static egress IP is allowlisted - You have an onboarding secret from your SV sponsor (or self-generated for DevNet) @@ -163,11 +163,11 @@ If startup fails with connection errors, verify that your egress IP is allowlist - + Tune your validator's configuration. - + Configure authentication for production use. @@ -1015,5 +1015,5 @@ The default configuration for both of these requires access to at least 2/3 of t ## Stay connected -For Slack, mailing lists, and support contacts, see [Staying connected](/docs-main/global-synchronizer/deployment/onboarding-process#staying-connected) in the onboarding process page. +For Slack, mailing lists, and support contacts, see [Staying connected](/global-synchronizer/deployment/onboarding-process#staying-connected) in the onboarding process page. 
diff --git a/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx b/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx index b6aaa4220..483265f6c 100644 --- a/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx +++ b/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx @@ -204,6 +204,6 @@ After deployment, verify your node is operational: ## Next Steps -- [Bootstrap Network](/docs-main/global-synchronizer/deployment/bootstrap-network) — Network bootstrapping and SV operations -- [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview) — Accessing the Canton Console for debugging -- [Security Operations](/docs-main/global-synchronizer/production-operations/security-operations) — Hardening your SV deployment +- [Bootstrap Network](/global-synchronizer/deployment/bootstrap-network) — Network bootstrapping and SV operations +- [Console Overview](/global-synchronizer/canton-console/console-overview) — Accessing the Canton Console for debugging +- [Security Operations](/global-synchronizer/production-operations/security-operations) — Hardening your SV deployment diff --git a/docs-main/global-synchronizer/deployment/onboarding-process.mdx b/docs-main/global-synchronizer/deployment/onboarding-process.mdx index 42f77d78e..041ec7eeb 100644 --- a/docs-main/global-synchronizer/deployment/onboarding-process.mdx +++ b/docs-main/global-synchronizer/deployment/onboarding-process.mdx @@ -123,7 +123,7 @@ With your IP allowlisted and onboarding secret in hand, deploy your validator us - **Migration ID** — The current network migration identifier (published at sync.global/sv-network/) - **Party hint** — Your validator admin party identifier, formatted as `organization-function-enumerator` (for example, `acmeCorp-validator-1`) -See the [Installation](/docs-main/global-synchronizer/deployment/installation) page for deployment instructions. 
+See the [Installation](/global-synchronizer/deployment/installation) page for deployment instructions. ### 6. Verify connectivity @@ -168,11 +168,11 @@ Contact `da-support@digitalasset.com` for best-effort support, or `support@digit - + Review system requirements before deploying. - + Deploy your validator node. diff --git a/docs-main/global-synchronizer/deployment/prerequisites.mdx b/docs-main/global-synchronizer/deployment/prerequisites.mdx index 54e567b61..caf052123 100644 --- a/docs-main/global-synchronizer/deployment/prerequisites.mdx +++ b/docs-main/global-synchronizer/deployment/prerequisites.mdx @@ -170,11 +170,11 @@ For a production Kubernetes deployment, plan for at least: - + Get onboarded with an SV sponsor. - + Deploy your validator node. diff --git a/docs-main/global-synchronizer/deployment/super-validator-setup.mdx b/docs-main/global-synchronizer/deployment/super-validator-setup.mdx index 15de144fb..67d9a0878 100644 --- a/docs-main/global-synchronizer/deployment/super-validator-setup.mdx +++ b/docs-main/global-synchronizer/deployment/super-validator-setup.mdx @@ -13,7 +13,7 @@ import CantonDocsGlobalSynchronizerDeploymentSuperValidatorSetupL208 from "/snip Super Validators (SVs) operate the core infrastructure of the Global Synchronizer: sequencer nodes, mediator nodes, and BFT consensus through CometBFT. This guide covers the Kubernetes-based deployment of an SV node. -SV deployment is significantly more involved than a standard validator. If you are setting up a regular validator, see the [Installation](/docs-main/global-synchronizer/deployment/installation) page instead. +SV deployment is significantly more involved than a standard validator. If you are setting up a regular validator, see the [Installation](/global-synchronizer/deployment/installation) page instead. 
## Requirements @@ -117,7 +117,7 @@ Configure the BFT sequencer connection parameters in your Helm values with the e ## Step 6: Configure authentication -SV nodes expose web UIs that require authentication. The setup is similar to standard validator authentication (see [Authorization Setup](/docs-main/global-synchronizer/deployment/authorization-setup)), with additional clients for the SV-specific UIs. +SV nodes expose web UIs that require authentication. The setup is similar to standard validator authentication (see [Authorization Setup](/global-synchronizer/deployment/authorization-setup)), with additional clients for the SV-specific UIs. **Required OIDC clients for an SV node:** @@ -131,7 +131,7 @@ Configure your OIDC provider (Auth0, Keycloak, or other) with these clients befo ## Step 7: Install Helm charts -The Splice node bundle contains Helm charts and sample values for SV deployment. Download the bundle as described in the [Installation](/docs-main/global-synchronizer/deployment/installation) guide. +The Splice node bundle contains Helm charts and sample values for SV deployment. Download the bundle as described in the [Installation](/global-synchronizer/deployment/installation) guide. Configure the Helm values for your environment. Key sections include: @@ -264,11 +264,11 @@ SV operators have additional responsibilities beyond standard validator operatio - + Plan for SV-specific upgrade procedures. - + Configure authentication for SV web UIs. diff --git a/docs-main/global-synchronizer/deployment/sv-operations.mdx b/docs-main/global-synchronizer/deployment/sv-operations.mdx index 1c2011841..423e81d28 100644 --- a/docs-main/global-synchronizer/deployment/sv-operations.mdx +++ b/docs-main/global-synchronizer/deployment/sv-operations.mdx @@ -19,7 +19,7 @@ As an SV operator, you are most welcome to review, install, and use third-party Installing additional Daml apps on an SV node is not supported and may compromise its secure operations. 
In particular, refrain from manually uploading additional `.dar` files to your SV node or manually connecting it to third-party synchronizers. -For more information on hardening your SV node, see [Security Hardening](/docs-main/global-synchronizer/production-operations/security-hardening). +For more information on hardening your SV node, see [Security Hardening](/global-synchronizer/production-operations/security-hardening). ## Generating a validator onboarding secret diff --git a/docs-main/global-synchronizer/deployment/upgrades.mdx b/docs-main/global-synchronizer/deployment/upgrades.mdx index de1d16b2b..3ff06b240 100644 --- a/docs-main/global-synchronizer/deployment/upgrades.mdx +++ b/docs-main/global-synchronizer/deployment/upgrades.mdx @@ -182,11 +182,11 @@ Validators running outdated software versions risk disconnection from the networ - + Review configuration options for your upgraded node. - + SV-specific upgrade considerations. diff --git a/docs-main/global-synchronizer/extension-synchronizers/deployment.mdx b/docs-main/global-synchronizer/extension-synchronizers/deployment.mdx index e18337a43..78708c6b6 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/deployment.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/deployment.mdx @@ -17,7 +17,7 @@ Before you begin: - PostgreSQL 14+ (managed service recommended for production) - TLS certificates for the sequencer endpoint - Canton release artifacts (Docker images or JARs) -- Familiarity with [Canton's synchronizer architecture](/docs-main/overview/learn/global-synchronizer-architecture) +- Familiarity with [Canton's synchronizer architecture](/overview/learn/global-synchronizer-architecture) ## Ordering backends diff --git a/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern.mdx b/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern.mdx index 6e73dca26..dd8263a5f 100644 --- 
a/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern.mdx @@ -100,4 +100,4 @@ participant: - **Traffic costs** — Transactions on the private synchronizer do not consume Canton Coin. Only transactions on the Global Synchronizer incur traffic fees. - **Validator overlap** — All parties involved in a contract must have their validators connected to the synchronizer where that contract is assigned. Plan your synchronizer topology accordingly. - **Ordering guarantees** — Each synchronizer provides its own total ordering. Cross-synchronizer transactions are synchronized by the Canton protocol but have higher latency than same-synchronizer transactions. -- **Operational overhead** — Running a private synchronizer means operating sequencer and mediator infrastructure in addition to your validator. See the [deployment guide](/docs-main/global-synchronizer/extension-synchronizers/deployment) for what this involves. +- **Operational overhead** — Running a private synchronizer means operating sequencer and mediator infrastructure in addition to your validator. See the [deployment guide](/global-synchronizer/extension-synchronizers/deployment) for what this involves. 
diff --git a/docs-main/global-synchronizer/extension-synchronizers/other-private-synchronizers.mdx b/docs-main/global-synchronizer/extension-synchronizers/other-private-synchronizers.mdx index bfc7fe627..5650bb01e 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/other-private-synchronizers.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/other-private-synchronizers.mdx @@ -22,7 +22,7 @@ Without a Global Synchronizer connection, your deployment cannot: - **Interoperate with other Canton Network participants** — Contracts on your private synchronizer cannot be reassigned to the Global Synchronizer or interact with contracts there - **Participate in Canton Network governance** — Your validators are not part of the Canton Network topology -If you later decide you need Global Synchronizer connectivity, you can add it by connecting your validators to the Global Synchronizer and reassigning contracts as needed. See [linking a validator to multiple synchronizers](/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync) for how this works. +If you later decide you need Global Synchronizer connectivity, you can add it by connecting your validators to the Global Synchronizer and reassigning contracts as needed. See [linking a validator to multiple synchronizers](/global-synchronizer/extension-synchronizers/linking-validator-multi-sync) for how this works. ## Architecture @@ -69,7 +69,7 @@ Deploying a standalone synchronizer follows the same process as deploying a priv 3. Deploy validators and connect them to your synchronizer 4. Allocate parties on the validators and begin transacting -For step-by-step instructions, see the [private synchronizer deployment guide](/docs-main/global-synchronizer/extension-synchronizers/deployment). +For step-by-step instructions, see the [private synchronizer deployment guide](/global-synchronizer/extension-synchronizers/deployment). 
## Differences from Global Synchronizer operation diff --git a/docs-main/global-synchronizer/extension-synchronizers/private-synchronizers.mdx b/docs-main/global-synchronizer/extension-synchronizers/private-synchronizers.mdx index 8a0f9490a..42fabc6e8 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/private-synchronizers.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/private-synchronizers.mdx @@ -92,6 +92,6 @@ Private synchronizers and the Global Synchronizer are complementary: ## Next Steps -- [Hybrid Synchronizer Pattern](/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern) — Combining public and private synchronizers -- [Deployment](/docs-main/global-synchronizer/extension-synchronizers/deployment) — Deploying extension synchronizer infrastructure -- [Linking Validators](/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync) — Multi-synchronizer validator configuration +- [Hybrid Synchronizer Pattern](/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern) — Combining public and private synchronizers +- [Deployment](/global-synchronizer/extension-synchronizers/deployment) — Deploying extension synchronizer infrastructure +- [Linking Validators](/global-synchronizer/extension-synchronizers/linking-validator-multi-sync) — Multi-synchronizer validator configuration diff --git a/docs-main/global-synchronizer/extension-synchronizers/private-validators.mdx b/docs-main/global-synchronizer/extension-synchronizers/private-validators.mdx index 88a86299f..a16479eaa 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/private-validators.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/private-validators.mdx @@ -111,9 +111,9 @@ After the participant starts, it connects to the configured synchronizer automat Use this decision framework: -- **Do any of your parties need to transact with external Canton Network parties?** If yes, you 
need a Global Synchronizer connection. Consider the [hybrid pattern](/docs-main/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern) instead. +- **Do any of your parties need to transact with external Canton Network parties?** If yes, you need a Global Synchronizer connection. Consider the [hybrid pattern](/global-synchronizer/extension-synchronizers/hybrid-synchronizer-pattern) instead. - **Do you need Canton Coin for payments or settlement?** If yes, you need a Global Synchronizer connection. -- **Might you need network connectivity in the future?** If possibly, deploy standard validators now and connect to the Global Synchronizer later. Adding a synchronizer connection is non-disruptive — see [linking to multiple synchronizers](/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync). +- **Might you need network connectivity in the future?** If possibly, deploy standard validators now and connect to the Global Synchronizer later. Adding a synchronizer connection is non-disruptive — see [linking to multiple synchronizers](/global-synchronizer/extension-synchronizers/linking-validator-multi-sync). - **Is your use case entirely internal with no foreseeable external interaction?** Private-only validators are the simpler choice. 
## Migrating to Global Synchronizer later diff --git a/docs-main/global-synchronizer/production-operations/backup-and-recovery.mdx b/docs-main/global-synchronizer/production-operations/backup-and-recovery.mdx index fa6ee4518..8a2c801f5 100644 --- a/docs-main/global-synchronizer/production-operations/backup-and-recovery.mdx +++ b/docs-main/global-synchronizer/production-operations/backup-and-recovery.mdx @@ -210,8 +210,8 @@ The recovery timestamp is chosen by inspecting participant logs for ACS commitme ## Next Steps -- [Security Operations](/docs-main/global-synchronizer/production-operations/security-operations) — Key management and security hardening -- [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures) — Major and minor upgrade processes +- [Security Operations](/global-synchronizer/production-operations/security-operations) — Key management and security hardening +- [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures) — Major and minor upgrade processes {/* COPIED_START source="docs-website:docs/replicated/canton/3.4/participant/howtos/recover/repairing.rst" hash="a821b734" */} diff --git a/docs-main/global-synchronizer/production-operations/key-metrics.mdx b/docs-main/global-synchronizer/production-operations/key-metrics.mdx index 7f97f0348..fcdfe499a 100644 --- a/docs-main/global-synchronizer/production-operations/key-metrics.mdx +++ b/docs-main/global-synchronizer/production-operations/key-metrics.mdx @@ -3,7 +3,7 @@ title: "Key Metrics" description: "Critical metrics to monitor for Canton Network validators and SV nodes" --- -This page covers the metrics that matter most for operational health of your validator or SV node. For the complete metrics catalog, see the [Metrics Reference](/docs-main/global-synchronizer/reference/metrics-reference). +This page covers the metrics that matter most for operational health of your validator or SV node. 
For the complete metrics catalog, see the [Metrics Reference](/global-synchronizer/reference/metrics-reference). ## Validator Health Checks @@ -99,5 +99,5 @@ Set up alerts for these conditions: ## Next Steps -- [Monitoring Setup](/docs-main/global-synchronizer/production-operations/monitoring-setup) — Configuring Prometheus and Grafana -- [Metrics Reference](/docs-main/global-synchronizer/reference/metrics-reference) — Complete metrics catalog +- [Monitoring Setup](/global-synchronizer/production-operations/monitoring-setup) — Configuring Prometheus and Grafana +- [Metrics Reference](/global-synchronizer/reference/metrics-reference) — Complete metrics catalog diff --git a/docs-main/global-synchronizer/production-operations/logging.mdx b/docs-main/global-synchronizer/production-operations/logging.mdx index 39080fbb6..c1ae7aa99 100644 --- a/docs-main/global-synchronizer/production-operations/logging.mdx +++ b/docs-main/global-synchronizer/production-operations/logging.mdx @@ -125,7 +125,7 @@ In Kubernetes deployments, the standard practice is to log to stdout and let the ## Metrics exposure -For Prometheus metrics endpoints, scraping configuration, and Grafana dashboards, see [Monitoring Setup](/docs-main/global-synchronizer/production-operations/monitoring-setup). +For Prometheus metrics endpoints, scraping configuration, and Grafana dashboards, see [Monitoring Setup](/global-synchronizer/production-operations/monitoring-setup). 
# diff --git a/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx b/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx index 1425e9e75..f171ca697 100644 --- a/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx +++ b/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx @@ -130,8 +130,8 @@ The dashboards assume a Kubernetes deployment and may need modification for othe ## Next Steps -- [Key Metrics](/docs-main/global-synchronizer/production-operations/key-metrics) — Which metrics to monitor and what they mean -- [Metrics Reference](/docs-main/global-synchronizer/reference/metrics-reference) — Complete metrics catalog +- [Key Metrics](/global-synchronizer/production-operations/key-metrics) — Which metrics to monitor and what they mean +- [Metrics Reference](/global-synchronizer/reference/metrics-reference) — Complete metrics catalog {/* COPIED_START source="docs-website:docs/replicated/canton/3.4/participant/tutorials/monitoring/example_monitoring_setup.rst" hash="c78cf431" */} diff --git a/docs-main/global-synchronizer/production-operations/pruning.mdx b/docs-main/global-synchronizer/production-operations/pruning.mdx index 480f522cf..cb931482d 100644 --- a/docs-main/global-synchronizer/production-operations/pruning.mdx +++ b/docs-main/global-synchronizer/production-operations/pruning.mdx @@ -27,7 +27,7 @@ The recommended settings are a `pruningInterval` of `1 hour` and a `retentionPer A validator that falls more than 30 days behind (the default retention period) will be unable to catch up on the synchronizer, because the sequencer will have pruned the data it needs. Set the retention period with this constraint in mind. -When sequencer pruning is enabled, you must keep historical backups for each pruning window. The time difference between two historical backups must be smaller than the `retentionPeriod`. 
See the [Backup and Recovery](/docs-main/global-synchronizer/production-operations/backup-and-recovery) page for full backup requirements. +When sequencer pruning is enabled, you must keep historical backups for each pruning window. The time difference between two historical backups must be smaller than the `retentionPeriod`. See the [Backup and Recovery](/global-synchronizer/production-operations/backup-and-recovery) page for full backup requirements. ## CometBFT pruning diff --git a/docs-main/global-synchronizer/production-operations/security-operations.mdx b/docs-main/global-synchronizer/production-operations/security-operations.mdx index 655e6bc05..971017f0b 100644 --- a/docs-main/global-synchronizer/production-operations/security-operations.mdx +++ b/docs-main/global-synchronizer/production-operations/security-operations.mdx @@ -124,6 +124,6 @@ Rotate the following credentials on a regular schedule: ## Next Steps -- [Backup and Recovery](/docs-main/global-synchronizer/production-operations/backup-and-recovery) — Backup procedures and disaster recovery -- [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures) — Upgrade processes for validators and SVs -- [Infrastructure Requirements](/docs-main/global-synchronizer/understand/infrastructure-requirements) — Hardware and network security requirements +- [Backup and Recovery](/global-synchronizer/production-operations/backup-and-recovery) — Backup procedures and disaster recovery +- [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures) — Upgrade processes for validators and SVs +- [Infrastructure Requirements](/global-synchronizer/understand/infrastructure-requirements) — Hardware and network security requirements diff --git a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx index ee585509f..049478e38 100644 --- 
a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx +++ b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx @@ -17,7 +17,7 @@ Reviewers: Skip this section. Remove markers after final approval. Minor upgrades (e.g., `0.5.8` to `0.5.9`) can be performed independently by each node operator. They require only a `helm upgrade` with the new chart version. -Always read the [release notes](/docs-main/global-synchronizer/release-notes/current-release) before upgrading to learn about required configuration changes. +Always read the [release notes](/global-synchronizer/release-notes/current-release) before upgrading to learn about required configuration changes. {/* COPIED_END */} @@ -191,5 +191,5 @@ The command completes once enough SVs have executed it. The old synchronizer the ## Next Steps -- [Backup and Recovery](/docs-main/global-synchronizer/production-operations/backup-and-recovery) — Backup procedures before upgrades -- [Release Notes](/docs-main/global-synchronizer/release-notes/current-release) — Current release changes and requirements +- [Backup and Recovery](/global-synchronizer/production-operations/backup-and-recovery) — Backup procedures before upgrades +- [Release Notes](/global-synchronizer/release-notes/current-release) — Current release changes and requirements diff --git a/docs-main/global-synchronizer/reference/canton-admin-api.mdx b/docs-main/global-synchronizer/reference/canton-admin-api.mdx index 236bc45ab..9f0aa591d 100644 --- a/docs-main/global-synchronizer/reference/canton-admin-api.mdx +++ b/docs-main/global-synchronizer/reference/canton-admin-api.mdx @@ -17,7 +17,7 @@ The participant Admin API (default port: 5002) covers: - **Pruning** — Manage ledger data retention - **Repair** — ACS import/export for disaster recovery -For detailed usage examples, see [Essential Commands](/docs-main/global-synchronizer/canton-console/essential-commands). 
+For detailed usage examples, see [Essential Commands](/global-synchronizer/canton-console/essential-commands). ## Sequencer Admin API @@ -37,7 +37,7 @@ The mediator Admin API (default port: 5007) covers: ### Canton Console -The Canton Console wraps Admin API calls in a Scala REPL. This is the recommended way to interact with the Admin API for debugging and operational tasks. See [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview). +The Canton Console wraps Admin API calls in a Scala REPL. This is the recommended way to interact with the Admin API for debugging and operational tasks. See [Console Overview](/global-synchronizer/canton-console/console-overview). ### Direct gRPC @@ -45,7 +45,7 @@ For programmatic access, connect directly to the gRPC Admin API ports. Proto def ### Kubernetes Debug Pod -In Kubernetes deployments, access the Admin API via a debug pod. See [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview) for the K8s access procedure. +In Kubernetes deployments, access the Admin API via a debug pod. See [Console Overview](/global-synchronizer/canton-console/console-overview) for the K8s access procedure. 
## Authentication @@ -53,6 +53,6 @@ The Admin API is typically accessed within a trusted network (VPN, private netwo ## Next Steps -- [Splice APIs](/docs-main/global-synchronizer/reference/splice-apis) — Scan and Validator REST APIs -- [Console Overview](/docs-main/global-synchronizer/canton-console/console-overview) — Interactive console access -- [Admin API Reference (AppDev)](/docs-main/appdev/reference/admin-api-reference) — Developer-focused Admin API reference +- [Splice APIs](/global-synchronizer/reference/splice-apis) — Scan and Validator REST APIs +- [Console Overview](/global-synchronizer/canton-console/console-overview) — Interactive console access +- [Admin API Reference (AppDev)](/appdev/reference/admin-api-reference) — Developer-focused Admin API reference diff --git a/docs-main/global-synchronizer/reference/canton-console-reference.mdx b/docs-main/global-synchronizer/reference/canton-console-reference.mdx index 9f7ddc26d..fc1d75731 100644 --- a/docs-main/global-synchronizer/reference/canton-console-reference.mdx +++ b/docs-main/global-synchronizer/reference/canton-console-reference.mdx @@ -205,7 +205,7 @@ To run a script against a remote node without entering the interactive REPL: ### TLS and Authorization -This section will be expanded in a future update. For TLS configuration, see the [Validator Configuration Reference](/docs-main/global-synchronizer/reference/configuration-reference). +This section will be expanded in a future update. For TLS configuration, see the [Validator Configuration Reference](/global-synchronizer/reference/configuration-reference). 
## Node References diff --git a/docs-main/global-synchronizer/reference/configuration-reference.mdx b/docs-main/global-synchronizer/reference/configuration-reference.mdx index eebfd24fa..1808d7a2b 100644 --- a/docs-main/global-synchronizer/reference/configuration-reference.mdx +++ b/docs-main/global-synchronizer/reference/configuration-reference.mdx @@ -24,7 +24,7 @@ import CantonDocsGlobalSynchronizerReferenceConfigurationReferenceL355 from "/sn This page covers the configuration options available to validator and Super Validator (SV) operators on Canton Network. It covers Splice app configuration, Canton participant settings, database setup, authentication, traffic management, pruning, and observability. -For application developer configuration (Canton + DPM), see the [AppDev Configuration Reference](/docs-main/appdev/reference/configuration-reference). +For application developer configuration (Canton + DPM), see the [AppDev Configuration Reference](/appdev/reference/configuration-reference). ## Configuration format @@ -34,7 +34,7 @@ When deploying with Helm, pass `ADDITIONAL_CONFIG` values through the `additiona ### Custom bootstrap scripts -Custom bootstrap scripts run Canton Console commands at node startup. This section will be expanded in a future update. See the [Canton Console Scripting](/docs-main/global-synchronizer/canton-console/scripting) page for script syntax and examples. +Custom bootstrap scripts run Canton Console commands at node startup. This section will be expanded in a future update. See the [Canton Console Scripting](/global-synchronizer/canton-console/scripting) page for script syntax and examples. ## Validator node configuration @@ -139,7 +139,7 @@ For mutual TLS, add `sslcert` and `sslkey` pointing to the client certificate an ## Authentication -Validator components authenticate to each other and to external users through JWT tokens issued by an OpenID Connect (OIDC) provider. 
Full setup instructions are in the [deployment section](/docs-main/global-synchronizer/deployment/authorization-setup). The key configuration secrets are: +Validator components authenticate to each other and to external users through JWT tokens issued by an OpenID Connect (OIDC) provider. Full setup instructions are in the [deployment section](/global-synchronizer/deployment/authorization-setup). The key configuration secrets are: diff --git a/docs-main/global-synchronizer/reference/error-codes.mdx b/docs-main/global-synchronizer/reference/error-codes.mdx index 7993f1e85..95aa5f552 100644 --- a/docs-main/global-synchronizer/reference/error-codes.mdx +++ b/docs-main/global-synchronizer/reference/error-codes.mdx @@ -3337,11 +3337,11 @@ These errors appear when your participant cannot communicate with the synchroniz ## Sequencer Errors -This section will be expanded in a future update. For sequencer-related troubleshooting, see [Troubleshooting Methodology](/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). +This section will be expanded in a future update. For sequencer-related troubleshooting, see [Troubleshooting Methodology](/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). ## Mediator Errors -This section will be expanded in a future update. For mediator-related troubleshooting, see [Troubleshooting Methodology](/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). +This section will be expanded in a future update. For mediator-related troubleshooting, see [Troubleshooting Methodology](/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). 
## ACS Commitment Errors @@ -3383,5 +3383,5 @@ ABORTED: Traffic balance below reserved traffic amount (0 < 200000) ## Error escalation -For the decision path on how to triage and escalate errors, see [Error Escalation](/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology#error-escalation) in the troubleshooting methodology. +For the decision path on how to triage and escalate errors, see [Error Escalation](/global-synchronizer/troubleshooting-guide/troubleshooting-methodology#error-escalation) in the troubleshooting methodology. diff --git a/docs-main/global-synchronizer/reference/metrics-reference.mdx b/docs-main/global-synchronizer/reference/metrics-reference.mdx index 6e86e92df..92e1f5ebb 100644 --- a/docs-main/global-synchronizer/reference/metrics-reference.mdx +++ b/docs-main/global-synchronizer/reference/metrics-reference.mdx @@ -76,7 +76,7 @@ An HTTP 200 response means the node is ready. ### Topology metrics (optional) -This section will be expanded in a future update. Topology metrics track synchronizer membership changes and party-to-participant mappings. For monitoring guidance, see [Performance Optimization](/docs-main/global-synchronizer/production-operations/performance-optimization). +This section will be expanded in a future update. Topology metrics track synchronizer membership changes and party-to-participant mappings. For monitoring guidance, see [Performance Optimization](/global-synchronizer/production-operations/performance-optimization). ## Key participant metrics diff --git a/docs-main/global-synchronizer/reference/splice-apis.mdx b/docs-main/global-synchronizer/reference/splice-apis.mdx index 0de49f1c0..cae627327 100644 --- a/docs-main/global-synchronizer/reference/splice-apis.mdx +++ b/docs-main/global-synchronizer/reference/splice-apis.mdx @@ -79,5 +79,5 @@ All Splice APIs require JWT authentication. 
Obtain tokens from your OIDC provide ## Next Steps -- [Canton Admin API](/docs-main/global-synchronizer/reference/canton-admin-api) — Canton node administration API +- [Canton Admin API](/global-synchronizer/reference/canton-admin-api) — Canton node administration API - [Ledger API Reference](/api-reference) — Application-facing Ledger API diff --git a/docs-main/global-synchronizer/release-notes/current-release.mdx b/docs-main/global-synchronizer/release-notes/current-release.mdx index 572bdef01..8b4ef2ee7 100644 --- a/docs-main/global-synchronizer/release-notes/current-release.mdx +++ b/docs-main/global-synchronizer/release-notes/current-release.mdx @@ -58,4 +58,4 @@ If you are upgrading from 0.5.7 or earlier, skip 0.5.7 and go directly to at lea Version 0.5.8 breaks backwards compatibility with migration dumps taken on 0.4.x versions. Deploy with `migrating: false` (Helm) or without `-M` (Docker Compose). -For the full upgrade procedure, see [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures). +For the full upgrade procedure, see [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures). diff --git a/docs-main/global-synchronizer/release-notes/minor-releases.mdx b/docs-main/global-synchronizer/release-notes/minor-releases.mdx index 95ec24980..189686436 100644 --- a/docs-main/global-synchronizer/release-notes/minor-releases.mdx +++ b/docs-main/global-synchronizer/release-notes/minor-releases.mdx @@ -3,7 +3,7 @@ title: "Minor Releases" description: "Release notes for Splice 0.5.x minor releases" --- -This page covers the minor releases in the current Splice 0.5.x series. Each release corresponds to a Canton 3.4.x version. For the current release, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release). +This page covers the minor releases in the current Splice 0.5.x series. Each release corresponds to a Canton 3.4.x version. 
For the current release, see [Current Release](/global-synchronizer/release-notes/current-release). ## Splice 0.5.8 @@ -177,4 +177,4 @@ This upgrade requires a synchronizer migration with downtime and cannot be appli {/* COPIED_END */} -For the complete release history including the 0.4.x series, see [Release History](/docs-main/global-synchronizer/release-notes/release-history). +For the complete release history including the 0.4.x series, see [Release History](/global-synchronizer/release-notes/release-history). diff --git a/docs-main/global-synchronizer/release-notes/release-history.mdx b/docs-main/global-synchronizer/release-notes/release-history.mdx index 9db20d07f..b5750759c 100644 --- a/docs-main/global-synchronizer/release-notes/release-history.mdx +++ b/docs-main/global-synchronizer/release-notes/release-history.mdx @@ -3,7 +3,7 @@ title: "Release History" description: "Complete release history of Global Synchronizer software versions" --- -This page provides a high-level overview of all Global Synchronizer software releases. For detailed notes on recent releases, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release) and [Minor Releases](/docs-main/global-synchronizer/release-notes/minor-releases). +This page provides a high-level overview of all Global Synchronizer software releases. For detailed notes on recent releases, see [Current Release](/global-synchronizer/release-notes/current-release) and [Minor Releases](/global-synchronizer/release-notes/minor-releases). 
## Splice 0.5.x (Canton 3.4.x) diff --git a/docs-main/global-synchronizer/release-notes/release-notes.mdx b/docs-main/global-synchronizer/release-notes/release-notes.mdx index 0f0e45224..78ef8870c 100644 --- a/docs-main/global-synchronizer/release-notes/release-notes.mdx +++ b/docs-main/global-synchronizer/release-notes/release-notes.mdx @@ -59,10 +59,10 @@ If you are upgrading from 0.5.7 or earlier, skip 0.5.7 and go directly to at lea Version 0.5.8 breaks backwards compatibility with migration dumps taken on 0.4.x versions. Deploy with `migrating: false` (Helm) or without `-M` (Docker Compose). -For the full upgrade procedure, see [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures). +For the full upgrade procedure, see [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures). -This page covers the minor releases in the current Splice 0.5.x series. Each release corresponds to a Canton 3.4.x version. For the current release, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release). +This page covers the minor releases in the current Splice 0.5.x series. Each release corresponds to a Canton 3.4.x version. For the current release, see [Current Release](/global-synchronizer/release-notes/current-release). ## Splice 0.5.8 @@ -236,7 +236,7 @@ This upgrade requires a synchronizer migration with downtime and cannot be appli {/* COPIED_END */} -For the complete release history including the 0.4.x series, see [Release History](/docs-main/global-synchronizer/release-notes/release-history). +For the complete release history including the 0.4.x series, see [Release History](/global-synchronizer/release-notes/release-history). The Global Synchronizer software follows a release cadence that includes periodic patch releases addressing bug fixes, performance improvements, and security updates. These releases use semantic versioning within the current 0.5.x series. 
@@ -271,7 +271,7 @@ Patch releases within the same minor version (0.5.x) are designed to be applied Some patch releases include important caveats. For example, 0.5.7 introduced a performance regression and should be skipped in favor of 0.5.8. Always read the release notes before upgrading. -For the full upgrade procedure, see [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures). +For the full upgrade procedure, see [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures). ## Version skipping @@ -287,10 +287,10 @@ Each Splice release is built against a specific Canton version. The Canton relea | 0.5.0 – 0.5.8 | 3.4.x | | 0.4.x | 3.3.x | -For details on individual releases, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release) and [Minor Releases](/docs-main/global-synchronizer/release-notes/minor-releases). +For details on individual releases, see [Current Release](/global-synchronizer/release-notes/current-release) and [Minor Releases](/global-synchronizer/release-notes/minor-releases). -This page provides a high-level overview of all Global Synchronizer software releases. For detailed notes on recent releases, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release) and [Minor Releases](/docs-main/global-synchronizer/release-notes/minor-releases). +This page provides a high-level overview of all Global Synchronizer software releases. For detailed notes on recent releases, see [Current Release](/global-synchronizer/release-notes/current-release) and [Minor Releases](/global-synchronizer/release-notes/minor-releases). 
## Splice 0.5.x (Canton 3.4.x) diff --git a/docs-main/global-synchronizer/release-notes/weekly-patch-releases.mdx b/docs-main/global-synchronizer/release-notes/weekly-patch-releases.mdx index f829dfbdf..fab64a387 100644 --- a/docs-main/global-synchronizer/release-notes/weekly-patch-releases.mdx +++ b/docs-main/global-synchronizer/release-notes/weekly-patch-releases.mdx @@ -35,7 +35,7 @@ Patch releases within the same minor version (0.5.x) are designed to be applied Some patch releases include important caveats. For example, 0.5.7 introduced a performance regression and should be skipped in favor of 0.5.8. Always read the release notes before upgrading. -For the full upgrade procedure, see [Upgrade Procedures](/docs-main/global-synchronizer/production-operations/upgrade-procedures). +For the full upgrade procedure, see [Upgrade Procedures](/global-synchronizer/production-operations/upgrade-procedures). ## Version skipping @@ -51,4 +51,4 @@ Each Splice release is built against a specific Canton version. The Canton relea | 0.5.0 – 0.5.8 | 3.4.x | | 0.4.x | 3.3.x | -For details on individual releases, see [Current Release](/docs-main/global-synchronizer/release-notes/current-release) and [Minor Releases](/docs-main/global-synchronizer/release-notes/minor-releases). +For details on individual releases, see [Current Release](/global-synchronizer/release-notes/current-release) and [Minor Releases](/global-synchronizer/release-notes/minor-releases). diff --git a/docs-main/global-synchronizer/troubleshooting-guide/common-questions.mdx b/docs-main/global-synchronizer/troubleshooting-guide/common-questions.mdx index 946d92064..42b0d6549 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/common-questions.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/common-questions.mdx @@ -12,7 +12,7 @@ import CantonGlobalSynchronizerTroubleshootingGuideCommonQuestionsL43 from "/sni Onboarding to TestNet requires a Super Validator (SV) sponsor. 
Contact the SV you are working with, or reach out through the `#validator-operations-onboarding` Slack channel to find a sponsor. The sponsor submits a vote to approve your validator. The process typically takes 2-4 weeks. -MainNet onboarding follows the same pattern but with stricter requirements. You need a signed operator agreement and must demonstrate successful operation on TestNet. See the [onboarding process](/docs-main/global-synchronizer/deployment/onboarding-process) documentation for the full checklist. +MainNet onboarding follows the same pattern but with stricter requirements. You need a signed operator agreement and must demonstrate successful operation on TestNet. See the [onboarding process](/global-synchronizer/deployment/onboarding-process) documentation for the full checklist. ### What ports need to be open? diff --git a/docs-main/global-synchronizer/troubleshooting-guide/error-code-reference.mdx b/docs-main/global-synchronizer/troubleshooting-guide/error-code-reference.mdx index 5d38cf323..2fefc2b22 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/error-code-reference.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/error-code-reference.mdx @@ -25,7 +25,7 @@ This page lists the error codes most frequently encountered by validator operato - **Message:** `Insufficient traffic for submission` - **Cause:** Your validator's traffic balance is too low to submit the transaction to the sequencer. -- **Resolution:** Purchase additional traffic via the validator API or enable auto-top-up. See [Transaction Failures](/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures) for details. +- **Resolution:** Purchase additional traffic via the validator API or enable auto-top-up. See [Transaction Failures](/global-synchronizer/troubleshooting-guide/transaction-failures) for details. 
## Sequencer Errors @@ -81,7 +81,7 @@ This page lists the error codes most frequently encountered by validator operato - **Message:** `Cannot convert configuration` - **Cause:** A configuration value is missing, empty, or of the wrong type. The error message includes the specific configuration path. -- **Resolution:** Check the path mentioned in the error against your configuration files and environment variables. See [Configuration Problems](/docs-main/global-synchronizer/troubleshooting-guide/configuration-problems). +- **Resolution:** Check the path mentioned in the error against your configuration files and environment variables. See [Configuration Problems](/global-synchronizer/troubleshooting-guide/configuration-problems). ### DB_STORAGE_DEGRADATION diff --git a/docs-main/global-synchronizer/troubleshooting-guide/runbooks.mdx b/docs-main/global-synchronizer/troubleshooting-guide/runbooks.mdx index fc3b3a3ec..165ca1619 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/runbooks.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/runbooks.mdx @@ -14,7 +14,7 @@ Use this template for any incident that does not have a dedicated runbook. 1. **Detect** -- Identify the symptom. How was the issue discovered? (Alert, user report, routine check.) 2. **Assess** -- Determine scope and severity. Is the validator offline? Are transactions failing? Is data at risk? -3. **Act** -- Execute the appropriate fix. Follow the relevant runbook below or the [troubleshooting methodology](/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). +3. **Act** -- Execute the appropriate fix. Follow the relevant runbook below or the [troubleshooting methodology](/global-synchronizer/troubleshooting-guide/troubleshooting-methodology). 4. **Verify** -- Confirm the fix. Check health endpoints, submit a test transaction, review logs for recurring errors. 5. 
**Document** -- Record what happened, what caused it, what was done, and any follow-up actions. diff --git a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx index b613d0c39..49886e859 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx @@ -73,7 +73,7 @@ nc -zv sequencer.sync.global 443 ping -c 3 sequencer.dev.sync.global ``` -TLS issues often surface as connection hangs or handshake failures. See [Connectivity Issues](/docs-main/global-synchronizer/troubleshooting-guide/connectivity-issues) for details. +TLS issues often surface as connection hangs or handshake failures. See [Connectivity Issues](/global-synchronizer/troubleshooting-guide/connectivity-issues) for details. ## Step 4: Check the Database @@ -92,7 +92,7 @@ psql -h $DB_HOST -U $DB_USER -d participant -c " " ``` -If tables are large (tens of GB), enable pruning. See [Performance Issues](/docs-main/global-synchronizer/troubleshooting-guide/performance-issues). +If tables are large (tens of GB), enable pruning. See [Performance Issues](/global-synchronizer/troubleshooting-guide/performance-issues). 
## Step 5: Check Canton Console @@ -244,7 +244,7 @@ When sharing logs with support, redact private keys, passwords, and JWT tokens, If self-diagnosis does not resolve the issue, escalate in this order: -- **Self-service documentation** -- search this troubleshooting guide and the [cheat sheet](/docs-main/global-synchronizer/troubleshooting) +- **Self-service documentation** -- search this troubleshooting guide and the [cheat sheet](/global-synchronizer/troubleshooting) - **Community Slack channels** -- post in `#validator-operations` or `#gsf-global-synchronizer-appdev` with your error message, logs (redacted), and environment details - **Email support** -- contact [da-support@digitalasset.com](mailto:da-support@digitalasset.com) for best-effort discretionary support - **Paid support with SLAs** -- contact [support@digitalasset.com](mailto:support@digitalasset.com), which opens a tracked Jira ticket diff --git a/docs-main/global-synchronizer/understand/infrastructure-requirements.mdx b/docs-main/global-synchronizer/understand/infrastructure-requirements.mdx index 75168d06f..86bff4886 100644 --- a/docs-main/global-synchronizer/understand/infrastructure-requirements.mdx +++ b/docs-main/global-synchronizer/understand/infrastructure-requirements.mdx @@ -225,11 +225,11 @@ Canton Network requires egress IP registration: - + Understand your responsibilities as a validator. - + Begin deploying your validator node. diff --git a/docs-main/global-synchronizer/understand/introduction.mdx b/docs-main/global-synchronizer/understand/introduction.mdx index 513404166..8b124f8e3 100644 --- a/docs-main/global-synchronizer/understand/introduction.mdx +++ b/docs-main/global-synchronizer/understand/introduction.mdx @@ -146,7 +146,7 @@ Validators must keep pace with network upgrades. Falling behind versions can res Before deploying a validator, ensure you have: 1. **Sponsorship**: A Super Validator must sponsor your onboarding -2. 
**Infrastructure**: Meet the [infrastructure requirements](/docs-main/global-synchronizer/understand/infrastructure-requirements) +2. **Infrastructure**: Meet the [infrastructure requirements](/global-synchronizer/understand/infrastructure-requirements) 3. **Technical capacity**: Team capable of operating containerized services 4. **Canton Coin**: Budget for traffic fees (TestNet/MainNet) @@ -187,11 +187,11 @@ The Global Synchronizer handles: - + Hardware, software, and network requirements. - + Understand your responsibilities as a validator. diff --git a/docs-main/global-synchronizer/understand/validator-roles.mdx b/docs-main/global-synchronizer/understand/validator-roles.mdx index 11b43e972..9ac9bcde8 100644 --- a/docs-main/global-synchronizer/understand/validator-roles.mdx +++ b/docs-main/global-synchronizer/understand/validator-roles.mdx @@ -208,7 +208,7 @@ Operating a validator involves several cost categories: ### Prerequisites 1. **Technical capacity**: Team capable of operating containerized services -2. **Infrastructure**: Meet [infrastructure requirements](/docs-main/global-synchronizer/understand/infrastructure-requirements) +2. **Infrastructure**: Meet [infrastructure requirements](/global-synchronizer/understand/infrastructure-requirements) 3. **Sponsorship**: Super Validator willing to sponsor 4. **Canton Coin**: Budget for traffic fees @@ -224,11 +224,11 @@ Operating a validator involves several cost categories: - + Begin deploying your validator node. - + Review detailed infrastructure requirements. diff --git a/docs-main/integrations/apps/finding-apps.mdx b/docs-main/integrations/apps/finding-apps.mdx index 01d2b2dd8..2f002d556 100644 --- a/docs-main/integrations/apps/finding-apps.mdx +++ b/docs-main/integrations/apps/finding-apps.mdx @@ -15,11 +15,11 @@ If you don't find what you need, consider building it yourself: - + Build your own Canton application. - + How to list your application. 
diff --git a/docs-main/integrations/ecosystem.mdx b/docs-main/integrations/ecosystem.mdx index e1ee3fba3..8b4580349 100644 --- a/docs-main/integrations/ecosystem.mdx +++ b/docs-main/integrations/ecosystem.mdx @@ -137,14 +137,14 @@ For current network statistics, visit: ### As a Validator -1. Review [infrastructure requirements](/docs-main/global-synchronizer/understand/infrastructure-requirements) +1. Review [infrastructure requirements](/global-synchronizer/understand/infrastructure-requirements) 2. Contact a [Super Validator](https://canton.foundation) for sponsorship 3. Complete the onboarding process 4. Begin operations ### As a Developer -1. Start with the [QuickStart](/docs-main/appdev/quickstart) +1. Start with the [QuickStart](/appdev/quickstart) 2. Learn [Daml](https://docs.daml.com) 3. Build and deploy your application 4. Join the developer community {/* TODO: Add Slack link once available */} @@ -179,11 +179,11 @@ Check [canton.network](https://canton.network) for upcoming events. - + Learn common integration approaches. - + Begin developing on Canton Network. 
diff --git a/docs-main/integrations/exchanges/sdk-download.mdx b/docs-main/integrations/exchanges/sdk-download.mdx index ff4fcd800..856f079dd 100644 --- a/docs-main/integrations/exchanges/sdk-download.mdx +++ b/docs-main/integrations/exchanges/sdk-download.mdx @@ -67,4 +67,4 @@ The exchange integration guide is structured as incremental milestones: ## Next Steps -- [Exchange Integration Guidance](/docs-main/integrations/exchanges/guidance) — Workflows, architecture, and deployment +- [Exchange Integration Guidance](/integrations/exchanges/guidance) — Workflows, architecture, and deployment diff --git a/docs-main/integrations/integration-patterns.mdx b/docs-main/integrations/integration-patterns.mdx index 62c85584b..2ff5a1cd3 100644 --- a/docs-main/integrations/integration-patterns.mdx +++ b/docs-main/integrations/integration-patterns.mdx @@ -262,11 +262,11 @@ async function submitWithRetry(command: Command, maxRetries = 3): Promise - + Detailed wallet integration guide. - + Implement the Canton Token Standard. diff --git a/docs-main/integrations/overview.mdx b/docs-main/integrations/overview.mdx index 22b943c1b..6850df808 100644 --- a/docs-main/integrations/overview.mdx +++ b/docs-main/integrations/overview.mdx @@ -20,11 +20,11 @@ Canton Network provides several categories of integrations: - + Discover wallet options for managing your Canton Coin. - + Explore applications built on Canton Network. @@ -34,11 +34,11 @@ Canton Network provides several categories of integrations: - + Add wallet functionality to your application using the Wallet SDK. - + Connect exchanges and liquidity providers. @@ -61,11 +61,11 @@ Unlike blockchain explorers on Ethereum that show all transactions, Canton explo - + Common patterns for building integrations. - + Explore the broader Canton Network ecosystem. 
diff --git a/docs-main/integrations/wallet/configuration.mdx b/docs-main/integrations/wallet/configuration.mdx index 510d901f1..b9e0d56a0 100644 --- a/docs-main/integrations/wallet/configuration.mdx +++ b/docs-main/integrations/wallet/configuration.mdx @@ -76,5 +76,5 @@ See the [config template](https://github.com/hyperledger-labs/splice-wallet-kern ## Next Steps -- [Wallet SDK Download](/docs-main/integrations/wallet/sdk-download) — Installation instructions -- [Wallet Integration Guidance](/docs-main/integrations/wallet/guidance) — Signing transactions from dApps +- [Wallet SDK Download](/integrations/wallet/sdk-download) — Installation instructions +- [Wallet Integration Guidance](/integrations/wallet/guidance) — Signing transactions from dApps diff --git a/docs-main/integrations/wallet/sdk-download.mdx b/docs-main/integrations/wallet/sdk-download.mdx index 66451f31f..9f8ba7667 100644 --- a/docs-main/integrations/wallet/sdk-download.mdx +++ b/docs-main/integrations/wallet/sdk-download.mdx @@ -48,5 +48,5 @@ The Wallet SDK includes: ## Next Steps -- [Wallet Configuration](/docs-main/integrations/wallet/configuration) — Configure the SDK for your environment -- [Wallet Integration Guidance](/docs-main/integrations/wallet/guidance) — Signing transactions from dApps +- [Wallet Configuration](/integrations/wallet/configuration) — Configure the SDK for your environment +- [Wallet Integration Guidance](/integrations/wallet/guidance) — Signing transactions from dApps diff --git a/docs-main/integrations/wallets/canton-vs-web3.mdx b/docs-main/integrations/wallets/canton-vs-web3.mdx index e8796bc9c..eceac8ee5 100644 --- a/docs-main/integrations/wallets/canton-vs-web3.mdx +++ b/docs-main/integrations/wallets/canton-vs-web3.mdx @@ -208,11 +208,11 @@ There's no equivalent of Etherscan showing all network transactions. This is by - + Integrate wallet functionality into your app. - + Understand the Canton Token Standard. 
diff --git a/docs-main/integrations/wallets/for-users.mdx b/docs-main/integrations/wallets/for-users.mdx index 3cef198bb..473d734ca 100644 --- a/docs-main/integrations/wallets/for-users.mdx +++ b/docs-main/integrations/wallets/for-users.mdx @@ -156,11 +156,11 @@ If you're running an application that needs traffic: - + Understand the technical differences. - + Discover Canton Network applications. diff --git a/docs-main/overview/learn/architecture.mdx b/docs-main/overview/learn/architecture.mdx index b8a99b2de..8a966e50a 100644 --- a/docs-main/overview/learn/architecture.mdx +++ b/docs-main/overview/learn/architecture.mdx @@ -305,6 +305,6 @@ flowchart TB ## Next Steps -- **[Privacy Model Explained](/docs-main/overview/learn/privacy-model)** - Deep dive into sub-transaction privacy -- **[The Global Synchronizer](/docs-main/overview/understand/global-synchronizer)** - Understand the public network infrastructure -- **[Validator Operations](/docs-main/global-synchronizer/understand/introduction)** - For those deploying and operating validators +- **[Privacy Model Explained](/overview/learn/privacy-model)** - Deep dive into sub-transaction privacy +- **[The Global Synchronizer](/overview/understand/global-synchronizer)** - Understand the public network infrastructure +- **[Validator Operations](/global-synchronizer/understand/introduction)** - For those deploying and operating validators diff --git a/docs-main/overview/learn/how-transactions-work.mdx b/docs-main/overview/learn/how-transactions-work.mdx index 6d04c2ae2..1875faf67 100644 --- a/docs-main/overview/learn/how-transactions-work.mdx +++ b/docs-main/overview/learn/how-transactions-work.mdx @@ -69,6 +69,6 @@ Throughout this process, the synchronizer never saw the asset details, the trans ## Further Reading -- [Architecture Overview](/docs-main/overview/learn/architecture) -- How validators, synchronizers, and applications fit together -- [Privacy Model Explained](/docs-main/overview/learn/privacy-model) -- 
Detailed explanation of sub-transaction privacy and view decomposition -- [Two-Layer Consensus](/docs-main/overview/learn/two-layer-consensus) -- The consensus protocol in more technical detail +- [Architecture Overview](/overview/learn/architecture) -- How validators, synchronizers, and applications fit together +- [Privacy Model Explained](/overview/learn/privacy-model) -- Detailed explanation of sub-transaction privacy and view decomposition +- [Two-Layer Consensus](/overview/learn/two-layer-consensus) -- The consensus protocol in more technical detail diff --git a/docs-main/overview/learn/ledger-model.mdx b/docs-main/overview/learn/ledger-model.mdx index 1d36b04a6..0db7a20ff 100644 --- a/docs-main/overview/learn/ledger-model.mdx +++ b/docs-main/overview/learn/ledger-model.mdx @@ -225,7 +225,7 @@ Canton uses **ledger time** for contract operations. Time is: -See [Working with Time](/docs-main/appdev/modules/m3-working-with-time) for the full set of time primitives available in Canton 3.x. +See [Working with Time](/appdev/modules/m3-working-with-time) for the full set of time primitives available in Canton 3.x. 
## Composability @@ -237,6 +237,6 @@ The ledger enforces this atomicity — either the entire swap commits or none of ## Related Topics -- [Contract Templates](/docs-main/appdev/modules/m3-contract-templates) — write your first Daml contracts -- [Choices](/docs-main/appdev/modules/m3-choices) — add behavior to contracts -- [Privacy Model](/docs-main/overview/learn/privacy-model) — how views enable privacy +- [Contract Templates](/appdev/modules/m3-contract-templates) — write your first Daml contracts +- [Choices](/appdev/modules/m3-choices) — add behavior to contracts +- [Privacy Model](/overview/learn/privacy-model) — how views enable privacy diff --git a/docs-main/overview/learn/privacy-model.mdx b/docs-main/overview/learn/privacy-model.mdx index 86b77b07f..2ccb695fe 100644 --- a/docs-main/overview/learn/privacy-model.mdx +++ b/docs-main/overview/learn/privacy-model.mdx @@ -223,6 +223,6 @@ When designing Canton applications, ask: ## Next Steps -- **[The Global Synchronizer](/docs-main/overview/understand/global-synchronizer)** - Understand the public network infrastructure -- **[Developer Track Module 3: Daml Development](/docs-main/appdev/modules/m3-dev-environment)** - Apply privacy patterns in code -- **[Glossary](/docs-main/overview/understand/glossary)** - Terminology reference including privacy-related terms +- **[The Global Synchronizer](/overview/understand/global-synchronizer)** - Understand the public network infrastructure +- **[Developer Track Module 3: Daml Development](/appdev/modules/m3-dev-environment)** - Apply privacy patterns in code +- **[Glossary](/overview/understand/glossary)** - Terminology reference including privacy-related terms diff --git a/docs-main/overview/learn/trust-model.mdx b/docs-main/overview/learn/trust-model.mdx index a1d8ce74c..1b8c9737a 100644 --- a/docs-main/overview/learn/trust-model.mdx +++ b/docs-main/overview/learn/trust-model.mdx @@ -135,6 +135,6 @@ In a traditional blockchain, everyone sees every transaction and every 
node vali ## Related Topics -- [Two-Layer Consensus](/docs-main/overview/learn/two-layer-consensus) — how the consensus layers interact -- [Architecture Overview](/docs-main/overview/learn/architecture) — component responsibilities -- [Privacy Model](/docs-main/overview/learn/privacy-model) — what each party can see +- [Two-Layer Consensus](/overview/learn/two-layer-consensus) — how the consensus layers interact +- [Architecture Overview](/overview/learn/architecture) — component responsibilities +- [Privacy Model](/overview/learn/privacy-model) — what each party can see diff --git a/docs-main/overview/learn/two-layer-consensus.mdx b/docs-main/overview/learn/two-layer-consensus.mdx index 479cb09d3..eda437437 100644 --- a/docs-main/overview/learn/two-layer-consensus.mdx +++ b/docs-main/overview/learn/two-layer-consensus.mdx @@ -144,6 +144,6 @@ The flow begins in the ordering layer: a validator submits an encrypted transact ## Related Topics -- [Trust Model Overview](/docs-main/overview/learn/trust-model) — trust assumptions across each layer -- [Architecture Overview](/docs-main/overview/learn/architecture) — how components fit together -- [Privacy Model](/docs-main/overview/learn/privacy-model) — sub-transaction privacy in detail +- [Trust Model Overview](/overview/learn/trust-model) — trust assumptions across each layer +- [Architecture Overview](/overview/learn/architecture) — how components fit together +- [Privacy Model](/overview/learn/privacy-model) — sub-transaction privacy in detail diff --git a/docs-main/overview/reference/canton-coin-tokenomics.mdx b/docs-main/overview/reference/canton-coin-tokenomics.mdx index 6a504c22c..15093ce2b 100644 --- a/docs-main/overview/reference/canton-coin-tokenomics.mdx +++ b/docs-main/overview/reference/canton-coin-tokenomics.mdx @@ -5,7 +5,7 @@ description: "Technical reference for Canton Coin fees, minting rounds, activity Canton Coin (CC) is the native utility token of the Global Synchronizer. 
It is implemented through the [Splice](https://github.com/hyperledger-labs/splice) open-source infrastructure, where it is referred to as "Amulet" at the Daml contract level. CC serves three functions: paying for network usage (traffic), rewarding infrastructure operators and application providers, and governing the network through Super Validator participation. -For background on CC's role in the network and how to obtain it, see [Canton Coin and the Global Synchronizer](/docs-main/overview/understand/canton-coin). The [Canton Coin white paper](https://www.digitalasset.com/hubfs/Canton%20Network%20Files/Documents%20(whitepapers%2c%20etc...)/Canton%20Coin_%20A%20Canton-Network-native%20payment%20application.pdf) provides the full formal specification. +For background on CC's role in the network and how to obtain it, see [Canton Coin and the Global Synchronizer](/overview/understand/canton-coin). The [Canton Coin white paper](https://www.digitalasset.com/hubfs/Canton%20Network%20Files/Documents%20(whitepapers%2c%20etc...)/Canton%20Coin_%20A%20Canton-Network-native%20payment%20application.pdf) provides the full formal specification. 
## Fee Structure @@ -108,7 +108,7 @@ For API details, see the [CIP-0056 text](https://github.com/global-synchronizer- ## Related Resources -- [Canton Coin and the Global Synchronizer](/docs-main/overview/understand/canton-coin) -- conceptual overview and how to obtain CC +- [Canton Coin and the Global Synchronizer](/overview/understand/canton-coin) -- conceptual overview and how to obtain CC - [CIP-0078 (CC Fee Removal)](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0078/cip-0078.md) -- the proposal that eliminated transfer and lock fees - [CIP-0056 (CN Token Standard)](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0056/cip-0056.md) -- standard interfaces for token operations - [CIP-0073 (Weighted Validator Liveness Rewards)](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0073/cip-0073.md) -- liveness reward support for SV-determined parties diff --git a/docs-main/overview/reference/canton-protocol-specification.mdx b/docs-main/overview/reference/canton-protocol-specification.mdx index aa52eb9d8..05196a71b 100644 --- a/docs-main/overview/reference/canton-protocol-specification.mdx +++ b/docs-main/overview/reference/canton-protocol-specification.mdx @@ -3,7 +3,7 @@ title: "Canton Protocol Specification" description: "Technical specification of the Canton protocol architecture, covering consensus layers, transaction processing, and topology management" --- -This section provides the full technical specification of the Canton protocol. Where the [Learn](/docs-main/overview/learn/architecture) pages introduce concepts at a high level, these reference pages describe the protocol mechanics in detail — the data structures, trust assumptions, message flows, and formal properties that underpin Canton Network. +This section provides the full technical specification of the Canton protocol. 
Where the [Learn](/overview/learn/architecture) pages introduce concepts at a high level, these reference pages describe the protocol mechanics in detail — the data structures, trust assumptions, message flows, and formal properties that underpin Canton Network. ## Protocol Architecture @@ -25,19 +25,19 @@ Participants and mediators never communicate directly. All messages flow through ## Reference Pages - + The extended UTXO model: templates, stakeholders, choices, transaction structure, views, and witnesses. - + Proof of Stakeholder validation, privacy-preserving consensus, and trust domain comparisons. - + Sequencer and mediator architecture, BFT ordering service, and the ISS-inspired consensus protocol. - + The complete five-phase lifecycle from preparation through commit. - + Namespace management, cryptographic keys, party-to-participant mappings, and topology transactions. @@ -62,4 +62,4 @@ A Daml transaction moves through both consensus layers during its lifecycle: 4. Each confirming participant validates its views and sends a confirmation or rejection to the mediator (smart contract layer, via ordering layer) 5. The mediator aggregates confirmations and issues a verdict, within the required time window, which the sequencer distributes to all participants (ordering layer) -The [Transaction Lifecycle](/docs-main/overview/reference/transaction-lifecycle) page covers each phase in full detail. +The [Transaction Lifecycle](/overview/reference/transaction-lifecycle) page covers each phase in full detail. diff --git a/docs-main/overview/reference/cip-0056.mdx b/docs-main/overview/reference/cip-0056.mdx index 33b6e5d7e..1534d27d4 100644 --- a/docs-main/overview/reference/cip-0056.mdx +++ b/docs-main/overview/reference/cip-0056.mdx @@ -68,7 +68,7 @@ The `AllocationView` contains: - `meta` -- extensibility metadata {/* COPIED_END */} -You create allocations by fetching an `AllocationFactory` from the registry and exercising `AllocationFactory_Allocate`. 
Use the [Token Standard API](/docs-main/overview/reference/splice-wallet-reference#wallet-api-endpoints) to discover and interact with allocation contracts. The settlement's `executor` party, along with the sender and receiver, jointly control the allocation. Three choices govern its lifecycle: +You create allocations by fetching an `AllocationFactory` from the registry and exercising `AllocationFactory_Allocate`. Use the [Token Standard API](/overview/reference/splice-wallet-reference#wallet-api-endpoints) to discover and interact with allocation contracts. The settlement's `executor` party, along with the sender and receiver, jointly control the allocation. Three choices govern its lifecycle: - `Allocation_ExecuteTransfer` -- execute the transfer of allocated assets (the settlement step) - `Allocation_Cancel` -- release the allocation early, controlled jointly by sender, receiver, and executor @@ -136,7 +136,7 @@ To execute a factory choice, you first query the registry's off-ledger API to ob - `disclosedContracts` (required for the exercise to succeed) - `choiceContextData` (passed as `context` in the choice argument) -Registry URLs for a given instrument are currently maintained by wallets themselves. A generic discovery mechanism based on the [Canton Name Service](/docs-main/overview/reference/canton-name-service) is planned, where registry URLs would be stored in CNS entry metadata and retrievable through the Scan API. +Registry URLs for a given instrument are currently maintained by wallets themselves. A generic discovery mechanism based on the [Canton Name Service](/overview/reference/canton-name-service) is planned, where registry URLs would be stored in CNS entry metadata and retrievable through the Scan API. The off-ledger APIs do not require authentication. Instead, the security model relies on the unguessability of contract IDs and Canton's need-to-know data distribution -- only parties entitled to see a contract receive it. 
@@ -170,4 +170,4 @@ The [Token Standard CLI](https://github.com/hyperledger-labs/splice/tree/main/to - [Full CIP-0056 text](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0056/cip-0056.md) - [Token standard source code](https://github.com/hyperledger-labs/splice/tree/main/token-standard) - [Splice wallet kernel](https://github.com/hyperledger-labs/splice-wallet-kernel) -- [Canton Coin Tokenomics](/docs-main/overview/reference/canton-coin-tokenomics) -- fee structure and UTXO dust expiry for CC specifically +- [Canton Coin Tokenomics](/overview/reference/canton-coin-tokenomics) -- fee structure and UTXO dust expiry for CC specifically diff --git a/docs-main/overview/reference/cip-index.mdx b/docs-main/overview/reference/cip-index.mdx index 238a1b4bc..3aa4e147d 100644 --- a/docs-main/overview/reference/cip-index.mdx +++ b/docs-main/overview/reference/cip-index.mdx @@ -3,7 +3,7 @@ title: "CIP Index" description: "Reference index of Canton Improvement Proposals (CIPs) by type, number, and status" --- -This page catalogs Canton Improvement Proposals across all types. The canonical source of truth is the [CIP GitHub repository](https://github.com/global-synchronizer-foundation/cips), where each CIP has its own directory containing the full proposal text. For background on what CIPs are and how they work, see [What are CIPs?](/docs-main/overview/reference/what-are-cips). To submit a new proposal, see the [CIP Reference](/docs-main/overview/reference/what-are-cips#how-to-propose-a-cip). +This page catalogs Canton Improvement Proposals across all types. The canonical source of truth is the [CIP GitHub repository](https://github.com/global-synchronizer-foundation/cips), where each CIP has its own directory containing the full proposal text. For background on what CIPs are and how they work, see [What are CIPs?](/overview/reference/what-are-cips). To submit a new proposal, see the [CIP Reference](/overview/reference/what-are-cips#how-to-propose-a-cip). 
## Standards Track CIPs diff --git a/docs-main/overview/reference/cross-sync-dvp-example.mdx b/docs-main/overview/reference/cross-sync-dvp-example.mdx index 25bab9748..ea571a887 100644 --- a/docs-main/overview/reference/cross-sync-dvp-example.mdx +++ b/docs-main/overview/reference/cross-sync-dvp-example.mdx @@ -109,5 +109,5 @@ has the possibility of a contract being temporarily unusable, but they do not ca ## Further Reading -- [Reassignment Protocol](/docs-main/overview/reference/reassignment-protocol) -- detailed protocol mechanics for unassignment and assignment -- [Architecture Overview](/docs-main/overview/learn/architecture) -- how synchronizers, validators, and the Global Synchronizer fit together +- [Reassignment Protocol](/overview/reference/reassignment-protocol) -- detailed protocol mechanics for unassignment and assignment +- [Architecture Overview](/overview/learn/architecture) -- how synchronizers, validators, and the Global Synchronizer fit together diff --git a/docs-main/overview/reference/gsf-policies.mdx b/docs-main/overview/reference/gsf-policies.mdx index ed123642d..5b9a78591 100644 --- a/docs-main/overview/reference/gsf-policies.mdx +++ b/docs-main/overview/reference/gsf-policies.mdx @@ -44,7 +44,7 @@ Premier members additionally have access to Board Committees and can appoint del The Global Synchronizer is operated by independently acting organizations called Super Validators. These organizations run the core infrastructure -- sequencers, mediators, and SV application nodes -- and participate in governance through an on-chain governance application. -Governance actions are executed through the DSO (Decentralized Synchronizer Operations) party, a decentralized Daml party with a confirmation threshold of approximately 2/3 of onboarded Super Validators. No single entity, including the GSF, can make unilateral changes. 
For a detailed breakdown of the DSO party, confirmation protocols, and voting mechanics, see [SV Governance Reference](/docs-main/overview/reference/sv-governance-reference). +Governance actions are executed through the DSO (Decentralized Synchronizer Operations) party, a decentralized Daml party with a confirmation threshold of approximately 2/3 of onboarded Super Validators. No single entity, including the GSF, can make unilateral changes. For a detailed breakdown of the DSO party, confirmation protocols, and voting mechanics, see [SV Governance Reference](/overview/reference/sv-governance-reference). The governance framework covers: @@ -108,7 +108,7 @@ Super Validators that do not upgrade in time can cause operational issues. For e Changes to network rules, standards, and protocols are proposed through Canton Improvement Proposals (CIPs). The CIP process provides a structured way for anyone in the ecosystem to propose changes, with final ratification by Super Validator vote. -For details on the CIP process, including how to propose one, see [CIP Reference](/docs-main/overview/reference/what-are-cips). The full list of CIPs is maintained at [github.com/global-synchronizer-foundation/cips](https://github.com/global-synchronizer-foundation/cips). +For details on the CIP process, including how to propose one, see [CIP Reference](/overview/reference/what-are-cips). The full list of CIPs is maintained at [github.com/global-synchronizer-foundation/cips](https://github.com/global-synchronizer-foundation/cips). 
## Communication channels @@ -124,4 +124,4 @@ The GSF maintains several channels for validator operators and ecosystem partici - [Canton Network](https://canton.network) -- network overview and entry point - [GSF configs repository](https://github.com/global-synchronizer-foundation/configs) -- network configuration parameters - [CIP repository](https://github.com/global-synchronizer-foundation/cips) -- Canton Improvement Proposals -- [SV Governance Reference](/docs-main/overview/reference/sv-governance-reference) -- technical details on the DSO party and voting mechanics +- [SV Governance Reference](/overview/reference/sv-governance-reference) -- technical details on the DSO party and voting mechanics diff --git a/docs-main/overview/reference/ordering-consensus.mdx b/docs-main/overview/reference/ordering-consensus.mdx index 881ab1a14..2c8c78237 100644 --- a/docs-main/overview/reference/ordering-consensus.mdx +++ b/docs-main/overview/reference/ordering-consensus.mdx @@ -3,7 +3,7 @@ title: "Ordering Consensus" description: "Synchronizer ordering architecture — sequencers, mediators, BFT ordering service, and the ISS-inspired consensus protocol" --- -The ordering consensus layer is one half of Canton's [two-layer consensus architecture](/docs-main/overview/reference/canton-protocol-specification). While the smart contract consensus layer validates transaction correctness among affected parties, the ordering layer establishes a single, global sequence of events on a given synchronizer. It does so without accessing transaction content — payloads remain encrypted end-to-end, and the ordering infrastructure sees only metadata. +The ordering consensus layer is one half of Canton's [two-layer consensus architecture](/overview/reference/canton-protocol-specification). While the smart contract consensus layer validates transaction correctness among affected parties, the ordering layer establishes a single, global sequence of events on a given synchronizer. 
It does so without accessing transaction content — payloads remain encrypted end-to-end, and the ordering infrastructure sees only metadata. ## Synchronizer Components diff --git a/docs-main/overview/reference/smart-contract-consensus.mdx b/docs-main/overview/reference/smart-contract-consensus.mdx index 9ae7b1c8e..7cd2f72e1 100644 --- a/docs-main/overview/reference/smart-contract-consensus.mdx +++ b/docs-main/overview/reference/smart-contract-consensus.mdx @@ -5,7 +5,7 @@ description: "Proof of Stakeholder consensus in Canton — peer-to-peer validati Canton's smart contract consensus layer determines *who validates a transaction* and *how their confirmations produce a binding result*. The mechanism is called **Proof of Stakeholder**: only the participant nodes hosting parties with a stake in the affected contracts participate in validation and confirmation. No global replication of state or computation is required. -This page specifies the validation logic, confirmation policies, and security properties of the smart contract consensus layer. For transaction ordering and sequencer mechanics, see [Ordering Consensus](/docs-main/overview/reference/ordering-consensus). For the end-to-end message flow, see [Transaction Lifecycle](/docs-main/overview/reference/transaction-lifecycle). +This page specifies the validation logic, confirmation policies, and security properties of the smart contract consensus layer. For transaction ordering and sequencer mechanics, see [Ordering Consensus](/overview/reference/ordering-consensus). For the end-to-end message flow, see [Transaction Lifecycle](/overview/reference/transaction-lifecycle). ## Proof of Stakeholder @@ -18,7 +18,7 @@ Stakeholder-scoped validation produces two properties simultaneously: - **Privacy**: non-stakeholders receive no information about the transaction — the payload content remains private. 
- **Scalability**: the validation workload for any given transaction is bounded by the number of stakeholders, not by the total number of nodes in the network. -The ordering layer (sequencer and mediator) handles global consistency — preventing double-spends and establishing a total order of state changes. The smart contract consensus layer handles correctness — ensuring the Daml logic was executed faithfully and authorization rules were satisfied. This separation is what the [Canton Protocol Specification](/docs-main/overview/reference/canton-protocol-specification) calls two-layer consensus. +The ordering layer (sequencer and mediator) handles global consistency — preventing double-spends and establishing a total order of state changes. The smart contract consensus layer handles correctness — ensuring the Daml logic was executed faithfully and authorization rules were satisfied. This separation is what the [Canton Protocol Specification](/overview/reference/canton-protocol-specification) calls two-layer consensus. ## Peer-to-Peer Validation @@ -125,6 +125,6 @@ Proof of Stakeholder provides the following guarantees, assuming the underlying **Atomicity.** The mediator's verdict is all-or-nothing. Either every action in the transaction commits, or none does. There is no partial commit state. -**Liveness.** Transaction progress depends on the availability of the sequencer (for message ordering and distribution), the mediator (for verdict aggregation), and sufficiently many confirming participants (to meet each signatory's confirmation threshold). If any of these components are unavailable beyond the protocol's timeout, the transaction aborts. Liveness of the ordering layer is analyzed in [Ordering Consensus](/docs-main/overview/reference/ordering-consensus). 
+**Liveness.** Transaction progress depends on the availability of the sequencer (for message ordering and distribution), the mediator (for verdict aggregation), and sufficiently many confirming participants (to meet each signatory's confirmation threshold). If any of these components are unavailable beyond the protocol's timeout, the transaction aborts. Liveness of the ordering layer is analyzed in [Ordering Consensus](/overview/reference/ordering-consensus). **Consistency.** Double-spend protection operates at two levels. Each confirming participant checks consumed contracts against its local ACS. The sequencer's total ordering ensures that conflicting transactions are sequenced, so participants observe a consistent history. If two transactions attempt to consume the same contract, the one sequenced second will find the contract already archived and reject. diff --git a/docs-main/overview/reference/splice-wallet-reference.mdx b/docs-main/overview/reference/splice-wallet-reference.mdx index e007a1769..19c38d1ef 100644 --- a/docs-main/overview/reference/splice-wallet-reference.mdx +++ b/docs-main/overview/reference/splice-wallet-reference.mdx @@ -111,4 +111,4 @@ The high-level flow for an external party: For each transfer, the caller must prepare the transaction (obtaining a hash), sign the hash with the external party's private key, and submit the signed transaction back to the participant. See the [validator-internal.yaml](https://raw.githubusercontent.com/hyperledger-labs/splice/refs/heads/main/apps/validator/src/main/openapi/validator-internal.yaml) OpenAPI spec for the full endpoint list. -For building custom wallet integrations using the TypeScript Wallet SDK, see the [Wallet SDK documentation](/docs-main/integrations/wallet/sdk-download). +For building custom wallet integrations using the TypeScript Wallet SDK, see the [Wallet SDK documentation](/integrations/wallet/sdk-download). 
diff --git a/docs-main/overview/reference/super-validator-components.mdx b/docs-main/overview/reference/super-validator-components.mdx index 782c34bc5..e10905375 100644 --- a/docs-main/overview/reference/super-validator-components.mdx +++ b/docs-main/overview/reference/super-validator-components.mdx @@ -57,7 +57,7 @@ flowchart TB ## Validator Stack -Every SV includes the full validator stack. For a detailed breakdown, see the [Validator Node Components](/docs-main/overview/reference/validator-node-components) reference. +Every SV includes the full validator stack. For a detailed breakdown, see the [Validator Node Components](/overview/reference/validator-node-components) reference. The validator layer provides: @@ -95,7 +95,7 @@ The BFT orderer node participates in the consensus protocol that establishes the CometBFT nodes maintain peer-to-peer connections with all other SV CometBFT nodes and communicate over a dedicated TCP gossip/consensus channel (separate from the HTTPS APIs used by validators). The BFT consensus requires agreement from more than two-thirds of the SV nodes to produce a block, meaning the system tolerates up to `f` Byzantine (faulty or malicious) nodes where `f = floor((n-1)/3)` and `n` is the total number of SVs. -For more on how ordering consensus works, see [Ordering Consensus](/docs-main/overview/reference/ordering-consensus). +For more on how ordering consensus works, see [Ordering Consensus](/overview/reference/ordering-consensus). ## SV-Specific Applications diff --git a/docs-main/overview/reference/sv-governance-reference.mdx b/docs-main/overview/reference/sv-governance-reference.mdx index 086273bdf..dca0f2749 100644 --- a/docs-main/overview/reference/sv-governance-reference.mdx +++ b/docs-main/overview/reference/sv-governance-reference.mdx @@ -123,7 +123,7 @@ Traffic parameters require periodic recalibration. 
Actual traffic costs change d Formal changes to Canton Network standards and protocols go through the Canton Improvement Proposal (CIP) process. CIPs cover technical specifications, governance procedures, and informational guidelines. SVs vote on CIP adoption through the same on-chain governance mechanism described above. -For a full overview of the CIP lifecycle and how to participate, see [What Are CIPs?](/docs-main/overview/reference/what-are-cips). The CIP repository is maintained at [github.com/global-synchronizer-foundation/cips](https://github.com/global-synchronizer-foundation/cips). +For a full overview of the CIP lifecycle and how to participate, see [What Are CIPs?](/overview/reference/what-are-cips). The CIP repository is maintained at [github.com/global-synchronizer-foundation/cips](https://github.com/global-synchronizer-foundation/cips). ## Global Synchronizer Foundation diff --git a/docs-main/overview/reference/transaction-lifecycle.mdx b/docs-main/overview/reference/transaction-lifecycle.mdx index f6118494d..74b624fcd 100644 --- a/docs-main/overview/reference/transaction-lifecycle.mdx +++ b/docs-main/overview/reference/transaction-lifecycle.mdx @@ -77,7 +77,7 @@ The lifecycle begins when the submitting participant node receives a command fro - **Computes a root hash** that cryptographically commits to the entire transaction tree. The root hash binds all views together: every confirming participant can verify that the view it received belongs to the same transaction that every other participant is validating. The mediator uses the root hash to correlate confirmation responses. -If the transaction touches contracts on multiple synchronizers, the participant may need to reassign those contracts to a common synchronizer before submission. This is handled by the [Reassignment Protocol](/docs-main/overview/reference/reassignment-protocol). 
+If the transaction touches contracts on multiple synchronizers, the participant may need to reassign those contracts to a common synchronizer before submission. This is handled by the [Reassignment Protocol](/overview/reference/reassignment-protocol). ## Phase 2: Submission (Confirmation Request) diff --git a/docs-main/overview/reference/validator-node-components.mdx b/docs-main/overview/reference/validator-node-components.mdx index 3ba538872..94d2674c4 100644 --- a/docs-main/overview/reference/validator-node-components.mdx +++ b/docs-main/overview/reference/validator-node-components.mdx @@ -160,7 +160,7 @@ The protocol layer handles the multi-step Canton transaction protocol: - **Confirmation**: participants send approval or rejection responses via the sequencer to the mediator - **Result**: the mediator aggregates responses and declares commit or rollback via the sequencer; participants apply the outcome -For a full description of these steps, see [Transaction Lifecycle](/docs-main/overview/reference/transaction-lifecycle). +For a full description of these steps, see [Transaction Lifecycle](/overview/reference/transaction-lifecycle). ## APIs @@ -216,7 +216,7 @@ The participant stores its state in PostgreSQL databases. The primary stores inc - **Ledger store** -- committed transactions and the ACS - **Sequencer client store** -- messages received from synchronizers -- **Topology store** -- identity mappings, key registrations, and party-to-participant assignments (see [Topology](/docs-main/overview/reference/topology)) +- **Topology store** -- identity mappings, key registrations, and party-to-participant assignments (see [Topology](/overview/reference/topology)) - **Validator app store** -- the Validator App's own operational state The Splice application layer (Validator App, wallet automation) uses additional database schemas within the same PostgreSQL instance. 
@@ -287,19 +287,19 @@ From the participant's perspective, the Splice DARs are just Daml packages like - + How the Canton protocol processes a transaction from submission to commit. - + Identity management, key registrations, and party-to-participant mappings. - + Additional components that Super Validators operate beyond a standard validator. - + Minting, burning, rewards, and the burn-mint equilibrium mechanism. diff --git a/docs-main/overview/reference/what-are-cips.mdx b/docs-main/overview/reference/what-are-cips.mdx index ca4b9a20a..84e198db0 100644 --- a/docs-main/overview/reference/what-are-cips.mdx +++ b/docs-main/overview/reference/what-are-cips.mdx @@ -7,7 +7,7 @@ Canton Improvement Proposals (CIPs) are formal design documents that describe st The CIP process is modeled on established improvement proposal frameworks (such as Ethereum's EIPs and Python's PEPs) and is formally defined in [CIP-0000](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0000/cip-0000.md). All CIPs are maintained in a [public GitHub repository](https://github.com/global-synchronizer-foundation/cips) managed by the Global Synchronizer Foundation. -For an introductory overview of CIPs and why they matter, see the [CIPs introduction](/docs-main/overview/understand/cips-introduction). +For an introductory overview of CIPs and why they matter, see the [CIPs introduction](/overview/understand/cips-introduction). ## CIP types @@ -60,7 +60,7 @@ Once the discussion period has elapsed and two Super Validators have agreed to s ## Relationship to on-chain governance -CIP approval through the vote process is not the final step for proposals that require on-chain changes. Once a CIP is approved, Super Validators must adopt it through an on-chain governance action — the same BFT voting mechanism described in the [SV governance reference](/docs-main/overview/reference/sv-governance-reference). 
A CIP reaches Final status only after two-thirds of Super Validators have implemented the change on-chain. +CIP approval through the vote process is not the final step for proposals that require on-chain changes. Once a CIP is approved, Super Validators must adopt it through an on-chain governance action — the same BFT voting mechanism described in the [SV governance reference](/overview/reference/sv-governance-reference). A CIP reaches Final status only after two-thirds of Super Validators have implemented the change on-chain. This two-phase structure separates the design consensus (off-chain CIP vote) from the operational commitment (on-chain adoption), ensuring that approved proposals are actually deployed before becoming authoritative. On-chain implementation results are visible through the Canton Coin Scan APIs, so you can verify whether a given CIP has reached Final status by checking adoption across Super Validators. @@ -96,7 +96,7 @@ The following CIPs illustrate the range of topics the process covers. 
- [CIP repository on GitHub](https://github.com/global-synchronizer-foundation/cips) — All CIP documents and the canonical CIP-0000 process definition - [CIP-0000: CIP Process](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0000/cip-0000.md) — The authoritative specification of the CIP process, including editorial team, voting rules, and format requirements - [GSF mailing lists](https://lists.sync.global/) — The `cip-discuss` and `cip-vote` lists where CIP discussion and voting take place -- [SV governance reference](/docs-main/overview/reference/sv-governance-reference) — On-chain governance mechanics that CIPs feed into after approval +- [SV governance reference](/overview/reference/sv-governance-reference) — On-chain governance mechanics that CIPs feed into after approval ## How to propose a CIP diff --git a/docs-main/overview/understand/canton-coin.mdx b/docs-main/overview/understand/canton-coin.mdx index 148b42fb0..b22278df4 100644 --- a/docs-main/overview/understand/canton-coin.mdx +++ b/docs-main/overview/understand/canton-coin.mdx @@ -160,11 +160,11 @@ Unlike most cryptocurrencies where balances are publicly visible: - + Manage your Canton Coin. - + Operate and earn rewards. diff --git a/docs-main/overview/understand/cantons-solution.mdx b/docs-main/overview/understand/cantons-solution.mdx index cbc6d92c1..b71470a7a 100644 --- a/docs-main/overview/understand/cantons-solution.mdx +++ b/docs-main/overview/understand/cantons-solution.mdx @@ -204,19 +204,19 @@ Meet data protection requirements while maintaining shared truth: - + See concrete examples of Canton in action. - + Learn about parties, validators, and synchronizers. - + Understand how components work together technically. - + Explore the privacy guarantees in detail. 
diff --git a/docs-main/overview/understand/core-concepts.mdx b/docs-main/overview/understand/core-concepts.mdx index 0462b771c..da86b0032 100644 --- a/docs-main/overview/understand/core-concepts.mdx +++ b/docs-main/overview/understand/core-concepts.mdx @@ -242,19 +242,19 @@ sequenceDiagram - + See how components work together technically. - + Learn about the public coordination layer. - + Understand sub-transaction privacy in detail. - + Begin developing on Canton. diff --git a/docs-main/overview/understand/five-minute-overview.mdx b/docs-main/overview/understand/five-minute-overview.mdx index 3f1786403..77d42963d 100644 --- a/docs-main/overview/understand/five-minute-overview.mdx +++ b/docs-main/overview/understand/five-minute-overview.mdx @@ -120,19 +120,19 @@ If you're coming from other blockchains: - + Understand the problem Canton solves in depth. - + Learn about parties, validators, synchronizers, and smart contracts. - + Translate your blockchain knowledge to Canton. - + See how components work together. diff --git a/docs-main/overview/understand/getting-app-featured.mdx b/docs-main/overview/understand/getting-app-featured.mdx index 682d7832b..ef2593800 100644 --- a/docs-main/overview/understand/getting-app-featured.mdx +++ b/docs-main/overview/understand/getting-app-featured.mdx @@ -51,7 +51,7 @@ Misleading claims damage trust. Abandoned projects frustrate users. Poor securit - + Start developing your application. 
diff --git a/docs-main/overview/understand/global-synchronizer.mdx b/docs-main/overview/understand/global-synchronizer.mdx index be08d26e8..1e29f7cbc 100644 --- a/docs-main/overview/understand/global-synchronizer.mdx +++ b/docs-main/overview/understand/global-synchronizer.mdx @@ -279,9 +279,9 @@ The Global Synchronizer and validators currently have frequent upgrades with the ## Next Steps -- **[Glossary](/docs-main/overview/understand/glossary)** - Terminology reference -- **[Validator Operations](/docs-main/global-synchronizer/understand/introduction)** - Deploy your own validator -- **[Deployment Progression](/docs-main/appdev/modules/m5-deployment-progression)** - Deploy applications across environments +- **[Glossary](/overview/understand/glossary)** - Terminology reference +- **[Validator Operations](/global-synchronizer/understand/introduction)** - Deploy your own validator +- **[Deployment Progression](/appdev/modules/m5-deployment-progression)** - Deploy applications across environments {/* COPIED_START source="docs-website:docs/replicated/canton/3.4/overview/explanations/canton/synchronizers.rst" hash="ff3b68a4" */} diff --git a/docs-main/overview/understand/the-problem.mdx b/docs-main/overview/understand/the-problem.mdx index cb0e550c8..f02fde719 100644 --- a/docs-main/overview/understand/the-problem.mdx +++ b/docs-main/overview/understand/the-problem.mdx @@ -138,11 +138,11 @@ Canton calls this approach **sub-transaction privacy**. Each party sees only the - + Learn how Canton resolves the privacy-integrity tradeoff. - + Deep dive into sub-transaction privacy mechanics. diff --git a/docs-main/overview/understand/use-cases.mdx b/docs-main/overview/understand/use-cases.mdx index 18ebadf25..87384ed21 100644 --- a/docs-main/overview/understand/use-cases.mdx +++ b/docs-main/overview/understand/use-cases.mdx @@ -216,11 +216,11 @@ Consider alternatives if you need: - + Understand parties, validators, and synchronizers. - + Begin your development journey. 
diff --git a/docs-main/overview/understand/what-is-canton.mdx b/docs-main/overview/understand/what-is-canton.mdx index 256231dad..8c726c47f 100644 --- a/docs-main/overview/understand/what-is-canton.mdx +++ b/docs-main/overview/understand/what-is-canton.mdx @@ -111,6 +111,6 @@ This institutional backing validates Canton's approach for enterprise use cases, ## Next Steps -- **[Canton for Blockchain Developers](/docs-main/appdev/modules/m2-canton-for-ethereum-devs)** - Map your existing blockchain knowledge to Canton concepts -- **[Architecture Overview](/docs-main/overview/learn/architecture)** - Understand how Canton's components work together -- **[Privacy Model Explained](/docs-main/overview/learn/privacy-model)** - Deep dive into sub-transaction privacy +- **[Canton for Blockchain Developers](/appdev/modules/m2-canton-for-ethereum-devs)** - Map your existing blockchain knowledge to Canton concepts +- **[Architecture Overview](/overview/learn/architecture)** - Understand how Canton's components work together +- **[Privacy Model Explained](/overview/learn/privacy-model)** - Deep dive into sub-transaction privacy diff --git a/docs-main/overview/understand/who-should-read.mdx b/docs-main/overview/understand/who-should-read.mdx index b434cf5f9..a1e206c88 100644 --- a/docs-main/overview/understand/who-should-read.mdx +++ b/docs-main/overview/understand/who-should-read.mdx @@ -9,7 +9,7 @@ This documentation serves multiple audiences building on or operating Canton Net - + Build applications on Canton Network using Daml smart contracts and the Ledger API. **Start here if you want to:** @@ -18,7 +18,7 @@ This documentation serves multiple audiences building on or operating Canton Net - Understand Canton's privacy model for application design - + Translate your blockchain knowledge to Canton concepts and patterns. 
**Start here if you:** @@ -27,7 +27,7 @@ This documentation serves multiple audiences building on or operating Canton Net - Need to adapt existing mental models - + Run validator infrastructure on the Global Synchronizer. **Start here if you want to:** @@ -36,7 +36,7 @@ This documentation serves multiple audiences building on or operating Canton Net - Join the Canton Network as an operator - + Evaluate Canton for enterprise use cases. **Start here if you need to:** @@ -51,13 +51,13 @@ This documentation serves multiple audiences building on or operating Canton Net | I want to... | Start here | |--------------|------------| -| Understand what Canton is in 5 minutes | [Five-Minute Overview](/docs-main/overview/understand/five-minute-overview) | -| Learn why privacy matters for blockchain | [The Problem Canton Solves](/docs-main/overview/understand/the-problem) | -| See how Canton components work together | [Architecture Overview](/docs-main/overview/learn/architecture) | -| Write my first smart contract | [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment) | -| Run the example application | [QuickStart](/docs-main/appdev/quickstart) | -| Deploy a validator node | [Validator Setup](/docs-main/global-synchronizer/understand/introduction) | -| Integrate a wallet into my app | [Integrations Overview](/docs-main/integrations/overview) | +| Understand what Canton is in 5 minutes | [Five-Minute Overview](/overview/understand/five-minute-overview) | +| Learn why privacy matters for blockchain | [The Problem Canton Solves](/overview/understand/the-problem) | +| See how Canton components work together | [Architecture Overview](/overview/learn/architecture) | +| Write my first smart contract | [Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment) | +| Run the example application | [QuickStart](/appdev/quickstart) | +| Deploy a validator node | [Validator Setup](/global-synchronizer/understand/introduction) | +| Integrate a wallet into 
my app | [Integrations Overview](/integrations/overview) | ## Documentation Structure @@ -87,11 +87,11 @@ Before diving in, you should have: {/* TODO: Add Slack link once available */} - + Join the developer and operator community channels. - + Contact Digital Asset support for enterprise assistance. diff --git a/docs-main/sdks-tools/api-reference/admin-api.mdx b/docs-main/sdks-tools/api-reference/admin-api.mdx index 417273801..3e04d7848 100644 --- a/docs-main/sdks-tools/api-reference/admin-api.mdx +++ b/docs-main/sdks-tools/api-reference/admin-api.mdx @@ -6,7 +6,7 @@ description: Reference documentation for the Canton Admin API, covering node adm import DamlDocsSdksToolsApiReferenceAdminApiL47 from "/snippets/daml-docs/sdks-tools_api-reference_admin-api_L47.mdx"; -The Canton Admin API is a gRPC API exposed by every Canton node (participant, sequencer, mediator). It provides administrative operations that go beyond the [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) — managing keys, topology state, synchronizer connections, packages, users, pruning, and node health. Unlike the Ledger API, which handles command submission and transaction reads, the Admin API controls the node itself. +The Canton Admin API is a gRPC API exposed by every Canton node (participant, sequencer, mediator). It provides administrative operations that go beyond the [Ledger API](/sdks-tools/api-reference/ledger-api) — managing keys, topology state, synchronizer connections, packages, users, pruning, and node health. Unlike the Ledger API, which handles command submission and transaction reads, the Admin API controls the node itself. Do not expose the Admin API publicly without proper security controls. It serves administrative purposes only and grants full control over the node. Use TLS and restrict network access to trusted operators. @@ -22,7 +22,7 @@ These services run on participant nodes alongside the Ledger API. 
They are defin ### Canton-Specific Admin Services -Canton extends the standard Ledger API admin services with operations specific to Canton's architecture. Access these through the [Canton Console](/docs-main/global-synchronizer/reference/canton-console-reference) or directly via gRPC. +Canton extends the standard Ledger API admin services with operations specific to Canton's architecture. Access these through the [Canton Console](/global-synchronizer/reference/canton-console-reference) or directly via gRPC. - **Key management** — generate, import, export, and rotate signing and encryption keys. List keys by purpose (signing, encryption) and view key metadata. - **Topology management** — inspect and modify the topology state: party-to-participant mappings, namespace delegations, synchronizer trust certificates, and package vetting. Topology transactions are the mechanism through which Canton nodes discover each other's capabilities. @@ -51,23 +51,23 @@ While the Admin API is primarily an operator tool, app developers interact with Or via gRPC using the `PartyManagementService.AllocateParty` RPC. -**DAR uploads** — your compiled Daml packages must be uploaded to the participant before contracts can be created. Note that `dpm` does not currently have a `deploy` command for uploading DARs to remote validators. For DAR uploads, use the Admin API directly (via `PackageManagementService.UploadDarFile` gRPC call, `curl`, or the [Canton Console](/docs-main/global-synchronizer/reference/canton-console-reference)). +**DAR uploads** — your compiled Daml packages must be uploaded to the participant before contracts can be created. Note that `dpm` does not currently have a `deploy` command for uploading DARs to remote validators. For DAR uploads, use the Admin API directly (via `PackageManagementService.UploadDarFile` gRPC call, `curl`, or the [Canton Console](/global-synchronizer/reference/canton-console-reference)). 
**User management** — the Ledger API authorizes requests based on user rights (actAs, readAs, executeAs, readAsAnyParty, executeAsAnyParty). Create and configure users through `UserManagementService` to control which parties your application can operate as. ## Related Pages - + gRPC API for submitting commands and reading transactions - + HTTP/REST wrapper for the Ledger API - + Scan, Validator, and SV REST APIs - + Console commands for interacting with the Admin API diff --git a/docs-main/sdks-tools/api-reference/json-api.mdx b/docs-main/sdks-tools/api-reference/json-api.mdx index 386ac9c04..14faebb2f 100644 --- a/docs-main/sdks-tools/api-reference/json-api.mdx +++ b/docs-main/sdks-tools/api-reference/json-api.mdx @@ -137,6 +137,6 @@ The token must be valid for the OIDC provider configured for your deployment. ## Related Pages -- [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) — gRPC Ledger API overview +- [Ledger API](/sdks-tools/api-reference/ledger-api) — gRPC Ledger API overview - [Ledger API Reference (AppDev)](/api-reference) — Detailed service documentation -- [Wallet Configuration](/docs-main/integrations/wallet/configuration) — SDK configuration including JSON API endpoints +- [Wallet Configuration](/integrations/wallet/configuration) — SDK configuration including JSON API endpoints diff --git a/docs-main/sdks-tools/api-reference/ledger-api.mdx b/docs-main/sdks-tools/api-reference/ledger-api.mdx index f31cf494a..264885d16 100644 --- a/docs-main/sdks-tools/api-reference/ledger-api.mdx +++ b/docs-main/sdks-tools/api-reference/ledger-api.mdx @@ -57,5 +57,5 @@ gRPC proto files for the Ledger API are available in the [Canton repository](htt ## Related Pages - [Ledger API Reference (AppDev)](/api-reference) — Detailed service documentation -- [JSON API](/docs-main/sdks-tools/api-reference/json-api) — HTTP/REST wrapper for the Ledger API -- [Admin API](/docs-main/sdks-tools/api-reference/admin-api) — Node administration API +- [JSON 
API](/sdks-tools/api-reference/json-api) — HTTP/REST wrapper for the Ledger API +- [Admin API](/sdks-tools/api-reference/admin-api) — Node administration API diff --git a/docs-main/sdks-tools/api-reference/splice-apis.mdx b/docs-main/sdks-tools/api-reference/splice-apis.mdx index 6e56e187c..30155312f 100644 --- a/docs-main/sdks-tools/api-reference/splice-apis.mdx +++ b/docs-main/sdks-tools/api-reference/splice-apis.mdx @@ -3,7 +3,7 @@ title: Splice APIs description: Reference documentation for the Scan, Validator, and SV REST APIs that run on Canton Network nodes. --- -Splice nodes expose REST APIs that are separate from the [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) and the [Admin API](/docs-main/sdks-tools/api-reference/admin-api). These APIs are served by the Splice application layer running on top of Canton and provide access to Canton Network-specific functionality: querying network state, managing wallets, operating validators, and governing the network as a super validator (SV). +Splice nodes expose REST APIs that are separate from the [Ledger API](/sdks-tools/api-reference/ledger-api) and the [Admin API](/sdks-tools/api-reference/admin-api). These APIs are served by the Splice application layer running on top of Canton and provide access to Canton Network-specific functionality: querying network state, managing wallets, operating validators, and governing the network as a super validator (SV). Three main APIs exist, each tied to a different Splice application component: @@ -381,6 +381,6 @@ Endpoints in files named `*-external` carry backwards compatibility guarantees. 
## Related Pages -- [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) — gRPC API for submitting commands and reading the transaction stream -- [Admin API](/docs-main/sdks-tools/api-reference/admin-api) — Canton node administration -- [JSON API](/docs-main/sdks-tools/api-reference/json-api) — HTTP/REST wrapper for the Ledger API +- [Ledger API](/sdks-tools/api-reference/ledger-api) — gRPC API for submitting commands and reading the transaction stream +- [Admin API](/sdks-tools/api-reference/admin-api) — Canton node administration +- [JSON API](/sdks-tools/api-reference/json-api) — HTTP/REST wrapper for the Ledger API diff --git a/docs-main/sdks-tools/cli-tools/canton-console.mdx b/docs-main/sdks-tools/cli-tools/canton-console.mdx index 2497cd841..bf539e279 100644 --- a/docs-main/sdks-tools/cli-tools/canton-console.mdx +++ b/docs-main/sdks-tools/cli-tools/canton-console.mdx @@ -12,7 +12,7 @@ import CantonSdksToolsCliToolsCantonConsoleL287 from "/snippets/canton-docs/sdks The Canton Console is an interactive Scala REPL that connects to Canton nodes for administration, debugging, and automation. It provides direct access to participant, sequencer, and mediator operations through a typed API. -For full operational documentation, see the [Global Synchronizer section](/docs-main/global-synchronizer/understand/introduction), which covers node setup and management in detail. This page provides a brief orientation to the Console as a development and operations tool. +For full operational documentation, see the [Global Synchronizer section](/global-synchronizer/understand/introduction), which covers node setup and management in detail. This page provides a brief orientation to the Console as a development and operations tool. 
## Overview @@ -264,7 +264,7 @@ Scripts have access to the same node references and APIs as the interactive Cons - Health check routines that run periodically - Data export scripts for auditing -See [Scala bindings](/docs-main/sdks-tools/language-bindings/scala) for more on writing automation scripts. +See [Scala bindings](/sdks-tools/language-bindings/scala) for more on writing automation scripts. ## Tab Completion @@ -279,6 +279,6 @@ This is the fastest way to explore the API without referring to documentation. ## Related Pages -- [Global Synchronizer introduction](/docs-main/global-synchronizer/understand/introduction) -- Operational context for Console use -- [Scala bindings](/docs-main/sdks-tools/language-bindings/scala) -- Writing Scala automation scripts for the Console -- [Validator roles](/docs-main/global-synchronizer/understand/validator-roles) -- Understanding the nodes you manage through the Console +- [Global Synchronizer introduction](/global-synchronizer/understand/introduction) -- Operational context for Console use +- [Scala bindings](/sdks-tools/language-bindings/scala) -- Writing Scala automation scripts for the Console +- [Validator roles](/global-synchronizer/understand/validator-roles) -- Understanding the nodes you manage through the Console diff --git a/docs-main/sdks-tools/cli-tools/daml-script.mdx b/docs-main/sdks-tools/cli-tools/daml-script.mdx index c88d4f7ee..0b29df0d8 100644 --- a/docs-main/sdks-tools/cli-tools/daml-script.mdx +++ b/docs-main/sdks-tools/cli-tools/daml-script.mdx @@ -111,13 +111,13 @@ External template choices 8 ( 66.7%) exercised in any tests ``` -Aim for high coverage on all choices that will run in production. See the [testing guide](/docs-main/appdev/modules/m3-testing) for a detailed walkthrough. +Aim for high coverage on all choices that will run in production. See the [testing guide](/appdev/modules/m3-testing) for a detailed walkthrough. 
## Related Pages -- [Testing Daml Contracts](/docs-main/appdev/modules/m3-testing) -- Full testing tutorial with examples -- [Sandbox](/docs-main/sdks-tools/development-tools/sandbox) -- Integration testing beyond Daml Script -- [dpm command reference](/docs-main/sdks-tools/cli-tools/dpm) -- `dpm test` options and flags +- [Testing Daml Contracts](/appdev/modules/m3-testing) -- Full testing tutorial with examples +- [Sandbox](/sdks-tools/development-tools/sandbox) -- Integration testing beyond Daml Script +- [dpm command reference](/sdks-tools/cli-tools/dpm) -- `dpm test` options and flags {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/tutorials/smart-contracts/daml-scripts.rst" hash="aaa6a370" */} diff --git a/docs-main/sdks-tools/cli-tools/dpm.mdx b/docs-main/sdks-tools/cli-tools/dpm.mdx index 6e92b6f24..c185bfd75 100644 --- a/docs-main/sdks-tools/cli-tools/dpm.mdx +++ b/docs-main/sdks-tools/cli-tools/dpm.mdx @@ -127,10 +127,10 @@ A common development cycle with `dpm`: ## Related Pages -- [Daml SDK](/docs-main/sdks-tools/sdks/daml-sdk) -- What the SDK includes and how versions work -- [Sandbox](/docs-main/sdks-tools/development-tools/sandbox) -- Local testing environment started by `dpm sandbox` -- [Daml Studio](/docs-main/sdks-tools/development-tools/daml-studio) -- VS Code extension launched by `dpm studio` -- [Daml Script](/docs-main/sdks-tools/cli-tools/daml-script) -- Writing and running tests with `dpm test` +- [Daml SDK](/sdks-tools/sdks/daml-sdk) -- What the SDK includes and how versions work +- [Sandbox](/sdks-tools/development-tools/sandbox) -- Local testing environment started by `dpm sandbox` +- [Daml Studio](/sdks-tools/development-tools/daml-studio) -- VS Code extension launched by `dpm studio` +- [Daml Script](/sdks-tools/cli-tools/daml-script) -- Writing and running tests with `dpm test` {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/component-howtos/application-development/dpm-sandbox.rst" hash="5f8df40b" 
*/} diff --git a/docs-main/sdks-tools/development-tools/daml-studio.mdx b/docs-main/sdks-tools/development-tools/daml-studio.mdx index b9dc47a48..a320807dd 100644 --- a/docs-main/sdks-tools/development-tools/daml-studio.mdx +++ b/docs-main/sdks-tools/development-tools/daml-studio.mdx @@ -98,9 +98,9 @@ For projects with many packages or large dependency trees, the background type c ## Related Pages -- [dpm CLI reference](/docs-main/sdks-tools/cli-tools/dpm) -- `dpm studio` command and other CLI tools -- [Daml SDK](/docs-main/sdks-tools/sdks/daml-sdk) -- SDK installation and components -- [Development environment setup](/docs-main/appdev/modules/m3-dev-environment) -- Full setup guide for Daml development +- [dpm CLI reference](/sdks-tools/cli-tools/dpm) -- `dpm studio` command and other CLI tools +- [Daml SDK](/sdks-tools/sdks/daml-sdk) -- SDK installation and components +- [Development environment setup](/appdev/modules/m3-dev-environment) -- Full setup guide for Daml development {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/component-howtos/smart-contracts/daml-studio.rst" hash="f04f848f" */} diff --git a/docs-main/sdks-tools/development-tools/localnet.mdx b/docs-main/sdks-tools/development-tools/localnet.mdx index e4109e8fb..816239307 100644 --- a/docs-main/sdks-tools/development-tools/localnet.mdx +++ b/docs-main/sdks-tools/development-tools/localnet.mdx @@ -201,7 +201,7 @@ An empty response indicates a healthy service. - Splice API interactions (Scan, Validator APIs) - End-to-end flows with backend, frontend, and ledger -**Use the [Sandbox](/docs-main/sdks-tools/development-tools/sandbox)** when you need: +**Use the [Sandbox](/sdks-tools/development-tools/sandbox)** when you need: - Fast iteration on contract logic - Single-participant testing without Docker @@ -209,6 +209,6 @@ An empty response indicates a healthy service. 
## Related Pages -- [cn-quickstart](/docs-main/sdks-tools/reference-projects/cn-quickstart) -- Repository overview and project structure -- [Sandbox](/docs-main/sdks-tools/development-tools/sandbox) -- Lightweight single-node alternative -- [QuickStart walkthrough](/docs-main/appdev/quickstart/running-the-demo) -- Step-by-step guide to running the demo application +- [cn-quickstart](/sdks-tools/reference-projects/cn-quickstart) -- Repository overview and project structure +- [Sandbox](/sdks-tools/development-tools/sandbox) -- Lightweight single-node alternative +- [QuickStart walkthrough](/appdev/quickstart/running-the-demo) -- Step-by-step guide to running the demo application diff --git a/docs-main/sdks-tools/development-tools/pqs.mdx b/docs-main/sdks-tools/development-tools/pqs.mdx index 1e6ece32c..60db9937e 100644 --- a/docs-main/sdks-tools/development-tools/pqs.mdx +++ b/docs-main/sdks-tools/development-tools/pqs.mdx @@ -145,10 +145,10 @@ Now both PQS and the application can manage their own schema versions independen The cn-quickstart backend demonstrates PQS usage in the `repository/` and `pqs/` modules. The `Pqs` class generates SQL queries, and `DamlRepository` provides domain-specific methods that combine PQS reads with Ledger API writes. -See [Backend Development](/docs-main/appdev/modules/m4-backend-dev) for detailed code examples. +See [Backend Development](/appdev/modules/m4-backend-dev) for detailed code examples. 
## Related Pages -- [Backend development](/docs-main/appdev/modules/m4-backend-dev) -- Using PQS in a Java backend -- [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) -- The underlying transaction stream that PQS consumes -- [LocalNet](/docs-main/sdks-tools/development-tools/localnet) -- Pre-configured PQS instances for local development +- [Backend development](/appdev/modules/m4-backend-dev) -- Using PQS in a Java backend +- [Ledger API](/sdks-tools/api-reference/ledger-api) -- The underlying transaction stream that PQS consumes +- [LocalNet](/sdks-tools/development-tools/localnet) -- Pre-configured PQS instances for local development diff --git a/docs-main/sdks-tools/development-tools/sandbox.mdx b/docs-main/sdks-tools/development-tools/sandbox.mdx index d15e2e6ac..d6da863fe 100644 --- a/docs-main/sdks-tools/development-tools/sandbox.mdx +++ b/docs-main/sdks-tools/development-tools/sandbox.mdx @@ -26,7 +26,7 @@ The Sandbox uses the SDK version from your project's `daml.yaml`, so make sure y - Admin API for party allocation and DAR uploads - In-memory storage (state is lost when the Sandbox stops) -The Sandbox does not include a synchronizer, wallet services, or Canton Coin. For those features, use [LocalNet](/docs-main/sdks-tools/development-tools/localnet). +The Sandbox does not include a synchronizer, wallet services, or Canton Coin. For those features, use [LocalNet](/sdks-tools/development-tools/localnet). ## Common Development Workflows @@ -79,10 +79,10 @@ Press Ctrl+C in the terminal where the Sandbox is running. 
Since the Sandbox use ## Related Pages -- [dpm command reference](/docs-main/sdks-tools/cli-tools/dpm) -- `dpm sandbox` options -- [LocalNet](/docs-main/sdks-tools/development-tools/localnet) -- Full multi-validator environment for integration testing -- [Daml Script](/docs-main/sdks-tools/cli-tools/daml-script) -- In-process testing without a running node -- [Testing Daml Contracts](/docs-main/appdev/modules/m3-testing) -- Complete testing guide +- [dpm command reference](/sdks-tools/cli-tools/dpm) -- `dpm sandbox` options +- [LocalNet](/sdks-tools/development-tools/localnet) -- Full multi-validator environment for integration testing +- [Daml Script](/sdks-tools/cli-tools/daml-script) -- In-process testing without a running node +- [Testing Daml Contracts](/appdev/modules/m3-testing) -- Complete testing guide {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/component-howtos/application-development/dpm-sandbox.rst" hash="5f8df40b" */} diff --git a/docs-main/sdks-tools/language-bindings/community.mdx b/docs-main/sdks-tools/language-bindings/community.mdx index bdf4c0418..469c7e6cc 100644 --- a/docs-main/sdks-tools/language-bindings/community.mdx +++ b/docs-main/sdks-tools/language-bindings/community.mdx @@ -64,6 +64,6 @@ Community members often announce new bindings and major updates on the forum. 
If ## Related Pages -- [Java bindings](/docs-main/sdks-tools/language-bindings/java) -- Official code-generated Java bindings -- [TypeScript bindings](/docs-main/sdks-tools/language-bindings/typescript) -- Official code-generated TypeScript bindings -- [Ledger API reference](/docs-main/sdks-tools/api-reference/ledger-api) -- The underlying API that all bindings connect to +- [Java bindings](/sdks-tools/language-bindings/java) -- Official code-generated Java bindings +- [TypeScript bindings](/sdks-tools/language-bindings/typescript) -- Official code-generated TypeScript bindings +- [Ledger API reference](/sdks-tools/api-reference/ledger-api) -- The underlying API that all bindings connect to diff --git a/docs-main/sdks-tools/language-bindings/java.mdx b/docs-main/sdks-tools/language-bindings/java.mdx index a407c38bb..b63feac3b 100644 --- a/docs-main/sdks-tools/language-bindings/java.mdx +++ b/docs-main/sdks-tools/language-bindings/java.mdx @@ -128,9 +128,9 @@ The full Javadoc for the Daml Java bindings library is available in the [Canton ## Related Pages -- [dpm codegen-java reference](/docs-main/sdks-tools/cli-tools/dpm) -- Command options and flags -- [TypeScript bindings](/docs-main/sdks-tools/language-bindings/typescript) -- Alternative bindings for TypeScript/JavaScript -- [Backend development](/docs-main/appdev/modules/m4-backend-dev) -- Patterns for building Canton backends in Java +- [dpm codegen-java reference](/sdks-tools/cli-tools/dpm) -- Command options and flags +- [TypeScript bindings](/sdks-tools/language-bindings/typescript) -- Alternative bindings for TypeScript/JavaScript +- [Backend development](/appdev/modules/m4-backend-dev) -- Patterns for building Canton backends in Java {/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/component-howtos/application-development/daml-codegen-java.rst" hash="bbdcc685" */} diff --git a/docs-main/sdks-tools/language-bindings/scala.mdx b/docs-main/sdks-tools/language-bindings/scala.mdx index 
5b9b4fddf..980006332 100644 --- a/docs-main/sdks-tools/language-bindings/scala.mdx +++ b/docs-main/sdks-tools/language-bindings/scala.mdx @@ -18,7 +18,7 @@ The Canton Console is a Scala REPL (read-eval-print loop) that connects directly -Because the Console is a full Scala environment, you can write functions, import libraries, and build complex automation scripts without leaving the REPL. See [Canton Console](/docs-main/sdks-tools/cli-tools/canton-console) for more on using the Console interactively. +Because the Console is a full Scala environment, you can write functions, import libraries, and build complex automation scripts without leaving the REPL. See [Canton Console](/sdks-tools/cli-tools/canton-console) for more on using the Console interactively. ## When to Use Scala @@ -63,6 +63,6 @@ The Scaladoc for Canton's Scala APIs is published with each Canton release. It c ## Related Pages -- [Canton Console](/docs-main/sdks-tools/cli-tools/canton-console) -- Interactive node management via Scala REPL -- [Java bindings](/docs-main/sdks-tools/language-bindings/java) -- Code-generated bindings for JVM application backends -- [TypeScript bindings](/docs-main/sdks-tools/language-bindings/typescript) -- Code-generated bindings for Node.js and browser applications +- [Canton Console](/sdks-tools/cli-tools/canton-console) -- Interactive node management via Scala REPL +- [Java bindings](/sdks-tools/language-bindings/java) -- Code-generated bindings for JVM application backends +- [TypeScript bindings](/sdks-tools/language-bindings/typescript) -- Code-generated bindings for Node.js and browser applications diff --git a/docs-main/sdks-tools/overview.mdx b/docs-main/sdks-tools/overview.mdx index a11828808..f6987d5ab 100644 --- a/docs-main/sdks-tools/overview.mdx +++ b/docs-main/sdks-tools/overview.mdx @@ -47,13 +47,13 @@ flowchart TB The **Daml SDK** is the primary development kit. 
It includes the Daml compiler, Daml Script runner, PQS, the Canton Sandbox, and many other components. You install and manage it through `dpm install`. The **[Splice Wallet Kernel](https://github.com/hyperledger-labs/splice-wallet-kernel) SDK** provides a set of TypeScript libraries for dApp, wallet, and exchange libraries for integrating with the Canton Network. -It is covered in the [Integrations](/docs-main/integrations/overview) section. +It is covered in the [Integrations](/integrations/overview) section. ## CLI Tools **dpm** (Daml Package Manager) is the main command-line entry point for Canton development. It handles project initialization, dependency management, compilation, code generation, testing, and running the sandbox. Most developer workflows start with a `dpm` command. -**Canton Console** is a Scala-based REPL for managing Canton nodes directly. Operators and advanced developers use it for topology management, party administration, and debugging. See the [Global Synchronizer section](/docs-main/global-synchronizer/understand/introduction) for operational details. +**Canton Console** is a Scala-based REPL for managing Canton nodes directly. Operators and advanced developers use it for topology management, party administration, and debugging. See the [Global Synchronizer section](/global-synchronizer/understand/introduction) for operational details. **Daml Script** is the testing language for Daml contracts. You write test scripts as `Script ()` values in Daml and run them with `dpm test`. diff --git a/docs-main/sdks-tools/reference-projects/cn-quickstart.mdx b/docs-main/sdks-tools/reference-projects/cn-quickstart.mdx index f821fa3d0..1effb9f08 100644 --- a/docs-main/sdks-tools/reference-projects/cn-quickstart.mdx +++ b/docs-main/sdks-tools/reference-projects/cn-quickstart.mdx @@ -68,7 +68,7 @@ The Provider and User each run on their own validator. License purchases involve -Once running, the frontend is accessible in your browser. 
See the [QuickStart walkthrough](/docs-main/appdev/quickstart/running-the-demo) for a step-by-step guide. +Once running, the frontend is accessible in your browser. See the [QuickStart walkthrough](/appdev/quickstart/running-the-demo) for a step-by-step guide. ## Extending the Example @@ -102,11 +102,11 @@ The frontend never interacts with the Ledger API directly -- all ledger access g ### Switching Architectures -cn-quickstart uses a fully mediated architecture (backend handles all ledger access). You can switch to a CQRS pattern where the frontend submits commands directly to the Ledger API using TypeScript bindings generated by `dpm codegen-js`. See [SDKs and APIs](/docs-main/appdev/modules/m4-sdks-apis) for a comparison of both approaches. +cn-quickstart uses a fully mediated architecture (backend handles all ledger access). You can switch to a CQRS pattern where the frontend submits commands directly to the Ledger API using TypeScript bindings generated by `dpm codegen-js`. See [SDKs and APIs](/appdev/modules/m4-sdks-apis) for a comparison of both approaches. 
## Related Pages -- [QuickStart walkthrough](/docs-main/appdev/quickstart/running-the-demo) -- Step-by-step guide to running the demo -- [Project structure](/docs-main/appdev/quickstart/project-structure) -- Detailed breakdown of every directory and file -- [LocalNet](/docs-main/sdks-tools/development-tools/localnet) -- The Docker Compose environment powering local development -- [Backend development](/docs-main/appdev/modules/m4-backend-dev) -- Patterns from the cn-quickstart backend +- [QuickStart walkthrough](/appdev/quickstart/running-the-demo) -- Step-by-step guide to running the demo +- [Project structure](/appdev/quickstart/project-structure) -- Detailed breakdown of every directory and file +- [LocalNet](/sdks-tools/development-tools/localnet) -- The Docker Compose environment powering local development +- [Backend development](/appdev/modules/m4-backend-dev) -- Patterns from the cn-quickstart backend diff --git a/docs-main/sdks-tools/reference-projects/splice-reference-apps.mdx b/docs-main/sdks-tools/reference-projects/splice-reference-apps.mdx index d368b6b19..286af2fab 100644 --- a/docs-main/sdks-tools/reference-projects/splice-reference-apps.mdx +++ b/docs-main/sdks-tools/reference-projects/splice-reference-apps.mdx @@ -90,7 +90,7 @@ A separate repository, [splice-wallet-kernel](https://github.com/hyperledger-lab ## Related Pages -- [cn-quickstart](/docs-main/sdks-tools/reference-projects/cn-quickstart) -- Full-stack example application that runs on Splice infrastructure -- [Canton Coin](/docs-main/overview/understand/canton-coin) -- Overview of Canton Coin and its role in the network -- [Global Synchronizer](/docs-main/overview/understand/global-synchronizer) -- How the synchronizer infrastructure works -- [Integrations](/docs-main/integrations/overview) -- Wallet and exchange integration patterns +- [cn-quickstart](/sdks-tools/reference-projects/cn-quickstart) -- Full-stack example application that runs on Splice infrastructure +- [Canton 
Coin](/overview/understand/canton-coin) -- Overview of Canton Coin and its role in the network +- [Global Synchronizer](/overview/understand/global-synchronizer) -- How the synchronizer infrastructure works +- [Integrations](/integrations/overview) -- Wallet and exchange integration patterns diff --git a/docs-main/sdks-tools/sdks/daml-sdk.mdx b/docs-main/sdks-tools/sdks/daml-sdk.mdx index 62b2b1b30..51ce22b51 100644 --- a/docs-main/sdks-tools/sdks/daml-sdk.mdx +++ b/docs-main/sdks-tools/sdks/daml-sdk.mdx @@ -39,7 +39,7 @@ dpm --version - **Java 17 or later** -- The Daml compiler and Canton runtime require a JDK - **Node.js 18 or later** -- Required for TypeScript code generation and frontend tooling -- **dpm** -- See [dpm installation](/docs-main/sdks-tools/cli-tools/dpm) for setup instructions +- **dpm** -- See [dpm installation](/sdks-tools/cli-tools/dpm) for setup instructions ## Key Commands @@ -74,7 +74,7 @@ Daml is a functional smart contract language with a strong type system, built-in -For a full introduction to the Daml language, see [Module 3: Daml Smart Contracts](/docs-main/appdev/modules/m3-dev-environment). +For a full introduction to the Daml language, see [Module 3: Daml Smart Contracts](/appdev/modules/m3-dev-environment). 
## Version Compatibility @@ -96,7 +96,7 @@ When deploying to DevNet, TestNet, or MainNet, verify that your SDK version matc ## Further Reading -- [dpm command reference](/docs-main/sdks-tools/cli-tools/dpm) -- Full list of `dpm` commands and options -- [Daml Studio](/docs-main/sdks-tools/development-tools/daml-studio) -- VS Code extension for Daml development -- [Sandbox](/docs-main/sdks-tools/development-tools/sandbox) -- Local single-node testing environment +- [dpm command reference](/sdks-tools/cli-tools/dpm) -- Full list of `dpm` commands and options +- [Daml Studio](/sdks-tools/development-tools/daml-studio) -- VS Code extension for Daml development +- [Sandbox](/sdks-tools/development-tools/sandbox) -- Local single-node testing environment - [Canton 3.x documentation](https://docs.digitalasset.com) -- Full platform documentation diff --git a/docs-main/sdks-tools/sdks/exchange-sdk.mdx b/docs-main/sdks-tools/sdks/exchange-sdk.mdx index 0d84cef7a..b552a06c3 100644 --- a/docs-main/sdks-tools/sdks/exchange-sdk.mdx +++ b/docs-main/sdks-tools/sdks/exchange-sdk.mdx @@ -21,7 +21,7 @@ The SDK covers: The Exchange SDK and its integration patterns are documented in the Integrations section: - + Full setup guide, download instructions, and API reference for exchange integration. @@ -56,7 +56,7 @@ The integration backend sits between your exchange platform and the Canton ledge To work with the Exchange SDK: -1. Review the [Integrations overview](/docs-main/integrations/overview) for the full context on exchange and wallet integrations +1. Review the [Integrations overview](/integrations/overview) for the full context on exchange and wallet integrations 2. Clone or download the SDK from the location specified in the integrations documentation 3. Configure your validator connection (host, port, authentication tokens) 4. 
Build the project with Gradle and run the provided examples @@ -68,10 +68,10 @@ The SDK includes example workflows that demonstrate deposit and withdrawal flows - Java 17 or later - A running Canton validator (Sandbox for development, or a DevNet/TestNet validator) - The Daml SDK installed via `dpm install` -- Familiarity with the [Ledger API](/docs-main/sdks-tools/api-reference/ledger-api) and [Java bindings](/docs-main/sdks-tools/language-bindings/java) +- Familiarity with the [Ledger API](/sdks-tools/api-reference/ledger-api) and [Java bindings](/sdks-tools/language-bindings/java) ## Related Pages -- [Integrations overview](/docs-main/integrations/overview) -- Wallet and exchange integration patterns -- [Java bindings](/docs-main/sdks-tools/language-bindings/java) -- Code generation for Java -- [PQS](/docs-main/sdks-tools/development-tools/pqs) -- SQL-based ledger queries used by the Exchange SDK +- [Integrations overview](/integrations/overview) -- Wallet and exchange integration patterns +- [Java bindings](/sdks-tools/language-bindings/java) -- Code generation for Java +- [PQS](/sdks-tools/development-tools/pqs) -- SQL-based ledger queries used by the Exchange SDK diff --git a/docs-main/shared/support-channels.mdx b/docs-main/shared/support-channels.mdx index 27a9bf9d2..a1ccba3e9 100644 --- a/docs-main/shared/support-channels.mdx +++ b/docs-main/shared/support-channels.mdx @@ -146,9 +146,9 @@ Before contacting support, check these resources: | Resource | Content | |----------|---------| -| [FAQ](/docs-main/appdev/faq) | Common questions | -| [Troubleshooting](/docs-main/appdev/troubleshooting) | Error solutions | -| [Release Notes](/docs-main/global-synchronizer/release-notes/current-release) | Version changes | +| [FAQ](/appdev/faq) | Common questions | +| [Troubleshooting](/appdev/troubleshooting) | Error solutions | +| [Release Notes](/global-synchronizer/release-notes/current-release) | Version changes | | [Documentation](/) | Full documentation | ## 
Mailing Lists From 3cb1ebfcd2716b1a2d98a57d042b5f8428b66815 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Tue, 28 Apr 2026 21:23:11 +0200 Subject: [PATCH 2/6] More content fixes from the 2026-04-27 review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Section extracts (F-019, F-020): - Extracted "Open Tracing in Ledger API Client Applications" and "Authorization" COPIED blocks from m7-security.mdx into new deep-dives (verbatim except the embedded h1 promoted to frontmatter). m7-security.mdx truncated 457 -> 105 lines with "Advanced Topics" pointers added. - Added open-tracing and authorization to docs.json Deep Dives groups (3x versions = 6 entries). Render-breaker fixes: - F-009: external-party.mdx sequenceDiagram was a single one-line block inside
with RST escaping (\>>, \
). Reformatted as a proper ```mermaid``` fence with each statement on its own line and escaping removed. - F-012: global-synchronizer.mdx had a 47-line COPIED block of TODO outlines and GitHub-issue references (lines 286-333) that the reviewer flagged as orphan placeholders. Wrapped the block in a hidden div className="todo" so it stops rendering publicly while preserving source markers for future authoring. - F-022: Fixed 49 malformed side-by-side comparison tables in smart-contract-upgrading-reference.mdx. Each table contained two
```code```
blocks with duplicate cb1/cb2 IDs and code fences inside HTML divs. Removed the div wrappers and added blank lines around the fences so MDX parses the code blocks correctly. 98 sourceCode-divs stripped. - F-063: Fixed numbered-list rendering in party-management.mdx and linking-validator-multi-sync.mdx — 9 list-item prefixes used the RST-escaped form "1\." which renders as literal text in MDX rather than starting an ordered list. Stripped the backslashes. - F-067: Sequence diagram in synchronizer-operations.mdx unreadable in dark mode. Wrapped the image in a div with white background and padding so it contrasts on both light and dark themes; improved alt text from generic "image" to a descriptive sentence. - F-065: getting-started-tutorial.mdx — removed a stray bare "orphan" line (Sphinx orphan directive that didn't translate to MDX) and converted an editorial Note block (writer instructions, not user content) to a hidden div className="todo". - F-096: splice-apis.mdx — converted 5 RST :ref: patterns of the form `Display Text ` into italic text. The angle-bracket slug was parsed by MDX as a JSX element with an invalid lowercase component name, breaking the page render. Same pattern applied site-wide: 22 patterns across 18 files cleaned up. Content deduplication: - F-078: Removed the duplicate Metrics section from observability-configuration.mdx (~2832 lines). The same upstream metrics content already lives in canton-metrics.mdx; observability- configuration.mdx now has a one-line pointer. Note: builds on top of the /docs-main/ URL prefix fix; new files use the corrected URL convention (no docs-main/ prefix). 
--- docs-main/appdev/deep-dives/authorization.mdx | 190 ++ .../external-signing-transactions.mdx | 10 +- docs-main/appdev/deep-dives/open-tracing.mdx | 168 + .../smart-contract-upgrading-reference.mdx | 588 ++-- .../modules/m3-functional-programming.mdx | 2 +- .../m4-featured-app-activity-marker.mdx | 2 +- docs-main/appdev/modules/m7-security.mdx | 357 +-- .../development-issues.mdx | 4 +- .../error-code-reference.mdx | 4 +- .../ledger-api-errors.mdx | 4 +- .../operational-issues.mdx | 4 +- docs-main/docs.json | 6 + .../getting-started-tutorial.mdx | 6 +- .../deployment/configuration.mdx | 5 +- .../deployment/onboarding-process.mdx | 4 +- .../linking-validator-multi-sync.mdx | 8 +- .../synchronizer-operations.mdx | 4 +- .../production-operations/kms-operations.mdx | 6 +- .../production-operations/logging.mdx | 2 +- .../party-management.mdx | 10 +- .../upgrade-procedures.mdx | 2 +- .../reference/error-codes.mdx | 2 +- .../reference/observability-configuration.mdx | 2830 +---------------- .../transaction-failures.mdx | 8 +- .../troubleshooting-methodology.mdx | 4 +- .../overview/reference/external-party.mdx | 56 +- .../understand/global-synchronizer.mdx | 4 + .../sdks-tools/api-reference/splice-apis.mdx | 12 +- .../development-tools/daml-studio.mdx | 5 +- ...izer_deployment_initial-validation_L12.mdx | 5 +- ...shooting-guide_installation-issues_L28.mdx | 5 +- 31 files changed, 863 insertions(+), 3454 deletions(-) create mode 100644 docs-main/appdev/deep-dives/authorization.mdx create mode 100644 docs-main/appdev/deep-dives/open-tracing.mdx diff --git a/docs-main/appdev/deep-dives/authorization.mdx b/docs-main/appdev/deep-dives/authorization.mdx new file mode 100644 index 000000000..d0bab55fb --- /dev/null +++ b/docs-main/appdev/deep-dives/authorization.mdx @@ -0,0 +1,190 @@ +--- +title: "Authorization" +description: "Access tokens, identity providers, scopes, and rights for the Canton Ledger API." 
+--- + +{/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/secure/authorization.rst" hash="04aa3ece" */} + + +This section was copied from existing reviewed documentation. +**Source:** `docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/secure/authorization.rst` +Reviewers: Skip this section. Remove markers after final approval. + + +When developing Daml applications using SDK tools, your local setup will most likely not perform any Ledger API request authorization --by default, any valid Ledger API request will be accepted by the sandbox. + +This is not the case for participant nodes of deployed ledgers. For every Ledger API request, the participant node checks whether the request contains an access token that is valid and sufficient to authorize that request. You thus need to add support for authorization using access tokens to your application to run it against a deployed ledger. + + +In case of mutual (two-way) TLS authentication, the Ledger API client must present its certificate (in addition to an access token) to the Ledger API server as part of the authentication process. The provided certificate must be signed by a certificate authority (CA) trusted by the Ledger API server. Note that the identity of the application will not be proven by using this method, i.e. the `application_id` field in the request is not necessarily correlated with the CN (Common Name) in the certificate. + + +## Basic interaction + +Your Daml application sends requests to the Ledger API exposed by a participant node to submit changes to the ledger (e.g., "*exercise choice X on contract Y as party Alice*"), or to read data from the ledger (e.g., "*read all active contracts visible to party Alice*"). + +Whether a participant node *can* serve such a request depends on whether the participant node hosts the respective parties, and whether the request is valid according to the Daml Ledger Model. 
Whether a participant node *will* serve such a request to a Daml application depends on whether the request includes an access token that is valid and sufficient to authorize the request for this participant node. + +## Acquire and Use Access Tokens + +How an application acquires access tokens depends on the participant node it talks to and is ultimately set up by the participant node operator. Many setups use a flow in the style of [OAuth 2.0](https://oauth.net/2/). + +In this scenario, the Daml application first contacts a token issuer to get an access token. The token issuer verifies the identity of the requesting application, looks up the privileges of the application, and generates a signed access token describing those privileges. + +Once the access token is issued, the Daml application sends it along with every Ledger API request. The Daml ledger verifies: + +- that the token was issued by one of its trusted token issuers +- that the token has not been tampered with +- that the token has not expired +- that the privileges carried by the token authorize the request + +![A flowchart illustrating the process of authentication described in the two paragraphs immediately above.](./images/Authentication.svg) + +How you attach tokens to requests depends on the tool or library you use to interact with the Ledger API. See the tool's or library's documentation for more information. (E.g. relevant documentation to access the gRPC Ledger API using Java bindings and the JSON Ledger API.) + +## Access Token Formats + +Applications should treat access tokens as opaque blobs. However, as an application developer it can be helpful to understand the format of access tokens to debug problems. + +All Daml ledgers represent access tokens as [JSON Web Tokens (JWTs)](https://datatracker.ietf.org/doc/html/rfc7519). + + +To generate access tokens for testing purposes, you can use the [jwt.io](https://jwt.io/) web site. 
+ + +## Access Tokens and Rights + +Access tokens contain information about the rights granted to the bearer of the token. These rights are specific to the API being accessed. + +The Ledger API uses the following rights to govern request authorization: + +- `public`: the right to retrieve publicly available information, such as the ledger identity +- `participant_admin`: the right to administer the participant node +- `idp_admin`: the right to administer the users and parties belonging the same identity provider configuration as the authenticated user +- `canReadAs(p)`: the right to read information off the ledger (like the active contracts) visible to the party `p` +- `canActsAs(p)`: same as `canReadAs(p)`, with the added right of issuing commands on behalf of the party `p` + +The following table summarizes the rights required to access each Ledger API endpoint: + +| Ledger API service | Endpoint | Required right | +|-------------------------------|--------------------------------------------------------------|------------------------------------------------------| +| StateService | GetActiveContracts | for each requested party p: canReadAs(p) | +| CommandCompletionService | CompletionEnd | public | +| | CompletionStream | for each requested party p: canReadAs(p) | +| CommandSubmissionService | Submit | for submitting party p: canActAs(p) | +| CommandService | All | for submitting party p: canActAs(p) | +| EventQueryService | All | for each requesting party p: canReadAs(p) | +| Health | All | no access token required for health checking | +| IdentityProviderConfigService | All | participant_admin | +| PackageService | All | public | +| PackageManagementService | All | participant_admin | +| PartyManagementService | All | participant_admin | +| | All (except GetParticipantId, UpdatePartyIdentityProviderId) | idp_admin | +| ParticipantPruningService | All | participant_admin | +| ServerReflection | All | no access token required for gRPC service reflection | +| 
TimeService | GetTime | public | +| | SetTime | participant_admin | +| UpdateService | LedgerEnd | public | +| | All (except LedgerEnd) | for each requested party p: canReadAs(p) | +| UserManagementService | All | participant_admin | +| | All (except UpdateUserIdentityProviderId) | idp_admin | +| | GetUser | authenticated users can get their own user | +| | ListUserRights | authenticated users can list their own rights | +| VersionService | All | public | + +## User Access Tokens + +A participant node stores a dynamic set of users as well as their rights. User access tokens encode such participant user on whose behalf the request is issued. + +When handling such requests, participant nodes look up the participant user's current rights before checking request authorization per the table above. Thus the rights granted to an application can be changed dynamically using the participant User Management Service *without* issuing new access tokens. + +User access tokens are [JWTs](https://datatracker.ietf.org/doc/html/rfc7519) that follow the [OAuth 2.0 standard](https://datatracker.ietf.org/doc/html/rfc6749). There are two different JSON encodings: An audience-based token format that relies on the audience field to specify that it is designated for a specific Daml participant and a scope-based token format which relies on the scope field to designate the purpose. Both formats can be used interchangeably but if possible, use of the audience-based token format, as it is compatible with a wider range of IAMs, e.g. Kubernetes does not support setting the scope field and makes the participant id mandatory which prevents misuse of a token on a different participant. 
+ +### Audience-Based Tokens + +```json +{ + "aud": "https://daml.com/jwt/aud/participant/someParticipantId", + "sub": "someUserId", + "iss": "someIdpId", + "exp": 1300819380 +} +``` + +To interpret the above notation: + +- `aud` is a required field which restricts the token to participant nodes with the given ID (e.g. `someParticipantId`) +- `sub` is a required field which specifies the participant user's ID +- `iss` is a field which specifies the identity provider id +- `exp` is an optional field which specifies the JWT expiration date (in seconds since EPOCH) + +### Scope-Based Tokens + +```json +{ + "aud": "someParticipantId", + "sub": "someUserId", + "exp": 1300819380, + "iss": "someIdpId", + "scope": "daml_ledger_api" +} +``` + +To interpret the above notation: + +- `aud` is an optional field which restricts the token to participant nodes with the given ID +- `sub` is a required field which specifies the participant user's ID +- `iss` is a field which specifies the identity provider id +- `exp` is an optional field which specifies the JWT expiration date (in seconds since EPOCH) +- `scope` is a space-separated list of [OAuth 2.0 scopes](https://datatracker.ietf.org/doc/html/rfc6749#section-3.3) that must contain the `"daml_ledger_api"` scope + +### Requirements for User IDs + +User IDs must be non-empty strings of at most 128 characters that are either alphanumeric ASCII characters or one of the symbols "@^\$.!\`-#+'~\_\|:()". + +### Identity providers + +An identity provider configuration can be thought of as a set of participant users which: +- Have a defined way to verify their access tokens +- Can be administered in isolation from the rest of the users on the same participant node +- Have an identity provider id unique per participant node +- Have a related set of parties that share the same identity provider id + +A participant node always has a statically configured default identity provider configuration whose id is the empty string `""`. 
Additionally, you can configure a small number of non-default identity providers using `IdentityProviderConfigService` by supplying a non-empty identity provider id and a [JWK Set](https://datatracker.ietf.org/doc/html/rfc7517) URL which the participant node will use to retrieve the cryptographic data needed to verify the access tokens. + +When authenticating as a user from a non-default identity provider configuration, your access tokens must contain the `iss` field whose value matches the identity provider id. In case of the default identity provider configuration, the `iss` field can be empty or omitted from the access tokens. + +## Encoding and Signature + +Access tokens conforming to the JWT specification are embedded in a larger JSON structure with a separate header and payload. + +```json +{ + "alg": "RS256", + "typ": "JWT" +} +{ + "aud": "https://daml.com/jwt/aud/participant/someParticipantId", + "sub": "someUserId", + "iss": "someIdpId", + "exp": 1300819380 +} +``` + +Together they are then base64 encoded, forming the final token's stem. Subsequently, the stem is signed using the cryptographic algorithm identified in the header. The signature itself is also base64-encoded and appended to the stem. 
The resulting character string takes a shape similar to + +```bash +eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJodHRwczovL2RhbWwuY29tL2p3dC9hdWQvcGFydGljaXBhbnQvc29tZVBhcnRpY2lwYW50SWQiLCJzdWIiOiJzb21lVXNlcklkIiwiaXNzIjoic29tZUlkcElkIiwiZXhwIjoxMzAwODE5MzgwfQ.DLVPehRLt8WiddI6mwUU1lqIgRbysLK34mgkuzSDQTThCXlEY_S57SHKEQHw-Pai0Y0OeGP7wNsT6uq51vBVbRNfxOLwy5owQRm3LEeTbSXMjnnPVrtRrhelVQCsH2AcV4J4bbrAe6YfKGYFBXZOfeRL3Gy7KIplcfxDZekHdPD8lhwK8AkvAR4IaOX72Q5jhjB2yOY9IwpVxx-pN0vWCqmxTbQqnIpSGo185Y0f38nKZeofGT5jcJZaSv7z4Ks15gs9gm1pHorEL6TZLCbX7T064hQeTBFea-kxQlUkcfcgmUOMAmA05_4a8fdFz2uHq5km7ylp6pUITogN5MJ-_CVFEwOD0GveOgiUJBBMHDBjq_V_DfRE4nZ04tFQ0DDthWpMd0F59JFIhmjZSZT9DWppj6G7VBWpu9aIFPefyX--2U_aO0Smt_dBBV5A6pvbIgX6ITF2tjEvvOCLHtLKmNTlP8cclna70DCsDIrojNVDMFpLXYLvsP6DhQWkGaRb-nz0hLjQE_PtuQzSexrZG5d8tHFS351E2-aUVTKoJuEGHH3n1it-d9yHdt4fAynIbhWUVAervxc-oXyrA3-uafrxbIiQCpnw0kQ8K-HwJpkfz_Yqf-luI1FaRiPT9F-cYzwvceNf2_2hhmiuGiYp3rVIPwkFAuBc1vgpPiWSNLc +``` + +Note that access token generation in the correct format is typically delegated to the identity provider systems. Client application developers are unlikely to need to deal with it directly. + +## Token expiration + +JWT token-based authorization is inherently stateless, offering excellent scalability and eliminating the need for servers to manage client sessions or perform costly claim verification checks. However, this stateless nature means JWT tokens cannot be revoked. + +To mitigate the risk associated with token loss or theft, we *strongly recommend* to follow the standard practice for systems utilizing JWT tokens: configure the IAM system to issue short-lived tokens, ideally lasting between 5 and 15 minutes. This limits the time window during which unauthorized actors can access the system. + +Using long-lived tokens goes against best practices and risks a costly reconfiguration of your IAM token issuance mechanism should a token be compromised. A token loss may necessitate rotating the token signing key. 
This action invalidates all outstanding tokens through the JSON Web Key Set (JWKS) mechanism. Consult your IAM system's documentation for detailed strategies on mitigating JWT token theft. + +{/* COPIED_END */} diff --git a/docs-main/appdev/deep-dives/external-signing-transactions.mdx b/docs-main/appdev/deep-dives/external-signing-transactions.mdx index 6fd1608da..4533ffac2 100644 --- a/docs-main/appdev/deep-dives/external-signing-transactions.mdx +++ b/docs-main/appdev/deep-dives/external-signing-transactions.mdx @@ -1820,17 +1820,15 @@ The scripts mentioned in this tutorial can be used as tools for testing and deve ### Decode base64 encoded prepared transaction to JSON -```bash +``*bash ./setup.sh -python daml_transaction_util.py --decode --base64 -``` +python daml_transaction_util.py --decode --base64*`` ### Compute hash of base64 encoded prepared transaction -```bash +``*bash ./setup.sh -python daml_transaction_util.py --hash --base64 -``` +python daml_transaction_util.py --hash --base64*`` {/* COPIED_END */} diff --git a/docs-main/appdev/deep-dives/open-tracing.mdx b/docs-main/appdev/deep-dives/open-tracing.mdx new file mode 100644 index 000000000..32acb77f3 --- /dev/null +++ b/docs-main/appdev/deep-dives/open-tracing.mdx @@ -0,0 +1,168 @@ +--- +title: "Open Tracing in Ledger API Client Applications" +description: "Adding OpenTelemetry-based distributed tracing to Daml applications interacting with the Ledger API." +--- + +{/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/observe/open-tracing.rst" hash="03a5f726" */} + + +This section was copied from existing reviewed documentation. +**Source:** `docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/observe/open-tracing.rst` +Reviewers: Skip this section. Remove markers after final approval. + + +## Introduction + +Distributed tracing is a technique used for troubleshooting performance issues in a microservices environment like Daml Enterprise. 
Tracing in Canton has been described in a page dedicated to monitoring (Canton Monitoring / Tracing). This guide describes how to write **Ledger API** client applications so that distributed traces and spans can seamlessly continue between the client and Canton software components. + +To study a **Ledger API** client application with OpenTelemetry support in detail, see this [example on GitHub](https://github.com/digital-asset/ex-java-bindings-with-opentelemetry). + +The example implements a variation of the already familiar `PingPong` application where every call to the **Ledger API** is decorated with an OpenTelemetry trace context and demonstrates how to retrieve the trace context from past transactions. + +To familiarize yourself with the broader topic of open tracing, consult the official pages of [the OpenTelemetry project](https://opentelemetry.io/). To find out more about open tracing in Java, the documentation on [Java OpenTelemetry instrumentation](https://opentelemetry.io/docs/instrumentation/java/) is an excellent source of references and examples. + +## Set Up an OpenTelemetry Environment + +To observe distributed tracing in action, you first need to start an OpenTelemetry backend server. Canton supports Jaeger, Zipkin, or OTLP formats. To start a Jaeger server you can use the following docker command: + + docker run --rm -it --name jaeger\ + -p 16686:16686 \ + -p 14250:14250 \ + jaegertracing/all-in-one:1.22.0 + +You also have to start Canton with OpenTelemetry exporting enabled. 
You can achieve it by defining a new `jaeger.conf` configuration file: + +``` +canton.monitoring.tracing.tracer.exporter { + type = jaeger + address = "localhost" // it's the default, so can be omitted + port = 14250 // it's the default, so can be omitted +} +``` + +Next, launch a small Canton installation combining the `jaeger.conf` into the configuration mix: + + bin/canton -c examples/01-simple-topology/simple-topology.conf -c jaeger.conf + +## Add Project Dependencies + +To use the OpenTelemetry libraries, add the following **Maven** dependencies to your project's `pom.xml`: + +```java + + io.opentelemetry + opentelemetry-api + 1.29.0 + + + io.opentelemetry + opentelemetry-exporter-jaeger + 1.29.0 + + + io.opentelemetry + opentelemetry-sdk + 1.29.0 + + + io.opentelemetry.instrumentation + opentelemetry-grpc-1.6 + 1.29.0-alpha + +``` + + +Replace the version number in each dependency with the version you want to use. To find available versions, check the [Maven Central Repository](https://search.maven.org/artifact/io.opentelemetry/opentelemetry-api). + + +## Initialize + +An application that wants to use OpenTelemetry must initialize a number of global controller objects that orchestrate different aspects of the distributed tracing process such as span creation, propagation, and export. The exact set of controllers needed may vary from application to application. You may draw some inspiration from the selection used in the example inside [the OpenTelemetryUtil.createOpenTelemetry method](https://github.com/digital-asset/ex-java-bindings-with-opentelemetry/blob/master/src/main/java/examples/pingpong/codegen/OpenTelemetryUtil.java). This is the minimum set required for a fully functional Jaeger trace reporting. + +The next step is to initialize the GRPCTelemetry controller, which is responsible for the propagation of the trace contexts inside the HTTP2 headers of the gRPC communication. 
+ +The example wraps the necessary initialization steps in the constructor of the OpenTelemetryUtil class. All you have to do is call: + +```java +OpenTelemetryUtil openTelemetry = new OpenTelemetryUtil(APP_ID); +``` + +The GRPCTelemetry controller can construct client call interceptors that need to be mounted on top of the **Netty** channels used in the gRPC communication. The example provides a useful helper method called `withClientInterceptor` that injects an interceptor at the channel builder level: + +```java +ManagedChannel channel = openTelemetry.withClientInterceptor( + ManagedChannelBuilder + .forAddress(host, port) + .usePlaintext() + ) + .build(); +``` + +And with that, you are all set to start generating own spans, reporting them to the **Jaeger** server and also propagating them transparently to the **Ledger API**. + +## Start New Spans + +Before making a gRPC call, you must generate a new span to cover the multi-component interaction that is about to be initiated. The example provides a useful combinator called `runInNewSpan` that wraps the execution of an arbitrary function in a newly generated span: + +```java +public R runInNewSpan(String spanName, Supplier body) { + Span span = tracer.spanBuilder(spanName).startSpan(); + try(Scope ignored = span.makeCurrent()) { + return body.get(); + } finally { + span.end(); + } +} +``` + +You can use it on a command submission as follows: + +```java +openTelemetry.runInNewSpan("createInitialContracts", () -> submissionService.submit(request)); +``` + +The gRPC interceptors that were mounted at the initialization stage do the rest of the work behind the scenes making sure that the spans make it across to the Canton. + +## Continue Spans Across Different Applications + +Sometimes you may wish to continue the same span across multiple Daml transactions forming a single workflow. 
This may be especially interesting when different client application instances interact through the ledger and yet their entire conversation should be seen as a single coherent succession of spans. In that case, it is possible to extract the trace context associated with the past transactions from the Transaction, TransactionTree, or Completion records that are returned from the following **Ledger API** calls: + +- `UpdateService.GetUpdates` +- `UpdateService.GetUpdateTrees` +- `UpdateService.GetTransactionByOffset` +- `UpdateService.GetTransactionById` +- `UpdateService.GetTransactionTreeByOffset` +- `UpdateService.GetTransactionTreeById` +- `UpdateService.GetUpdateByOffset` +- `UpdateService.GetUpdateById` +- `CommandCompletionService.CompletionStream` + +You can extract the context by using a helper function implemented in the example: + +```java +Context extractedContext = openTelemetry.contextFromDamlTraceContext(tx.getTraceContext()); +``` + +The extracted context then has to be elevated to the status of the current context. Doing this allows the continuation of the original trace context into the present operation. Again the example provides a convenient combinator for that: + +```java +openTelemetry.runInOpenTelemetryScope(extractedContext, () -> ... ); +``` + +Finally, you generate a new span within the original context. You can use the already familiar `runInNewSpan` method: + +```java +openTelemetry.runInNewSpan("follow", () -> + submissionService.submit(SubmitRequest.toProto(ledgerId, commandsSubmission)) +) +``` + +## Put It All Together + +When the client applications follow the rules and pass the trace contexts without interruption, it becomes possible to witness the entire workflow as one long succession of spans in Jaeger UI. The span diagram collected while running the example application is shown below: + +
+images/jaegerPingSpans.png +
+ +{/* COPIED_END */} diff --git a/docs-main/appdev/deep-dives/smart-contract-upgrading-reference.mdx b/docs-main/appdev/deep-dives/smart-contract-upgrading-reference.mdx index f36878aaf..d58ab8a70 100644 --- a/docs-main/appdev/deep-dives/smart-contract-upgrading-reference.mdx +++ b/docs-main/appdev/deep-dives/smart-contract-upgrading-reference.mdx @@ -92,7 +92,8 @@ Below, the module on the right is a valid upgrade of the module on the left. But -
+ + ```text module M where @@ -102,8 +103,10 @@ template T1 where signatory p ``` -
-
+ + + + ```text module M where @@ -119,7 +122,8 @@ module M where where signatory p ``` -
+ + @@ -151,7 +155,8 @@ Below, the template on the right is a valid upgrade of the template on the left. -
+ + ```text template T with @@ -159,8 +164,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -169,7 +176,8 @@ template T where signatory p ``` -
+ + @@ -183,7 +191,8 @@ Below, the template on the right is **not** a valid upgrade of the template on t -
+ + ```text template T with @@ -191,8 +200,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -201,7 +212,8 @@ template T where signatory p ``` -
+ + @@ -215,7 +227,8 @@ Below, the template on the right is **not** a valid upgrade of the template on t -
+ + ```text template T with @@ -224,8 +237,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -233,7 +248,8 @@ template T where signatory p ``` -
+ + @@ -247,7 +263,8 @@ Below, the template on the right is **not** a valid upgrade of the template on t -
+ + ```text template T with @@ -256,8 +273,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -266,7 +285,8 @@ template T where signatory p ``` -
+ + @@ -290,7 +310,8 @@ Below, the template on the right is a valid upgrade of the template on the left. -
+ + ```text template T with @@ -298,8 +319,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -312,7 +335,8 @@ template T do return () ``` -
+ + @@ -340,7 +364,8 @@ Below, the choice on the right is a valid upgrade of the choice on the left. It -
+ + ```text choice C : () with @@ -349,8 +374,10 @@ choice C : () do return () ``` -
-
+ + + + ```text choice C : () with @@ -360,7 +387,8 @@ choice C : () do return () ``` -
+ + @@ -374,7 +402,8 @@ Below, the choice on the right is **not** a valid upgrade of the choice on the l -
+ + ```text choice C : () with @@ -383,8 +412,10 @@ choice C : () do return () ``` -
-
+ + + + ```text choice C : () with @@ -394,7 +425,8 @@ choice C : () do return () ``` -
+ + @@ -408,7 +440,8 @@ Below, the choice on the right is **not** a valid upgrade of the choice on the l -
+ + ```text choice C : () with @@ -417,8 +450,10 @@ choice C : () do return () ``` -
-
+ + + + ```text choice C : () with @@ -428,7 +463,8 @@ choice C : () do return () ``` -
+ + @@ -442,7 +478,8 @@ Below, the choice on the right is **not** a valid upgrade of the choice on the l -
+ + ```text choice C : () with @@ -451,8 +488,10 @@ choice C : () do return () ``` -
-
+ + + + ```text choice C : () with @@ -460,7 +499,8 @@ choice C : () do return () ``` -
+ + @@ -474,7 +514,8 @@ Below, the choice on the right is **not** a valid upgrade of the choice on the l -
+ + ```text choice C : () with @@ -483,8 +524,10 @@ choice C : () do return () ``` -
-
+ + + + ```text choice C : () with @@ -492,7 +535,8 @@ choice C : () do return () ``` -
+ + @@ -518,22 +562,26 @@ Below, the choice on the right is **not** a valid upgrade of the choice on the l -
+ + ```text choice C : () controller p do return () ``` -
-
+ + + + ```text choice C : Int controller p do return 1 ``` -
+ + @@ -561,21 +609,25 @@ Below, the module on the right is a valid upgrade of the module on the left. It -
+ + ```text module M where data A = A ``` -
-
+ + + + ```text module M where data A = A data B = B ``` -
+ + @@ -589,7 +641,8 @@ Below, the module on the right is a valid upgrade of the module on the left. It -
+ + ```text module M where @@ -597,15 +650,18 @@ data A = A with x : Int -> Int ``` -
-
+ + + + ```text module M where data A = A with ``` -
+ + @@ -619,21 +675,25 @@ Below, the module on the right is **not** a valid upgrade of the module on the l -
+ + ```text module M where data A = A with ``` -
-
+ + + + ```text module M where data A = A | B ``` -
+ + @@ -647,18 +707,22 @@ Below, the module on the right is **not** a valid upgrade of the module on the l -
+ + ```text module M where data A = A ``` -
-
+ + + + ```text module M where ``` -
+ + @@ -672,15 +736,18 @@ Below, the module on the right is **not** a valid upgrade of the module on the l -
+ + ```text module M where data A = A with ``` -
-
+ + + + ```text module M where @@ -688,7 +755,8 @@ data A = A with x : Optional (Int -> Int) ``` -
+ + @@ -720,19 +788,23 @@ Below, the record on the right is a valid upgrade of the module on the left. It -
+ + ```text data T = T with x1 : Int ``` -
-
+ + + + ```text data T = T with x1 : Int x2 : Optional Text ``` -
+ + @@ -746,19 +818,23 @@ Below, the record on the right is **not** a valid upgrade of the record on the l -
+ + ```text data T = T with x1 : Int ``` -
-
+ + + + ```text data T = T with x2 : Optional Text x1 : Int ``` -
+ + @@ -772,19 +848,23 @@ Below, the record on the right is **not** a valid upgrade of the record on the l -
+ + ```text data T = T with x1 : Int x2 : Text ``` -
-
+ + + + ```text data T = T with x1 : Int ``` -
+ + @@ -798,18 +878,22 @@ Below, the record on the right is **not** a valid upgrade of the record on the l -
+ + ```text data T = T with x1 : Int ``` -
-
+ + + + ```text data T = T with x1 : Text ``` -
+ + @@ -843,18 +927,22 @@ Below, the variant on the right is a valid upgrade of the variant on the left. I -
+ + ```text data T = A Int | B Text ``` -
-
+ + + + ```text data T = A Int | B Text | C Bool ``` -
+ + @@ -868,18 +956,22 @@ Below, the variant on the right is a valid upgrade of the variant on the left. I -
+ + ```text data T = A | B { x : Int } ``` -
-
+ + + + ```text data T = A | B { x : Int, y : Optional Text } ``` -
+ + @@ -893,18 +985,22 @@ Below, the variant on the right is **not** a valid upgrade of the variant on the -
+ + ```text data T = A Int | B Text ``` -
-
+ + + + ```text data T = A Int | C Bool | B Text ``` -
+ + @@ -918,18 +1014,22 @@ Below, the variant on the right is **not** a valid upgrade of the variant on the -
+ + ```text data T = A Int | B Text ``` -
-
+ + + + ```text data T = B Text | A Int ``` -
+ + @@ -943,18 +1043,22 @@ Below, the variant on the right is **not** a valid upgrade of the variant on the -
+ + ```text data T = A Int | B Text ``` -
-
+ + + + ```text data T = A Int ``` -
+ + @@ -968,18 +1072,22 @@ Below, the variant on the right is **not** a valid upgrade of the variant on the -
+ + ```text data T = A Int | B Text ``` -
-
+ + + + ```text data T = A Int | B Bool ``` -
+ + @@ -993,18 +1101,22 @@ Below, the variant on the right is **not** a valid upgrade of the variant on the -
+ + ```text data T = A Int | B ``` -
-
+ + + + ```text data T = A Int | B { x : Optional Text } ``` -
+ + @@ -1018,18 +1130,22 @@ Below, the variant on the right is **not** a valid upgrade of the enum on the le -
+ + ```text data T = A | B ``` -
-
+ + + + ```text data T = A | B | C Int ``` -
+ + @@ -1088,22 +1204,26 @@ In these examples we assume the existence of packages `q-1.0.0` and `q-2.0.0` an In q-2.0.0: -
+ + ```text module Dep where data U = C1 data V = V ``` -
-
+ + + + ```text module Dep where data U = C1 | C2 data V = V ``` -
+ + @@ -1117,7 +1237,8 @@ Then below, the module on the right is a valid upgrade of the module on the left -
+ + ```text module Main where @@ -1126,8 +1247,10 @@ import qualified Dep data T = T Dep.U ``` -
-
+ + + + ```text module Main where @@ -1136,7 +1259,8 @@ import qualified Dep data T = T Dep.U ``` -
+ + @@ -1150,7 +1274,8 @@ However below, the module on the right is **not** a valid upgrade of the module -
+ + ```text module Main where @@ -1159,8 +1284,10 @@ import qualified Dep data T = T Dep.V ``` -
-
+ + + + ```text module Main where @@ -1169,7 +1296,8 @@ import qualified Dep data T = T Dep.V ``` -
+ + @@ -1193,15 +1321,18 @@ Below, the parameterized data type on the right is a valid upgrade of the parame -
+ + ```text data Tree a = Tree with label : a children : [Tree a] ``` -
-
+ + + + ```text data Tree b = Tree with @@ -1209,7 +1340,8 @@ data Tree b = children : [Tree b] cachedSize : Optional Int ``` -
+ + @@ -1229,7 +1361,8 @@ Below, the module on the right is a valid upgrade of the module on the left. The -
+ + ```text module M where @@ -1240,8 +1373,10 @@ data Demo = Demo with field2 : Map T T field3 : Optional T ``` -
-
+ + + + ```text module M where @@ -1252,7 +1387,8 @@ data Demo = Demo with field2 : Map T T field3 : Optional T ``` -
+ + @@ -1266,7 +1402,8 @@ Below, the module on the right is a valid upgrade of the module on the left. The -
+ + ```text module M where @@ -1275,8 +1412,10 @@ data C a = C { x : a } data Demo = Demo with field1 : C T ``` -
-
+ + + + ```text module M where @@ -1285,7 +1424,8 @@ data C a = C { x : a, y : Optional Int } data Demo = Demo with field1 : C T ``` -
+ + @@ -1322,7 +1462,8 @@ Then, below, the instance of `I` for template `T` on the right is a valid upgrad -
+ + ```text template T with @@ -1335,8 +1476,10 @@ template T view = IView i m = i ``` -
-
+ + + + ```text template T with @@ -1350,7 +1493,8 @@ template T view = IView (fromOptional i j) m = fromOptional i j ``` -
+ + @@ -1364,7 +1508,8 @@ Below, the template on the right is a valid upgrade of the template on the left. -
+ + ```text template T3 with @@ -1373,8 +1518,10 @@ template T3 where signatory p ``` -
-
+ + + + ```text template T3 with @@ -1387,7 +1534,8 @@ template T3 view = IView i m = i ``` -
+ + @@ -1401,7 +1549,8 @@ Below, the template on the right is **not** a valid upgrade of the template on t -
+ + ```text template T2 with @@ -1414,8 +1563,10 @@ template T2 view = IView i m = i ``` -
-
+ + + + ```text template T2 with @@ -1424,7 +1575,8 @@ template T2 where signatory p ``` -
+ + @@ -1483,7 +1635,8 @@ Assume two versions of a package called dep, defining a template U and its upgra In dep-2.0.0: -
+ + ```text module Dep where @@ -1493,8 +1646,10 @@ template U where signatory p ``` -
-
+ + + + ```text module Dep where @@ -1505,7 +1660,8 @@ template U where signatory p ``` -
+ + @@ -1568,7 +1724,8 @@ Assume then two versions of a template `T` that implements `I`. -
+ + ```text template T with @@ -1579,8 +1736,10 @@ template T interface instance I for T where view = IView {} ``` -
-
+ + + + ```text template T with @@ -1592,7 +1751,8 @@ template T interface instance I for T where view = IView {} ``` -
+ + @@ -1622,7 +1782,8 @@ Assume a package `p` with two versions. The new version adds an optional text fi In p-2.0.0: -
+ + ```text template T with @@ -1630,8 +1791,10 @@ template T where signatory p ``` -
-
+ + + + ```text template T with @@ -1640,7 +1803,8 @@ template T where signatory p ``` -
+ + @@ -1691,7 +1855,8 @@ Now, assume two versions of a package called `inst`, defining a template `Inst` In inst-2.0.0: -
+ + ```text template Inst with @@ -1703,8 +1868,10 @@ template Inst view = IView getInt = 1 ``` -
-
+ + + + ```text template Inst with @@ -1716,7 +1883,8 @@ template Inst view = IView getInt = 2 ``` -
+ + @@ -1764,7 +1932,8 @@ Assume now a package `r` with two versions. They define a template with a choice In r-2.0.0: -
+ + ```text module M where @@ -1782,8 +1951,10 @@ template V controller p do return Ret ``` -
-
+ + + + ```text module M where @@ -1803,7 +1974,8 @@ template V controller p do return Ret with j = j ``` -
+ + @@ -1836,15 +2008,18 @@ Given a record type and its upgrade, referred to respectively as `T-v1` and `T-v -
+ + ```text data T = T with x1 : T1 ... xn : Tn ``` -
-
+ + + + ```text data T = T with x1 : T1' @@ -1854,7 +2029,8 @@ data T = T with ... ym : Optional Um ``` -
+ + @@ -1876,15 +2052,18 @@ Given a variant type and its upgrade, referred to respectively as `V-v1` and `V- -
+ + ```text data V = = C1 T1 | ... | Cn Tn ``` -
-
+ + + + ```text data V = = C1 T1' @@ -1894,7 +2073,8 @@ data V = | ... | Dm Um ``` -
+ + @@ -1945,7 +2125,8 @@ Below the template on the right is a valid upgrade of the template on the left. In p-2.0.0: -
+ + ```text template T with @@ -1953,8 +2134,10 @@ template T where signatory sig ``` -
-
+ + + + ```text template T with @@ -1963,7 +2146,8 @@ template T where signatory sig, fromOptional [] additionalSig ``` -
+ + @@ -1989,7 +2173,8 @@ On the other hand, below, the template on the right is **not** a valid upgrade o In p-2.0.0: -
+ + ```text template T with @@ -1997,8 +2182,10 @@ template T where signatory sig ``` -
-
+ + + + ```text template T with @@ -2006,7 +2193,8 @@ template T where signatory sig, sig ``` -
+ + @@ -2028,7 +2216,8 @@ Below, the template on the right is **not** a valid upgrade of the template on t -
+ + ```text template T with @@ -2038,8 +2227,10 @@ template T signatory sig ensure n >= 0 ``` -
-
+ + + + ```text template T with @@ -2049,7 +2240,8 @@ template T signatory sig ensure n > 0 ``` -
+ + @@ -2083,7 +2275,8 @@ Below, the template on the right is a valid upgrade of the template on the left. In p-2.0.0: -
+ + ```text template T with @@ -2096,8 +2289,10 @@ template T view = IView i m = i ``` -
-
+ + + + ```text template T with @@ -2110,7 +2305,8 @@ template T view = IView (i+1) m = i ``` -
+ + diff --git a/docs-main/appdev/modules/m3-functional-programming.mdx b/docs-main/appdev/modules/m3-functional-programming.mdx index d3a9fde4c..e2f041e94 100644 --- a/docs-main/appdev/modules/m3-functional-programming.mdx +++ b/docs-main/appdev/modules/m3-functional-programming.mdx @@ -26,7 +26,7 @@ There is a project template `daml-intro-functional-101` for this chapter, but it ## The Haskell connection -The previous chapters of this introduction to Daml have mostly covered the structure of templates, and their connection to the `Daml Ledger Model `. The logic of what happens within the `do` blocks of choices has been kept relatively simple. In this chapter, we will dive deeper into Daml's expression language, the part that allows you to write logic inside those `do` blocks. But we can only scratch the surface here. Daml borrows a lot of its language from [Haskell](https://www.haskell.org). If you want to dive deeper, or learn about specific aspects of the language you can refer to standard literature on Haskell. Some recommendations: +The previous chapters of this introduction to Daml have mostly covered the structure of templates, and their connection to the *Daml Ledger Model*. The logic of what happens within the `do` blocks of choices has been kept relatively simple. In this chapter, we will dive deeper into Daml's expression language, the part that allows you to write logic inside those `do` blocks. But we can only scratch the surface here. Daml borrows a lot of its language from [Haskell](https://www.haskell.org). If you want to dive deeper, or learn about specific aspects of the language you can refer to standard literature on Haskell. 
Some recommendations: - [Finding Success and Failure in Haskell (Julie Moronuki, Chris Martin)](https://joyofhaskell.com/) - [Haskell Programming from first principles (Christopher Allen, Julie Moronuki)](http://haskellbook.com/) diff --git a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx index d9df8d433..4c5993740 100644 --- a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx +++ b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx @@ -65,7 +65,7 @@ This section was copied from existing reviewed documentation. Reviewers: Skip this section. Remove markers after final approval. -It is possible to share the attribution of activity for the `FeaturedAppActivityMarker`. The `FeaturedAppRight_CreateActivityMarker` choice accepts a list of `AppRewardBeneficiary ` contracts. Then a `FeaturedAppActivityMarker` is created for each `beneficiary` with the `weight` field set appropriately. +It is possible to share the attribution of activity for the `FeaturedAppActivityMarker`. The `FeaturedAppRight_CreateActivityMarker` choice accepts a list of *AppRewardBeneficiary* contracts. Then a `FeaturedAppActivityMarker` is created for each `beneficiary` with the `weight` field set appropriately. {/* COPIED_END */} diff --git a/docs-main/appdev/modules/m7-security.mdx b/docs-main/appdev/modules/m7-security.mdx index 3bdc0fac3..298be1408 100644 --- a/docs-main/appdev/modules/m7-security.mdx +++ b/docs-main/appdev/modules/m7-security.mdx @@ -100,358 +100,7 @@ Validate all user input before it reaches the Ledger API. 
While Daml's type syst - [Package Management](/appdev/modules/m7-package-management) — Securing DAR distribution and deployment - [Performance](/appdev/modules/m7-performance) — Optimization strategies for Canton applications -{/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/observe/open-tracing.rst" hash="03a5f726" */} - - -This section was copied from existing reviewed documentation. -**Source:** `docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/observe/open-tracing.rst` -Reviewers: Skip this section. Remove markers after final approval. - - -# Open Tracing in Ledger API Client Applications - -## Introduction - -Distributed tracing is a technique used for troubleshooting performance issues in a microservices environment like Daml Enterprise. Tracing in Canton has been described in a page dedicated to monitoring (Canton Monitoring / Tracing). This guide describes how to write **Ledger API** client applications so that distributed traces and spans can seamlessly continue between the client and Canton software components. - -To study a **Ledger API** client application with OpenTelemetry support in detail, see this [example on GitHub](https://github.com/digital-asset/ex-java-bindings-with-opentelemetry). - -The example implements a variation of the already familiar `PingPong` application where every call to the **Ledger API** is decorated with an OpenTelemetry trace context and demonstrates how to retrieve the trace context from past transactions. - -To familiarize yourself with the broader topic of open tracing, consult the official pages of [the OpenTelemetry project](https://opentelemetry.io/). To find out more about open tracing in Java, the documentation on [Java OpenTelemetry instrumentation](https://opentelemetry.io/docs/instrumentation/java/) is an excellent source of references and examples. 
- -## Set Up an OpenTelemetry Environment - -To observe distributed tracing in action, you first need to start an OpenTelemetry backend server. Canton supports Jaeger, Zipkin, or OTLP formats. To start a Jaeger server you can use the following docker command: - - docker run --rm -it --name jaeger\ - -p 16686:16686 \ - -p 14250:14250 \ - jaegertracing/all-in-one:1.22.0 - -You also have to start Canton with OpenTelemetry exporting enabled. You can achieve it by defining a new `jaeger.conf` configuration file: - -``` -canton.monitoring.tracing.tracer.exporter { - type = jaeger - address = "localhost" // it's the default, so can be omitted - port = 14250 // it's the default, so can be omitted -} -``` - -Next, launch a small Canton installation combining the `jaeger.conf` into the configuration mix: - - bin/canton -c examples/01-simple-topology/simple-topology.conf -c jaeger.conf - -## Add Project Dependencies - -To use the OpenTelemetry libraries, add the following **Maven** dependencies to your project's `pom.xml`: - -```java - - io.opentelemetry - opentelemetry-api - 1.29.0 - - - io.opentelemetry - opentelemetry-exporter-jaeger - 1.29.0 - - - io.opentelemetry - opentelemetry-sdk - 1.29.0 - - - io.opentelemetry.instrumentation - opentelemetry-grpc-1.6 - 1.29.0-alpha - -``` - - -Replace the version number in each dependency with the version you want to use. To find available versions, check the [Maven Central Repository](https://search.maven.org/artifact/io.opentelemetry/opentelemetry-api). - - -## Initialize - -An application that wants to use OpenTelemetry must initialize a number of global controller objects that orchestrate different aspects of the distributed tracing process such as span creation, propagation, and export. The exact set of controllers needed may vary from application to application. 
You may draw some inspiration from the selection used in the example inside [the OpenTelemetryUtil.createOpenTelemetry method](https://github.com/digital-asset/ex-java-bindings-with-opentelemetry/blob/master/src/main/java/examples/pingpong/codegen/OpenTelemetryUtil.java). This is the minimum set required for a fully functional Jaeger trace reporting. - -The next step is to initialize the GRPCTelemetry controller, which is responsible for the propagation of the trace contexts inside the HTTP2 headers of the gRPC communication. - -The example wraps the necessary initialization steps in the constructor of the OpenTelemetryUtil class. All you have to do is call: - -```java -OpenTelemetryUtil openTelemetry = new OpenTelemetryUtil(APP_ID); -``` - -The GRPCTelemetry controller can construct client call interceptors that need to be mounted on top of the **Netty** channels used in the gRPC communication. The example provides a useful helper method called `withClientInterceptor` that injects an interceptor at the channel builder level: - -```java -ManagedChannel channel = openTelemetry.withClientInterceptor( - ManagedChannelBuilder - .forAddress(host, port) - .usePlaintext() - ) - .build(); -``` - -And with that, you are all set to start generating own spans, reporting them to the **Jaeger** server and also propagating them transparently to the **Ledger API**. - -## Start New Spans - -Before making a gRPC call, you must generate a new span to cover the multi-component interaction that is about to be initiated. 
The example provides a useful combinator called `runInNewSpan` that wraps the execution of an arbitrary function in a newly generated span: - -```java -public R runInNewSpan(String spanName, Supplier body) { - Span span = tracer.spanBuilder(spanName).startSpan(); - try(Scope ignored = span.makeCurrent()) { - return body.get(); - } finally { - span.end(); - } -} -``` - -You can use it on a command submission as follows: - -```java -openTelemetry.runInNewSpan("createInitialContracts", () -> submissionService.submit(request)); -``` - -The gRPC interceptors that were mounted at the initialization stage do the rest of the work behind the scenes making sure that the spans make it across to the Canton. - -## Continue Spans Across Different Applications - -Sometimes you may wish to continue the same span across multiple Daml transactions forming a single workflow. This may be especially interesting when different client application instances interact through the ledger and yet their entire conversation should be seen as a single coherent succession of spans. In that case, it is possible to extract the trace context associated with the past transactions from the Transaction, TransactionTree, or Completion records that are returned from the following **Ledger API** calls: - -- `UpdateService.GetUpdates` -- `UpdateService.GetUpdateTrees` -- `UpdateService.GetTransactionByOffset` -- `UpdateService.GetTransactionById` -- `UpdateService.GetTransactionTreeByOffset` -- `UpdateService.GetTransactionTreeById` -- `UpdateService.GetUpdateByOffset` -- `UpdateService.GetUpdateById` -- `CommandCompletionService.CompletionStream` - -You can extract the context by using a helper function implemented in the example: - -```java -Context extractedContext = openTelemetry.contextFromDamlTraceContext(tx.getTraceContext()); -``` - -The extracted context then has to be elevated to the status of the current context. 
Doing this allows the continuation of the original trace context into the present operation. Again the example provides a convenient combinator for that: - -```java -openTelemetry.runInOpenTelemetryScope(extractedContext, () -> ... ); -``` - -Finally, you generate a new span within the original context. You can use the already familiar `runInNewSpan` method: - -```java -openTelemetry.runInNewSpan("follow", () -> - submissionService.submit(SubmitRequest.toProto(ledgerId, commandsSubmission)) -) -``` - -## Put It All Together - -When the client applications follow the rules and pass the trace contexts without interruption, it becomes possible to witness the entire workflow as one long succession of spans in Jaeger UI. The span diagram collected while running the example application is shown below: - -
-images/jaegerPingSpans.png -
- -{/* COPIED_END */} - - -{/* COPIED_START source="docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/secure/authorization.rst" hash="04aa3ece" */} - - -This section was copied from existing reviewed documentation. -**Source:** `docs-website:docs/replicated/daml/3.4/sdk/sdlc-howtos/applications/secure/authorization.rst` -Reviewers: Skip this section. Remove markers after final approval. - - -# Authorization - -When developing Daml applications using SDK tools, your local setup will most likely not perform any Ledger API request authorization --by default, any valid Ledger API request will be accepted by the sandbox. - -This is not the case for participant nodes of deployed ledgers. For every Ledger API request, the participant node checks whether the request contains an access token that is valid and sufficient to authorize that request. You thus need to add support for authorization using access tokens to your application to run it against a deployed ledger. - - -In case of mutual (two-way) TLS authentication, the Ledger API client must present its certificate (in addition to an access token) to the Ledger API server as part of the authentication process. The provided certificate must be signed by a certificate authority (CA) trusted by the Ledger API server. Note that the identity of the application will not be proven by using this method, i.e. the `application_id` field in the request is not necessarily correlated with the CN (Common Name) in the certificate. - - -## Basic interaction - -Your Daml application sends requests to the Ledger API exposed by a participant node to submit changes to the ledger (e.g., "*exercise choice X on contract Y as party Alice*"), or to read data from the ledger (e.g., "*read all active contracts visible to party Alice*"). - -Whether a participant node *can* serve such a request depends on whether the participant node hosts the respective parties, and whether the request is valid according to the Daml Ledger Model. 
Whether a participant node *will* serve such a request to a Daml application depends on whether the request includes an access token that is valid and sufficient to authorize the request for this participant node. - -## Acquire and Use Access Tokens - -How an application acquires access tokens depends on the participant node it talks to and is ultimately set up by the participant node operator. Many setups use a flow in the style of [OAuth 2.0](https://oauth.net/2/). - -In this scenario, the Daml application first contacts a token issuer to get an access token. The token issuer verifies the identity of the requesting application, looks up the privileges of the application, and generates a signed access token describing those privileges. - -Once the access token is issued, the Daml application sends it along with every Ledger API request. The Daml ledger verifies: - -- that the token was issued by one of its trusted token issuers -- that the token has not been tampered with -- that the token has not expired -- that the privileges carried by the token authorize the request - -![A flowchart illustrating the process of authentication described in the two paragraphs immediately above.](./images/Authentication.svg) - -How you attach tokens to requests depends on the tool or library you use to interact with the Ledger API. See the tool's or library's documentation for more information. (E.g. relevant documentation to access the gRPC Ledger API using Java bindings and the JSON Ledger API.) - -## Access Token Formats - -Applications should treat access tokens as opaque blobs. However, as an application developer it can be helpful to understand the format of access tokens to debug problems. - -All Daml ledgers represent access tokens as [JSON Web Tokens (JWTs)](https://datatracker.ietf.org/doc/html/rfc7519). - - -To generate access tokens for testing purposes, you can use the [jwt.io](https://jwt.io/) web site. 
- - -## Access Tokens and Rights - -Access tokens contain information about the rights granted to the bearer of the token. These rights are specific to the API being accessed. - -The Ledger API uses the following rights to govern request authorization: - -- `public`: the right to retrieve publicly available information, such as the ledger identity -- `participant_admin`: the right to administer the participant node -- `idp_admin`: the right to administer the users and parties belonging the same identity provider configuration as the authenticated user -- `canReadAs(p)`: the right to read information off the ledger (like the active contracts) visible to the party `p` -- `canActsAs(p)`: same as `canReadAs(p)`, with the added right of issuing commands on behalf of the party `p` - -The following table summarizes the rights required to access each Ledger API endpoint: - -| Ledger API service | Endpoint | Required right | -|-------------------------------|--------------------------------------------------------------|------------------------------------------------------| -| StateService | GetActiveContracts | for each requested party p: canReadAs(p) | -| CommandCompletionService | CompletionEnd | public | -| | CompletionStream | for each requested party p: canReadAs(p) | -| CommandSubmissionService | Submit | for submitting party p: canActAs(p) | -| CommandService | All | for submitting party p: canActAs(p) | -| EventQueryService | All | for each requesting party p: canReadAs(p) | -| Health | All | no access token required for health checking | -| IdentityProviderConfigService | All | participant_admin | -| PackageService | All | public | -| PackageManagementService | All | participant_admin | -| PartyManagementService | All | participant_admin | -| | All (except GetParticipantId, UpdatePartyIdentityProviderId) | idp_admin | -| ParticipantPruningService | All | participant_admin | -| ServerReflection | All | no access token required for gRPC service reflection | -| 
TimeService | GetTime | public | -| | SetTime | participant_admin | -| UpdateService | LedgerEnd | public | -| | All (except LedgerEnd) | for each requested party p: canReadAs(p) | -| UserManagementService | All | participant_admin | -| | All (except UpdateUserIdentityProviderId) | idp_admin | -| | GetUser | authenticated users can get their own user | -| | ListUserRights | authenticated users can list their own rights | -| VersionService | All | public | - -## User Access Tokens - -A participant node stores a dynamic set of users as well as their rights. User access tokens encode such participant user on whose behalf the request is issued. - -When handling such requests, participant nodes look up the participant user's current rights before checking request authorization per the table above. Thus the rights granted to an application can be changed dynamically using the participant User Management Service *without* issuing new access tokens. - -User access tokens are [JWTs](https://datatracker.ietf.org/doc/html/rfc7519) that follow the [OAuth 2.0 standard](https://datatracker.ietf.org/doc/html/rfc6749). There are two different JSON encodings: An audience-based token format that relies on the audience field to specify that it is designated for a specific Daml participant and a scope-based token format which relies on the scope field to designate the purpose. Both formats can be used interchangeably but if possible, use of the audience-based token format, as it is compatible with a wider range of IAMs, e.g. Kubernetes does not support setting the scope field and makes the participant id mandatory which prevents misuse of a token on a different participant. 
- -### Audience-Based Tokens - -```json -{ - "aud": "https://daml.com/jwt/aud/participant/someParticipantId", - "sub": "someUserId", - "iss": "someIdpId", - "exp": 1300819380 -} -``` - -To interpret the above notation: - -- `aud` is a required field which restricts the token to participant nodes with the given ID (e.g. `someParticipantId`) -- `sub` is a required field which specifies the participant user's ID -- `iss` is a field which specifies the identity provider id -- `exp` is an optional field which specifies the JWT expiration date (in seconds since EPOCH) - -### Scope-Based Tokens - -```json -{ - "aud": "someParticipantId", - "sub": "someUserId", - "exp": 1300819380, - "iss": "someIdpId", - "scope": "daml_ledger_api" -} -``` - -To interpret the above notation: - -- `aud` is an optional field which restricts the token to participant nodes with the given ID -- `sub` is a required field which specifies the participant user's ID -- `iss` is a field which specifies the identity provider id -- `exp` is an optional field which specifies the JWT expiration date (in seconds since EPOCH) -- `scope` is a space-separated list of [OAuth 2.0 scopes](https://datatracker.ietf.org/doc/html/rfc6749#section-3.3) that must contain the `"daml_ledger_api"` scope - -### Requirements for User IDs - -User IDs must be non-empty strings of at most 128 characters that are either alphanumeric ASCII characters or one of the symbols "@^\$.!\`-#+'~\_\|:()". - -### Identity providers - -An identity provider configuration can be thought of as a set of participant users which: -- Have a defined way to verify their access tokens -- Can be administered in isolation from the rest of the users on the same participant node -- Have an identity provider id unique per participant node -- Have a related set of parties that share the same identity provider id - -A participant node always has a statically configured default identity provider configuration whose id is the empty string `""`. 
Additionally, you can configure a small number of non-default identity providers using `IdentityProviderConfigService` by supplying a non-empty identity provider id and a [JWK Set](https://datatracker.ietf.org/doc/html/rfc7517) URL which the participant node will use to retrieve the cryptographic data needed to verify the access tokens. - -When authenticating as a user from a non-default identity provider configuration, your access tokens must contain the `iss` field whose value matches the identity provider id. In case of the default identity provider configuration, the `iss` field can be empty or omitted from the access tokens. - -## Encoding and Signature - -Access tokens conforming to the JWT specification are embedded in a larger JSON structure with a separate header and payload. - -```json -{ - "alg": "RS256", - "typ": "JWT" -} -{ - "aud": "https://daml.com/jwt/aud/participant/someParticipantId", - "sub": "someUserId", - "iss": "someIdpId", - "exp": 1300819380 -} -``` - -Together they are then base64 encoded, forming the final token's stem. Subsequently, the stem is signed using the cryptographic algorithm identified in the header. The signature itself is also base64-encoded and appended to the stem. 
The resulting character string takes a shape similar to - -```bash -eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJodHRwczovL2RhbWwuY29tL2p3dC9hdWQvcGFydGljaXBhbnQvc29tZVBhcnRpY2lwYW50SWQiLCJzdWIiOiJzb21lVXNlcklkIiwiaXNzIjoic29tZUlkcElkIiwiZXhwIjoxMzAwODE5MzgwfQ.DLVPehRLt8WiddI6mwUU1lqIgRbysLK34mgkuzSDQTThCXlEY_S57SHKEQHw-Pai0Y0OeGP7wNsT6uq51vBVbRNfxOLwy5owQRm3LEeTbSXMjnnPVrtRrhelVQCsH2AcV4J4bbrAe6YfKGYFBXZOfeRL3Gy7KIplcfxDZekHdPD8lhwK8AkvAR4IaOX72Q5jhjB2yOY9IwpVxx-pN0vWCqmxTbQqnIpSGo185Y0f38nKZeofGT5jcJZaSv7z4Ks15gs9gm1pHorEL6TZLCbX7T064hQeTBFea-kxQlUkcfcgmUOMAmA05_4a8fdFz2uHq5km7ylp6pUITogN5MJ-_CVFEwOD0GveOgiUJBBMHDBjq_V_DfRE4nZ04tFQ0DDthWpMd0F59JFIhmjZSZT9DWppj6G7VBWpu9aIFPefyX--2U_aO0Smt_dBBV5A6pvbIgX6ITF2tjEvvOCLHtLKmNTlP8cclna70DCsDIrojNVDMFpLXYLvsP6DhQWkGaRb-nz0hLjQE_PtuQzSexrZG5d8tHFS351E2-aUVTKoJuEGHH3n1it-d9yHdt4fAynIbhWUVAervxc-oXyrA3-uafrxbIiQCpnw0kQ8K-HwJpkfz_Yqf-luI1FaRiPT9F-cYzwvceNf2_2hhmiuGiYp3rVIPwkFAuBc1vgpPiWSNLc -``` - -Note that access token generation in the correct format is typically delegated to the identity provider systems. Client application developers are unlikely to need to deal with it directly. - -## Token expiration - -JWT token-based authorization is inherently stateless, offering excellent scalability and eliminating the need for servers to manage client sessions or perform costly claim verification checks. However, this stateless nature means JWT tokens cannot be revoked. - -To mitigate the risk associated with token loss or theft, we *strongly recommend* to follow the standard practice for systems utilizing JWT tokens: configure the IAM system to issue short-lived tokens, ideally lasting between 5 and 15 minutes. This limits the time window during which unauthorized actors can access the system. - -Using long-lived tokens goes against best practices and risks a costly reconfiguration of your IAM token issuance mechanism should a token be compromised. A token loss may necessitate rotating the token signing key. 
This action invalidates all outstanding tokens through the JSON Web Key Set (JWKS) mechanism. Consult your IAM system's documentation for detailed strategies on mitigating JWT token theft. - -{/* COPIED_END */} +## Advanced Topics +- [Open Tracing in Ledger API Client Applications](/appdev/deep-dives/open-tracing) — Adding OpenTelemetry-based distributed tracing to applications using the Ledger API. +- [Authorization](/appdev/deep-dives/authorization) — Access tokens, identity providers, scopes, and rights for the Ledger API. diff --git a/docs-main/appdev/troubleshooting-guide/development-issues.mdx b/docs-main/appdev/troubleshooting-guide/development-issues.mdx index f95ee3886..7161e40f3 100644 --- a/docs-main/appdev/troubleshooting-guide/development-issues.mdx +++ b/docs-main/appdev/troubleshooting-guide/development-issues.mdx @@ -141,9 +141,7 @@ This is expected behavior in Canton's UTXO-based model. The fix depends on your If your transaction references a package the validator does not know about, you get: -``` -NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package -``` +``*NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package*`` Upload your DAR file before submitting transactions: diff --git a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx index bc792e311..b24260135 100644 --- a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx +++ b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx @@ -73,9 +73,7 @@ INVALID_ARGUMENT: ### NOT_FOUND -``` -NOT_FOUND: CONTRACT_NOT_FOUND - Contract could not be found with id -``` +``*NOT_FOUND: CONTRACT_NOT_FOUND - Contract could not be found with id*`` **Cause:** The contract has already been archived (consumed by a previous exercise), or it was never visible to the submitting party. 
diff --git a/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx b/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx index c2a22d43d..ab0b7bbfc 100644 --- a/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx +++ b/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx @@ -21,9 +21,7 @@ UNAUTHENTICATED: Could not verify JWT token ### PACKAGE_NOT_FOUND -``` -NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package -``` +`NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package` **Cause:** The validator does not have the DAR that contains the referenced package, or the package has not been vetted. diff --git a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx index 24935cc84..45cc971bb 100644 --- a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx +++ b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx @@ -51,9 +51,7 @@ On MainNet, CC has real value. Monitor your balance and set up auto-top-up befor When you deploy a new version of your Daml package alongside an existing version, both must be uploaded and vetted on all validators that process your contracts. If validator A has package v2 but validator B only has v1, transactions involving both parties will fail. -``` -NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package -``` +`NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package` **Fix:** Synchronize package deployment across all validators before submitting transactions that reference the new package. Use the SCU (Smart Contract Upgrade) mechanism so that existing contracts on v1 can be exercised with v2 code.
diff --git a/docs-main/docs.json b/docs-main/docs.json index c7f5e4a5e..24c98fc9d 100644 --- a/docs-main/docs.json +++ b/docs-main/docs.json @@ -475,6 +475,8 @@ "appdev/deep-dives/explicit-contract-disclosure", "appdev/deep-dives/smart-contract-upgrading-reference", "appdev/deep-dives/values-in-the-ledger-api", + "appdev/deep-dives/open-tracing", + "appdev/deep-dives/authorization", { "group": "QuickStart", "pages": [ @@ -648,6 +650,8 @@ "appdev/deep-dives/explicit-contract-disclosure", "appdev/deep-dives/smart-contract-upgrading-reference", "appdev/deep-dives/values-in-the-ledger-api", + "appdev/deep-dives/open-tracing", + "appdev/deep-dives/authorization", { "group": "QuickStart", "pages": [ @@ -821,6 +825,8 @@ "appdev/deep-dives/explicit-contract-disclosure", "appdev/deep-dives/smart-contract-upgrading-reference", "appdev/deep-dives/values-in-the-ledger-api", + "appdev/deep-dives/open-tracing", + "appdev/deep-dives/authorization", { "group": "QuickStart", "pages": [ diff --git a/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx b/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx index dd01fcc31..73dd20041 100644 --- a/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx +++ b/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx @@ -11,11 +11,9 @@ This section was copied from existing reviewed documentation. Reviewers: Skip this section. Remove markers after final approval. -orphan - - +
change the section where we provision smart contract code: - create a new empty project - use the "Understanding IOUs" section to explain the structure of a daml contract (`https://docs.daml.com/app-dev/bindings-java/quickstart.html`) - transact on the IOU contract using curl and JSON Ledger API, not via console commands - +
# Getting Started diff --git a/docs-main/global-synchronizer/deployment/configuration.mdx b/docs-main/global-synchronizer/deployment/configuration.mdx index 95211aa25..802bc7145 100644 --- a/docs-main/global-synchronizer/deployment/configuration.mdx +++ b/docs-main/global-synchronizer/deployment/configuration.mdx @@ -87,13 +87,13 @@ You do not need to manually configure sequencer URLs — the onboarding process **Docker Compose** uses environment variables: -```bash +```bash # In .env or compose.yaml environment section POSTGRES_HOST=localhost POSTGRES_PORT=5432 POSTGRES_USER=canton -POSTGRES_PASSWORD= -``` +POSTGRES_PASSWORD= +``` **Kubernetes** uses Helm values: diff --git a/docs-main/global-synchronizer/deployment/onboarding-process.mdx b/docs-main/global-synchronizer/deployment/onboarding-process.mdx index 041ec7eeb..a11bc3db4 100644 --- a/docs-main/global-synchronizer/deployment/onboarding-process.mdx +++ b/docs-main/global-synchronizer/deployment/onboarding-process.mdx @@ -72,9 +72,9 @@ At this point this can also be accomplished by connecting your validator through 3. If you want to access the Canton Coin Scan Web UI from your laptop, you also need to ensure that you can connect to a VPN operated by one of the SVs. This is required as laptops usually do not have static IP addresses and the Scan web UI is not (yet) fully public. If you can use your validator egress IP also for browsing the web UI this is not necessary. -4. Request an onboarding secret from your SV sponsor. On DevNet, you can do this yourself through an API call (refer to `Deployment instructions ` for details). On TestNet and MainNet your SV sponsor needs to provide you with this manually. Note that onboarding secrets are only valid for 48 hours and are one-time use, and self-generated DevNet secrets are only valid for 1 hour. If it expired, you need to request a new one. +4. Request an onboarding secret from your SV sponsor.
On DevNet, you can do this yourself through an API call (refer to *Deployment instructions* for details). On TestNet and MainNet your SV sponsor needs to provide you with this manually. Note that onboarding secrets are only valid for 48 hours and are one-time use, and self-generated DevNet secrets are only valid for 1 hour. If it expired, you need to request a new one. -5. Deploy your node either using docker compose or Kubernetes. Refer to the `Deployment Options ` for information on how to choose between them and references to each of the two approaches. You will need to make sure that all IP traffic going from your validator to the SVs uses the egress IP you provided to your SV sponsor and you need to provide the onboarding secret. +5. Deploy your node either using docker compose or Kubernetes. Refer to the *Deployment Options* for information on how to choose between them and references to each of the two approaches. You will need to make sure that all IP traffic going from your validator to the SVs uses the egress IP you provided to your SV sponsor and you need to provide the onboarding secret. {/* COPIED_END */} diff --git a/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync.mdx b/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync.mdx index 485be08f8..e644843fa 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/linking-validator-multi-sync.mdx @@ -146,7 +146,7 @@ To connect a Participant Node to a Sequencer by reference (local or remote refer synchronizerAlias : String = "mysynchronizer" ``` -2\. Set Optional Sequencer Connection Validation. This parameter determines how thoroughly the provided Sequencer connections are validated before being persisted. +2. Set Optional Sequencer Connection Validation. 
This parameter determines how thoroughly the provided Sequencer connections are validated before being persisted. ``` none @ val sequencerConnectionValidation = com.digitalasset.canton.sequencing.SequencerConnectionValidation.Active @@ -172,7 +172,7 @@ To connect to a remote Sequencer: sequencerUrl : String = "https://127.0.0.1:30147" ``` -2\. Provide custom certificates (if necessary). If the Sequencer uses TLS certificates that cannot be automatically validated using your JVM's trust store (for example, self-signed certificates), you have to provide a custom certificate such that the client can verify the Sequencer's public API TLS certificate. The operate must trust the custom root CA. Let's assume that this root certificate is given by: +2. Provide custom certificates (if necessary). If the Sequencer uses TLS certificates that cannot be automatically validated using your JVM's trust store (for example, self-signed certificates), you have to provide a custom certificate such that the client can verify the Sequencer's public API TLS certificate. The operate must trust the custom root CA. Let's assume that this root certificate is given by: ``` none @ val certificatesPath = "tls/root-ca.crt" @@ -516,7 +516,7 @@ You can modify the Sequencer liveness margin to control the resilience to faulty ### Update request amplification -1\. Configure submission request amplification. Amplification makes Sequencer clients send eligible submission requests to multiple Sequencers to overcome message loss in faulty Sequencers. +1. Configure submission request amplification. Amplification makes Sequencer clients send eligible submission requests to multiple Sequencers to overcome message loss in faulty Sequencers. ``` none @ participantReference.synchronizers.modify("mysynchronizer", _.withSubmissionRequestAmplification(SubmissionRequestAmplification.NoAmplification)) @@ -589,7 +589,7 @@ For Mediators, update the certificate settings using a similar approach. 
res30: Boolean = true ``` -4\. To reconnect all Synchronizers that are not configured to require a manual connection, use the following command: +4. To reconnect all Synchronizers that are not configured to require a manual connection, use the following command: ``` none @ participantReference.synchronizers.reconnect_all() diff --git a/docs-main/global-synchronizer/extension-synchronizers/synchronizer-operations.mdx b/docs-main/global-synchronizer/extension-synchronizers/synchronizer-operations.mdx index 109a5fcc0..fd75aeedf 100644 --- a/docs-main/global-synchronizer/extension-synchronizers/synchronizer-operations.mdx +++ b/docs-main/global-synchronizer/extension-synchronizers/synchronizer-operations.mdx @@ -81,7 +81,9 @@ This how-to uses two Sequencer nodes and two Mediator nodes. The Sequencer nodes This diagram illustrates the exchange of information between the operators: -image +
+ Sequence diagram of information exchange between Synchronizer operators during decentralized bootstrap +
Before proceeding, ensure that all of the nodes in the decentralized Synchronizer are started. diff --git a/docs-main/global-synchronizer/production-operations/kms-operations.mdx b/docs-main/global-synchronizer/production-operations/kms-operations.mdx index 768e58bb4..6e28fd8bb 100644 --- a/docs-main/global-synchronizer/production-operations/kms-operations.mdx +++ b/docs-main/global-synchronizer/production-operations/kms-operations.mdx @@ -534,10 +534,10 @@ Canton allows integration with a variety of KMS and HSM solutions through a KMS Configuring Canton to run with a KMS Driver is done similarly to other KMS providers by specifying: -``` -type = driver -name = -``` +``` +type = driver +name = +``` For example, for a Participant named \`participant1\`: diff --git a/docs-main/global-synchronizer/production-operations/logging.mdx b/docs-main/global-synchronizer/production-operations/logging.mdx index c1ae7aa99..ffd3026c3 100644 --- a/docs-main/global-synchronizer/production-operations/logging.mdx +++ b/docs-main/global-synchronizer/production-operations/logging.mdx @@ -31,7 +31,7 @@ We recommend to use `lnav` to read the logs. A guideline is provided in [this do Logging in kubernetes (note that this only provides logs for a limited timeframe): -- `kubectl describe pod ` to get a detailed status of the given pod, +- `kubectl describe pod <pod-name>` to get a detailed status of the given pod, - `kubectl logs -n ` or `kubectl logs -l app= -n --tail=-1` to get logs for a given pod in a given namespace. diff --git a/docs-main/global-synchronizer/production-operations/party-management.mdx b/docs-main/global-synchronizer/production-operations/party-management.mdx index 6a49e5de1..4e8f718fb 100644 --- a/docs-main/global-synchronizer/production-operations/party-management.mdx +++ b/docs-main/global-synchronizer/production-operations/party-management.mdx @@ -25,7 +25,7 @@ The following section explains how to onboard a (local) party.
Refer to the foll If you have access to the ledger API, you can onboard a new party using the `parties` command. This command is simply a wrapper around the underlying Ledger API endpoints. For more information, see the Ledger API documentation. -1\. Define a name for the Party. You can choose the name freely, but it must conform to the following format: `\[a-zA-Z0-9:-\_ \]`, must not exceed 185 characters, must not use two consecutive colons, and must be unique in the namespace. +1. Define a name for the Party. You can choose the name freely, but it must conform to the following format: `\[a-zA-Z0-9:-\_ \]`, must not exceed 185 characters, must not use two consecutive colons, and must be unique in the namespace. For example, we want to host the Party `bob`. @@ -34,14 +34,14 @@ For example, we want to host the Party `bob`. bob : String = "bob" ``` -2\. Specify an optional Synchronizer ID to which the party should be allocated. The participant must be connected to this Synchronizer. You may omit this parameter if the participant is connected to only one Synchronizer, otherwise the party needs to be enabled on each synchronizer explicitly. +2. Specify an optional Synchronizer ID to which the party should be allocated. The participant must be connected to this Synchronizer. You may omit this parameter if the participant is connected to only one Synchronizer, otherwise the party needs to be enabled on each synchronizer explicitly. ``` none @ val synchronizerId = participant1.synchronizers.id_of("my-synchronizer") synchronizerId : SynchronizerId = da::122032922613... ``` -3\. Define optional annotations. These are key-value pairs associated with this party and stored locally on this Ledger API server. Annotations are useful for maintaining metadata about allocated parties. +3. Define optional annotations. These are key-value pairs associated with this party and stored locally on this Ledger API server. 
Annotations are useful for maintaining metadata about allocated parties. ``` none @ val annotations = Map("k1" -> "v1", "k2" -> "v2", "k3" -> "v3") @@ -143,7 +143,7 @@ To find a party, you can use the `list` command. If you need finer control when allocating a party, use the Admin API. To onboard a new party to a Participant Node, follow these steps: -1\. Define a name for the Party (same rules as explained above). For example, we want to host the Party `alice`. +1. Define a name for the Party (same rules as explained above). For example, we want to host the Party `alice`. ``` none @ val alice = "alice" @@ -154,7 +154,7 @@ If you need finer control when allocating a party, use the Admin API. To onboard For more information on namespaces, refer to the Namespaces documentation. -3\. Specify an optional Synchronizer alias to which the party should be allocated. The participant must be connected to this Synchronizer. You may omit this parameter if the participant is connected to only one Synchronizer, otherwise the party needs to be enabled on each synchronizer explicitly. +3. Specify an optional Synchronizer alias to which the party should be allocated. The participant must be connected to this Synchronizer. You may omit this parameter if the participant is connected to only one Synchronizer, otherwise the party needs to be enabled on each synchronizer explicitly. 4. Enable the Party on this participant diff --git a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx index 049478e38..6977318e6 100644 --- a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx +++ b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx @@ -133,7 +133,7 @@ Reviewers: Skip this section. Remove markers after final approval. 1. Confirm migration dump: `docker compose logs validator | grep "Wrote domain migration dump"` 2. 
Stop the validator: `./stop.sh` 3. Update the bundle and `IMAGE_TAG` if needed -4. Restart with incremented migration ID (`-m `) and the `-M` flag to trigger migration +4. Restart with incremented migration ID (*-m*) and the `-M` flag to trigger migration 5. After successful migration: restart without `-M`, keep the new migration ID {/* COPIED_END */} diff --git a/docs-main/global-synchronizer/reference/error-codes.mdx b/docs-main/global-synchronizer/reference/error-codes.mdx index 95aa5f552..f3c6d807a 100644 --- a/docs-main/global-synchronizer/reference/error-codes.mdx +++ b/docs-main/global-synchronizer/reference/error-codes.mdx @@ -33,7 +33,7 @@ Error Categories The error categories allow you to group errors such that application logic can be built to automatically deal with errors and decide whether to retry a request or escalate to the operator. -A full list of error categories is documented `here `. +A full list of error categories is documented *here*. ## Machine Readable Information diff --git a/docs-main/global-synchronizer/reference/observability-configuration.mdx b/docs-main/global-synchronizer/reference/observability-configuration.mdx index 23559f371..46a109100 100644 --- a/docs-main/global-synchronizer/reference/observability-configuration.mdx +++ b/docs-main/global-synchronizer/reference/observability-configuration.mdx @@ -141,2835 +141,7 @@ In addition to Canton metrics, the process can also report Daml metrics (of the ## Metrics -The following sections contain the common metrics exposed for Daml services supporting a Prometheus metrics reporter. - -For the metric types referenced below, see the relevant Prometheus documentation. - -### Participant Metrics - -#### daml.cache.evicted_weight - -> - **Summary**: The sum of weights of cache entries evicted. -> - **Description**: The total weight of the entries evicted from the cache. 
-> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.evictions - -> - **Summary**: The number of the evicted cache entries. -> - **Description**: When an entry is evicted from the cache, the counter is incremented. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.hits - -> - **Summary**: The number of cache hits. -> - **Description**: When a cache lookup encounters an existing cache entry, the counter is incremented. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.misses - -> - **Summary**: The number of cache misses. -> - **Description**: When a cache lookup first encounters a missing cache entry, the counter is incremented. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. 
-> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. 
-> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db.commit\* - -> - **Summary**: The time needed to perform the SQL query commit. -> -> - **Description**: This metric measures the time it takes to commit an SQL transaction relating to the \. It roughly corresponds to calling `commit()` on a DB connection. -> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.db.compression\* - -> - **Summary**: The time needed to decompress the SQL query result. -> -> - **Description**: Some index database queries that target contracts involve a decompression step. For such queries this metric represents the time it takes to decompress contract arguments retrieved from the database. -> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.db.exec\* - -> - **Summary**: The time needed to run the SQL query and read the result. -> -> - **Description**: This metric encompasses the time measured by `query` and `commit` metrics. Additionally it includes the time needed to obtain the DB connection, optionally roll it back and close the connection at the end. 
-> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.db.query\* - -> - **Summary**: The time needed to run the SQL query. -> -> - **Description**: This metric measures the time it takes to execute a block of code (on a dedicated executor) related to the \ that can issue multiple SQL statements such that all run in a single DB transaction (either committed or aborted). -> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.db.translation\* - -> - **Summary**: The time needed to turn serialized Daml-LF values into in-memory objects. -> -> - **Description**: Some index database queries that target contracts and transactions involve a Daml-LF translation step. For such queries this metric stands for the time it takes to turn the serialized Daml-LF values into in-memory representation. -> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.db.wait\* - -> - **Summary**: The time needed to acquire a connection to the database. -> -> - **Description**: SQL statements are run in a dedicated executor. This metric measures the time it takes between creating the SQL statement corresponding to the \ and the point when it starts running on the dedicated executor. -> -> - **Type**: timer -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **name**: The operation/pool for which the metric is registered. - -#### daml.grpc.server - -> - **Summary**: Distribution of the durations of serving gRPC requests. -> - **Description**: -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.grpc.server.handled - -> - **Summary**: Total number of handled gRPC requests. 
-> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received - -> - **Summary**: Total number of gRPC messages received (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages received (both unary and streaming). -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent - -> - **Summary**: Total number of gRPC messages sent (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages sent (both unary and streaming). -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.requests.rejections\* - -> - **Summary**: Number of rejected requests due to active request limits. -> -> - **Description**: Counts the number of requests rejected because the active request limit was reached. -> -> - **Type**: counter -> -> - **Qualification**: Saturation -> -> - **Labels**: -> - **method**: The method / service name limited. -> - **service**: The API the method belongs to -> - **api**: The API the method belongs to - -#### daml.grpc.server.started - -> - **Summary**: Total number of started gRPC requests (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.http.requests - -> - **Summary**: Total number of HTTP requests received. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.http.requests - -> - **Summary**: The duration of the HTTP requests. 
-> - **Description**: -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.http.requests.payload.bytes - -> - **Summary**: Distribution of the sizes of payloads received in HTTP requests. -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.http.responses.payload.bytes - -> - **Summary**: Distribution of the sizes of payloads sent in HTTP responses. -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.http.websocket.messages.received - -> - **Summary**: Total number of received WebSocket messages. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.http.websocket.messages.received.bytes - -> - **Summary**: Distribution of the size of received WebSocket messages. -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.http.websocket.messages.sent - -> - **Summary**: Total number of sent WebSocket messages. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.http.websocket.messages.sent.bytes - -> - **Summary**: Distribution of the size of sent WebSocket messages. -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.commands.delayed_submissions - -> - **Summary**: The number of the delayed Daml commands. -> - **Description**: The number of Daml commands that have been delayed internally because they have been evaluated to require the ledger time further in the future than the expected latency. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.commands.failed_command_interpretations - -> - **Summary**: The number of Daml commands that failed in interpretation. -> - **Description**: The number of Daml commands that have been rejected by the interpreter (e.g. badly authorized action). 
-> - **Type**: meter -> - **Qualification**: Errors - -#### daml.participant.api.commands.interactive_prepares - -> - **Summary**: The time to prepare a transaction for interactive submission. -> - **Description**: The time to validate and interpret a command before it is returned to the caller for external signing. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.participant.api.commands.max_in_flight_capacity - -> - **Summary**: The maximum number of Daml commands that can await completion. -> - **Description**: The maximum number of Daml commands that can await completion in the Command Service. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.commands.max_in_flight_length - -> - **Summary**: The number of the Daml commands awaiting completion. -> - **Description**: The number of the currently Daml commands awaiting completion in the Command Service. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.commands.prepares_running - -> - **Summary**: The number of the Daml commands for which transactions are currently being prepared by the ledger api server. -> - **Description**: The number of the Daml commands that are currently being prepared by the ledger api server (including validation, interpretation). -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.participant.api.commands.reassignment_validation - -> - **Summary**: The time to validate a reassignment command. -> - **Description**: The time to validate a submitted Daml command before is fed to the interpreter. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.commands.submissions - -> - **Summary**: The time to fully process a Daml command. -> - **Description**: The time to validate and interpret a command before it is handed over to the synchronization services to be finalized (either committed or rejected). 
-> - **Type**: timer -> - **Qualification**: Latency - -#### daml.participant.api.commands.submissions_running - -> - **Summary**: The number of the Daml commands that are currently being handled by the ledger api server. -> - **Description**: The number of the Daml commands that are currently being handled by the ledger api server (including validation, interpretation, and handing the transaction over to the synchronization services). -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.participant.api.commands.valid_submissions - -> - **Summary**: The total number of the valid Daml commands. -> - **Description**: The total number of the Daml commands that have passed validation and were sent to interpretation in this ledger api server process. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.commands.validation - -> - **Summary**: The time to validate a Daml command. -> - **Description**: The time to validate a submitted Daml command before is fed to the interpreter. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.contract_store.lookup_batched - -> - **Summary**: The time to execute batched contract lookup. -> - **Description**: The time to enqueue and execute batched contract lookup. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.contract_store.lookup_batched_contract_ids - -> - **Summary**: The time to execute batched contract id lookup. -> - **Description**: The time to enqueue and execute batched contract id lookup. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.contract_store.lookup_batched_internal_ids - -> - **Summary**: The time to execute batched internal id lookup. -> - **Description**: The time to enqueue and execute batched internal id lookup. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.contract_store.lookup_persisted - -> - **Summary**: The time to lookup persisted contract by LF contract id. -> - **Description**: The time to enqueue and execute the lookup for persisted contract by LF contract id. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.cache.contract_state.register_update - -> - **Summary**: The time spent to update the contract state cache. -> - **Description**: The total time spent in sequential update steps of the contract state caches updating logic. This metric is created with debugging purposes in mind. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.cache.key_state.register_update - -> - **Summary**: The time spent to update the key state cache. -> - **Description**: The total time spent in sequential update steps of the key state caches updating logic. This metric is created with debugging purposes in mind. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.engine - -> - **Summary**: The time spent executing a Daml command. -> - **Description**: The time spent by the Daml engine executing a Daml command (excluding fetching data). -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.engine_running - -> - **Summary**: The number of Daml commands currently being executed. -> - **Description**: The number of the commands that are currently being executed by the Daml engine (excluding fetching data). -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.execution.get_lf_package - -> - **Summary**: The time to fetch individual Daml code packages during interpretation. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple Daml packages. 
This metric exposes the time needed to fetch the packages that are necessary for interpretation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_active_contract - -> - **Summary**: The time to lookup individual active contracts during interpretation. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple active contracts. This metric exposes the time to lookup individual active contracts. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_active_contract_count_per_execution - -> - **Summary**: The number of the active contracts looked up per Daml command. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple active contracts. This metric exposes the number of active contracts that must be looked up to process a Daml command. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_active_contract_per_execution - -> - **Summary**: The compound time to lookup all active contracts in a single Daml command. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple active contracts. This metric exposes the compound time to lookup all the active contracts in a single Daml command. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_contract_key - -> - **Summary**: The time to lookup individual contract keys during interpretation. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple contract keys. This metric exposes the time needed to lookup individual contract keys. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_contract_key_count_per_execution - -> - **Summary**: The number of contract keys looked up per Daml command. 
-> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple contract keys. This metric exposes the number of contract keys that must be looked up to process a Daml command. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.execution.lookup_contract_key_per_execution - -> - **Summary**: The compound time to lookup all contract keys in a single Daml command. -> - **Description**: The interpretation of a command in the ledger api server might require fetching multiple contract keys. This metric exposes the compound time needed to lookup all the contract keys in a single Daml command. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.retry - -> - **Summary**: The number of the interpretation retries. -> - **Description**: The total number of interpretation retries attempted due to mismatching ledger effective time in this ledger api server process. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.execution.total - -> - **Summary**: The overall time spent interpreting a Daml command. -> - **Description**: The time spent interpreting a Daml command in the ledger api server (includes executing Daml and fetching data). -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.execution.total_running - -> - **Summary**: The number of Daml commands currently being interpreted. -> - **Description**: The number of the commands that are currently being interpreted (includes executing Daml code and fetching data). -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.active_contracts_buffer_size - -> - **Summary**: The buffer size for active contracts requests. -> - **Description**: A Pekko stream buffer is added at the end of all streaming queries, allowing to absorb temporary downstream backpressure (e.g. when the client is slower than upstream delivery throughput). 
This metric gauges the size of the buffer for queries requesting active contracts that satisfy a given predicate. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.completions_buffer_size - -> - **Summary**: The buffer size for completions requests. -> - **Description**: A Pekko stream buffer is added at the end of all streaming queries, allowing to absorb temporary downstream backpressure (e.g. when the client is slower than upstream delivery throughput). This metric gauges the size of the buffer for queries requesting the completed commands in a specific period of time. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_keys_lookup.batch.batch_size - -> - **Summary**: The batch sizes in the lookup batch-loading Contract Service. -> - **Description**: The number of lookups contained in a batch, used in the batch-loading Contract Service. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_keys_lookup.batch.buffer_capacity - -> - **Summary**: The capacity of the lookup queue. -> - **Description**: The maximum number of elements that can be kept in the queue of lookups in the batch-loading queue of the Contract Service. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_keys_lookup.batch.buffer_delay - -> - **Summary**: The queuing delay for the lookup queue. -> - **Description**: The queuing delay for the pending lookups in the batch-loading queue of the Contract Service. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_keys_lookup.batch.buffer_length - -> - **Summary**: The number of the currently pending lookups. -> - **Description**: The number of the currently pending lookups in the batch-loading queue of the Contract Service. 
-> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_lookup.batch.batch_size - -> - **Summary**: The batch sizes in the lookup batch-loading Contract Service. -> - **Description**: The number of lookups contained in a batch, used in the batch-loading Contract Service. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_lookup.batch.buffer_capacity - -> - **Summary**: The capacity of the lookup queue. -> - **Description**: The maximum number of elements that can be kept in the queue of lookups in the batch-loading queue of the Contract Service. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_lookup.batch.buffer_delay - -> - **Summary**: The queuing delay for the lookup queue. -> - **Description**: The queuing delay for the pending lookups in the batch-loading queue of the Contract Service. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.db.active_contract_lookup.batch.buffer_length - -> - **Summary**: The number of the currently pending lookups. -> - **Description**: The number of the currently pending lookups in the batch-loading queue of the Contract Service. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.db.flat_transactions_stream.translation - -> - **Summary**: The time needed to turn serialized Daml-LF values into in-memory objects. -> - **Description**: Some index database queries that target contracts and transactions involve a Daml-LF translation step. For such queries this metric stands for the time it takes to turn the serialized Daml-LF values into in-memory representation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.db.lookup_active_contract - -> - **Summary**: The time spent fetching a contract using its id. 
-> - **Description**: This metric exposes the time spent fetching a contract using its id from the index db. It is then used by the Daml interpreter when evaluating a command into a transaction. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.db.lookup_key - -> - **Summary**: The time spent looking up a contract using its key. -> - **Description**: This metric exposes the time spent looking up a contract using its key in the index db. It is then used by the Daml interpreter when evaluating a command into a transaction. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.db.tree_transactions_stream.translation - -> - **Summary**: The time needed to turn serialized Daml-LF values into in-memory objects. -> - **Description**: Some index database queries that target contracts and transactions involve a Daml-LF translation step. For such queries this metric stands for the time it takes to turn the serialized Daml-LF values into in-memory representation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.index.ledger_end_sequential_id - -> - **Summary**: The sequential id of the current ledger end kept in memory. -> - **Description**: The ledger end's sequential id is a monotonically increasing integer value representing the sequential id ascribed to the most recent ledger event ingested by the index db. Please note, that only a subset of all ledger events are ingested and given a sequential id. These are: creates, consuming exercises, non-consuming exercises and divulgence events. This value can be treated as a counter of all such events visible to a given participant. This metric exposes the latest ledger end's sequential id registered in the in-memory data set. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.api.index.transaction_trees_buffer_size - -> - **Summary**: The buffer size for transaction trees requests. 
-> - **Description**: A Pekko stream buffer is added at the end of all streaming queries, allowing to absorb temporary downstream backpressure (e.g. when the client is slower than upstream delivery throughput). This metric gauges the size of the buffer for queries requesting transaction trees. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.index.updates_buffer_size - -> - **Summary**: The buffer size for streaming updates requests. -> - **Description**: A Pekko stream buffer is added at the end of all streaming queries, allowing to absorb temporary downstream backpressure (e.g. when the client is slower than upstream delivery throughput). This metric gauges the size of the buffer for queries requesting updates in a specific period of time that satisfy a given predicate. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.indexer.deactivation_distances - -> - **Summary**: Event sequence id distances between activations and deactivations. -> - **Description**: Histogram to collect the statistics of how long individual contracts lived. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.api.indexer.events\* - -> - **Summary**: Number of ledger events processed. -> -> - **Description**: Represents the total number of ledger events processed (transactions, reassignments, party allocations). -> -> - **Type**: meter -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **participant_id**: The id of the participant. -> - **user_id**: The user generating the events. -> - **event_type**: The type of ledger event processed (transaction, reassignment, party_allocation). -> - **status**: Indicates if the event was accepted or not. Possible values accepted\|rejected. - -#### daml.participant.api.indexer.indexer_queue_blocked - -> - **Summary**: The amount of blocked enqueue operations for the indexer queue. 
-> - **Description**: Indexer queue exerts backpressure by blocking asynchronous enqueue operations. This meter measures the amount of such blocked operations, signalling backpressure materializing from downstream. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.indexer.indexer_queue_buffered - -> - **Summary**: The size of the buffer before the indexer. -> - **Description**: This buffer is located before the indexer, increasing amount signals backpressure mounting. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.indexer.indexer_queue_uncommitted - -> - **Summary**: The amount of entries which are uncommitted for the indexer. -> - **Description**: Uncommitted entries contain all blocked, buffered and submitted, but not yet committed entries. This amount signals the momentum of stream processing, and has a theoretical maximum defined by all the queue parameters. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.indexer.ledger_end_sequential_id - -> - **Summary**: The sequential id of the current ledger end kept in the database. -> - **Description**: The ledger end's sequential id is a monotonically increasing integer value representing the sequential id ascribed to the most recent ledger event ingested by the index db. Please note, that only a subset of all ledger events are ingested and given a sequential id. These are: creates, consuming exercises, non-consuming exercises and divulgence events. This value can be treated as a counter of all such events visible to a given participant. This metric exposes the latest ledger end's sequential id registered in the database. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.api.indexer.metered_events\* - -> - **Summary**: Number of individual ledger events (create, exercise, archive). -> -> - **Description**: Represents the number of individual ledger events constituting a transaction. 
-> -> - **Type**: meter -> -> - **Qualification**: Debug -> -> - **Labels**: -> - **participant_id**: The id of the participant. -> - **user_id**: The user generating the events. - -#### daml.participant.api.indexer.output_batched_buffer_length - -> - **Summary**: The size of the queue between the indexer and the in-memory state updating flow. -> - **Description**: This counter counts batches of updates passed to the in-memory flow. Batches are dynamically-sized based on amount of backpressure exerted by the downstream stages of the flow. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.indexer.updates - -> - **Summary**: The number of the state updates persisted to the database. -> - **Description**: The number of the state updates persisted to the database. There are updates such as accepted transactions, configuration changes, party allocations, rejections, etc, but they also include synthetic events when the node learned about the sequencer clock advancing without any actual ledger event such as due to submission receipts or time proofs. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.participant.api.lapi.streams.acs_sent - -> - **Summary**: The number of the active contracts sent by the ledger api. -> - **Description**: The total number of active contracts sent over the ledger api streams to all clients. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.participant.api.lapi.streams.completions_sent - -> - **Summary**: The number of the command completions sent by the ledger api. -> - **Description**: The total number of completions sent over the ledger api streams to all clients. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.participant.api.lapi.streams.transaction_trees_sent - -> - **Summary**: The number of the transaction trees sent over the ledger api. -> - **Description**: The total number of the transaction trees sent over the ledger api streams to all clients. 
-> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.participant.api.lapi.streams.updates_sent - -> - **Summary**: The number of the flat updates sent over the ledger api. -> - **Description**: The total number of the flat updates sent over the ledger api streams to all clients. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.participant.api.services.current_ledger_end - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_active_contracts - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_completions - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_events_by_contract_id - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. 
Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_lf_archive - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_participant_id - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_parties - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_update_by_id - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.get_update_by_offset - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.in_memory_fan_out_buffer.prune - -> - **Summary**: The time to remove all elements from the in-memory fan-out buffer. -> - **Description**: It is possible to remove the oldest entries of the in-memory fan out buffer. This metric exposes the time needed to prune the buffer. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.in_memory_fan_out_buffer.push - -> - **Summary**: The time to add a new event into the buffer. -> - **Description**: The in-memory fan-out buffer is a buffer that stores the last ingested maxBufferSize accepted and rejected submission updates as TransactionLogUpdate. It allows bypassing IndexDB persistence fetches for recent updates for flat and transaction tree streams, command completion streams and by-event-id and by-transaction-id flat and transaction tree lookups. This metric exposes the time spent on adding a new event into the buffer. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.in_memory_fan_out_buffer.size - -> - **Summary**: The size of the in-memory fan-out buffer. -> - **Description**: The actual size of the in-memory fan-out buffer. This metric is mostly targeted for debugging purposes. -> - **Type**: histogram -> - **Qualification**: Saturation - -#### daml.participant.api.services.index.write.allocate_party - -> - **Summary**: The time to execute a write service operation. 
-> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.prune - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.submit_reassignment - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.submit_reassignment_running - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.submit_transaction - -> - **Summary**: The time to execute a write service operation. 
-> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.submit_transaction_running - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.update_vetted_packages - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.index.write.upload_packages - -> - **Summary**: The time to execute a write service operation. -> - **Description**: The write service is an internal interface for changing the state through the synchronization services. The methods in this interface are all methods that are supported uniformly across all ledger implementations. This metric exposes the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.latest_pruned_offsets - -> - **Summary**: The time to execute an index service operation. 
-> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.list_known_parties - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.list_lf_packages - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.lookup_active_contract - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.lookup_configuration - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. 
Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.lookup_contract_key - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.lookup_contract_state - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.lookup_maximum_ledger_time - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.party_entries - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. 
This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.prune - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.pruning.prune.completed - -> - **Summary**: Total number of completed pruning processes. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.services.pruning.prune.started - -> - **Summary**: Total number of started pruning processes. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.api.services.read.compute_highest_ranked_synchronizer_from_admissible - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.compute_party_vetting_map - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.get_connected_synchronizers - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. 
The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.get_lf_archive - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.incomplete_reassignment_offsets - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.list_lf_packages - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.list_vetted_packages - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.select_routing_synchronizer - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.state_updates - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.read.validate_dar - -> - **Summary**: The time to execute a read service operation. -> - **Description**: The read service is an internal interface for reading the events from the synchronization interfaces. The metrics expose the time needed to execute each operation. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.transaction_trees - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.api.services.transactions - -> - **Summary**: The time to execute an index service operation. -> - **Description**: The index service is an internal component responsible for access to the index db data. Its operations are invoked whenever a client request received over the ledger api requires access to the index db. This metric captures time statistics of such operations. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.console.tx-node-count - -> - **Summary**: Number of nodes per transaction histogram, measured using canton console ledger_api.updates.start_measure -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.console.tx-nodes-emitted - -> - **Summary**: Total number of nodes emitted, measured using canton console ledger_api.updates.start_measure -> - **Description**: -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.console.tx-size - -> - **Summary**: Transaction size histogram, measured using canton console ledger_api.updates.start_measure -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.declarative_api.errors - -> - **Summary**: Errors for the last update -> - **Description**: The node will attempt to apply the changes configured in the declarative config file. A positive number means that some items failed to be synchronised. A negative number means that the overall synchronisation procedure failed with an error. : 0 = everything good, -1 = config file unreadable, -2 = context could not be created, -3 = failure while applying items, -9 = exception caught. -> - **Type**: gauge -> - **Qualification**: Errors - -#### daml.participant.declarative_api.items - -> - **Summary**: Number of items managed through the declarative API -> - **Description**: This metric indicates the number of items managed through the declarative API -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.inflight_validation_requests\* - -> - **Summary**: Number of requests being validated. -> -> - **Description**: Number of requests that are currently being validated. This also covers requests submitted by other participants. 
-> -> - **Type**: gauge -> -> - **Qualification**: Saturation -> -> - **Labels**: -> - **participant**: The id of the participant for which the value applies. - -#### daml.participant.sync.commitments.catchup-mode-enabled - -> - **Summary**: Measures how many times the commitment processor catch-up mode has been triggered. -> - **Description**: Participant nodes compute bilateral commitments at regular intervals. This metric exposes how often the catch-up mode has been activated. The catch-up mode is triggered according to catch-up config and happens if the participant lags behind on computation. A healthy value is 0. An increasing value indicates intermittent periods when a participant alternates between healthy and struggling to keep up with commitment computation. However, we do not see a constantly increasing value for a participant that is consistently behind commitment computation because, once catch-up mode is activated, the participant remains in catch-up mode until it has completely caught up, and only triggers the metric once. In order to troubleshoot non-zero values, the operator should cross-correlate this value with the `daml.participant.sync.commitments.compute` metric. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.participant.sync.commitments.compute - -> - **Summary**: Measures the time that the participant node spends computing commitments. -> - **Description**: Participant nodes compute bilateral commitments at regular intervals, i.e., reconciliation intervals. This metric exposes the time spent on each computation in milliseconds. There are two cases that the operator should pay attention to. First, fluctuations in this value are expected if the number of counter-participants or common stakeholder groups changes. However, changes with no apparent reason could indicate a bug and the operator should monitor closely. 
Second, it is a cause of concern if the value starts approaching or is greater than the reconciliation interval: The participant will perpetually lag behind, because it needs to compute commitments more frequently than it can manage. The operator should consider asking the synchronizer operator to increase the reconciliation interval if the increase in commitment computation is expected, or otherwise investigate the cause. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.sync.commitments.last-incoming-processed - -> - **Summary**: Timestamp of the latest processed incoming ACS commitment period end in microseconds since unix epoch -> - **Description**: Timestamp of the latest incoming ACS commitment period end that was fully processed by the participant. -> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.participant.sync.commitments.last-incoming-received - -> - **Summary**: Timestamp of the latest received incoming ACS commitment period end in microseconds since unix epoch -> - **Description**: Timestamp of the latest incoming ACS commitment period end that has been received and enqueued, but not yet processed by the participant. To measure the latency of particular counter participants, use one of the counter-participant-latency metrics. -> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.participant.sync.commitments.last-locally-checkpointed - -> - **Summary**: Record time of the latest checkpointed ACS commitment in microseconds since unix epoch -> - **Description**: Timestamp of the latest checkpointed ACS commitment in microseconds. Crash recovery will start reingesting from this timestamp on or from the latest locally completed ACS commitment interval on, whichever is later. 
-> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.participant.sync.commitments.last-locally-completed - -> - **Summary**: Timestamp of the latest locally completed ACS commitment interval in microseconds since unix epoch -> - **Description**: Timestamp of the latest locally completed ACS commitment interval. Crash recovery will start reingesting from this timestamp on or from the latest checkpointed ACS commitment interval on, whichever is later. -> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.participant.sync.commitments.sequencing-time - -> - **Summary**: Measures the time between the end of a commitment period, and the time when the sequencer observes the corresponding commitment. -> - **Description**: Participant nodes compute bilateral commitments at regular intervals. After a participant computes a commitment, it sends it for sequencing. The time between the end of a commitment interval and sequencing is measured in milliseconds. Because commitment computation is included within the measured time, the value is always greater than the `daml.participant.sync.commitments.compute` metric. The operator should pay attention to fluctuations of this value. An increase can be expected, e.g., because the computation time increases. However, a value increase can be a cause of concern, because it can indicate that the participant is lagging behind in processing messages and computing commitments, which is accompanied by `ACS_COMMITMENT_DEGRADATION` warnings in the participant logs. An increase can also indicate that the sequencer is slow in sequencing the commitment messages. The operator should cross-correlate with sequencing metrics such as `daml.sequencer-client.submissions.sequencing` and `daml.sequencer-client.handler.delay`. In this case, the operator should consider changing the preferred sequencer configuration. 
-> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.sync.commitments.synchronizer.largest-counter-participant-latency - -> - **Summary**: The highest latency in micros for commitments outstanding from counter-participants for more than a threshold-number of reconciliation intervals. -> - **Description**: Participant nodes compute bilateral commitments at regular intervals and send them. This metric is the default indicator of a counter-participant being slow. The metric exposes the highest latency of a counter-participant, measured by subtracting the highest known counter-participant latency from the most recent period processed by the participant. A counter-participant has to send a commitment at least once in order to appear here. The operator of a participant can configure a default threshold per synchronizer that the participant connects to. The smaller the threshold, the more sensitive the metric is to even small delays in receiving commitments from counter-participants. For example, for a threshold of 5 intervals and a reconciliation interval of 1 minute, the metric measures the latency of counter-participants that have sent no commitments for periods covering the last 5 minutes observed by the participant. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.sync.commitments.synchronizer.largest-distinguished-counter-participant-latency - -> - **Summary**: The highest latency in micros for commitments outstanding from distinguished counter-participants for more than a threshold-number of reconciliation intervals. -> - **Description**: Participant nodes compute bilateral commitments at regular intervals and send them. 
This metric indicates that a distinguished counter-participant is slow, i.e., the participant cannot confirm that its state is the same with that of a counter-participant with whom the operator has an important business relation. The metric exposes the highest latency of a counter-participant, measured by subtracting the highest known counter-participant latency from the most recent period processed by the participant. A counter-participant has to send a commitment at least once in order to appear here. The operator of a participant can configure a default threshold per synchronizer that the participant connects to. The smaller the threshold, the more sensitive the metric is to even small delays in receiving commitments from counter-participants. For example, for a threshold of 5 intervals and a reconciliation interval of 1 minute, the metric measures the latency of counter-participants that have sent no commitments for periods covering the last 5 minutes observed by the participant. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.participant.sync.conflict-detection.sequencer-counter-queue - -> - **Summary**: Size of conflict detection sequencer counter queue -> - **Description**: The task scheduler will work off tasks according to the timestamp order, scheduling the tasks whenever a new timestamp has been observed. This metric exposes the number of un-processed sequencer messages that will trigger a timestamp advancement. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.participant.sync.in-flight-submission-synchronizer-tracker.unsequenced-in-flight-submissions - -> - **Summary**: Number of unsequenced submissions in-flight. -> - **Description**: Number of unsequenced submissions in-flight. Unsequenced in-flight submissions are tracked in-memory, so a high amount here will boil down to memory pressure. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.participant.sync.inflight-validations - -> - **Summary**: Number of requests being validated on the synchronizer. -> - **Description**: Number of requests that are currently being validated on the synchronizer. This also covers requests submitted by other participants. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.participant.sync.protocol-messages.confirmation-request-creation - -> - **Summary**: Time to create a transaction confirmation request -> - **Description**: The time that the transaction protocol processor needs to create a transaction confirmation request. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.participant.sync.protocol-messages.confirmation-request-size - -> - **Summary**: Confirmation request size -> - **Description**: Records the histogram of the sizes of (transaction) confirmation requests. -> - **Type**: histogram -> - **Qualification**: Debug - -#### daml.participant.sync.protocol-messages.transaction-message-receipt - -> - **Summary**: Time to parse and decrypt a transaction message -> - **Description**: The time that the transaction protocol processor needs to parse and decrypt an incoming confirmation request. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.participant.sync.request-tracker.sequencer-counter-queue - -> - **Summary**: Size of record order publisher sequencer counter queue -> - **Description**: Same as for conflict-detection, but measuring the sequencer counter queues for the publishing to the ledger api server according to record time. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.pruning - -> - **Summary**: Duration of prune operations. -> - **Description**: This timer exposes the duration of pruning requests from the Canton portion of the ledger. 
-> - **Type**: timer -> - **Qualification**: Saturation - -#### daml.pruning.max-event-age - -> - **Summary**: Age of oldest unpruned event. -> - **Description**: This gauge exposes the age of the oldest, unpruned event in hours as a way to quantify the pruning backlog. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.handler.actual-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks how many such batches are processed in parallel. -> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. Indicators that the configured upper bound may be too low: This metric constantly is close to the configured maximum, which is exposed via 'max-in-flight-event-batches', while the system's resources are under-utilized. Indicators that the configured upper bound may be too high: Out-of-memory errors crashing the JVM or frequent garbage collection cycles that slow down processing. The metric tracks how many of these batches have been sent to the application handler but have not yet been fully processed. This metric can help identify potential bottlenecks or issues with the application's processing of events and provide insights into the overall workload of the system. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.handler.application-handle - -> - **Summary**: Timer monitoring time and rate of sequentially handling the event application logic -> - **Description**: All events are received sequentially. This handler records the rate and time it takes the application (participant or mediator) to handle the events. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.delay - -> - **Summary**: The delay on the event processing in milliseconds -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. Upon having received the same message from enough sequencers (as configured by the trust threshold), the sequencer client compares the time difference between the sequencing time and the computer's local clock and exposes this difference as the given metric. The difference will include the clock-skew and the processing latency between assigning the timestamp on the sequencer and receiving the message by the recipient from enough sequencers. If the difference is large compared to the usual latencies, clock skew can be ruled out, and enough sequencers are not slow, then it means that the node is still trying to catch up with events that the sequencers sequenced a while ago. This can happen after having been offline for a while or if the node is too slow to keep up with the messaging load. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.last-sequencing-time-micros - -> - **Summary**: The sequencing time of the last processed event in microseconds since unix epoch -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. 
Upon having received the same message from enough sequencers (as configured by the trust threshold), this metric is updated with the sequencing time of that message. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.max-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks the upper bound of such batches being processed in parallel. -> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. Configured by 'maximum-in-flight-event-batches' parameter in the sequencer-client config. The metric shows the configured upper limit on how many batches the application handler may process concurrently. The metric 'actual-in-flight-event-batches' tracks the actual number of currently processed batches. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.sequencer-events - -> - **Summary**: Number of received events from the sequencer -> - **Description**: A participant reads events from the sequencer. This metric captures the count and rate of events. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.sequencer-connection-pool.active-subscriptions - -> - **Summary**: Number of active subscriptions in the subscription pool -> - **Description**: This metric indicates the current number of subscriptions that are active. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.grpc-requests - -> - **Summary**: Number of gRPC requests sent on this connection -> - **Description**: This metric indicates the number of gRPC requests that have been sent on this connection. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.subscription-threshold - -> - **Summary**: Sum of trust threshold and liveness margin configured in the subscription pool -> - **Description**: The liveness margin determines how many subscriptions on different sequencers are continuously maintained, beyond the minimum number defined by the trust threshold. In other words, the subscription pool will strive to maintain at all times (trust threshold + liveness margin)-many subscriptions active. This provides tolerance to subscriptions failing, enabling the node to continue operating while some sequencers are down. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.tracked-connections - -> - **Summary**: Number of connections tracked by the connection pool -> - **Description**: The configuration of the connection pool defines the parameters of the sequencer connections. This metric shows the current number of those connections. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.trust-threshold - -> - **Summary**: Trust threshold configured in the connection pool -> - **Description**: The trust threshold determines how many connections to sequencers must be available and consistent (same synchronizer ID, same protocol version, same static parameters) for the connection pool to initialize. Furthermore, it also determines the number of sequencer subscriptions that must deliver identical copies of an event for that event to be accepted and processed by the node. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.validated-connections - -> - **Summary**: Number of connections validated by the connection pool -> - **Description**: This metric indicates the current number of connections that are up and validated. These connections are available for components of the node that need to communicate with the synchronizer. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.submissions.dropped - -> - **Summary**: Count of send requests that did not cause an event to be sequenced -> - **Description**: Counter of send requests for which we did not witness a corresponding event being sequenced by the supplied max-sequencing-time. There could be many reasons for this happening: the request may have been lost before reaching the sequencer, the sequencer may be at capacity and the max-sequencing-time was exceeded by the time the request was processed, or the supplied max-sequencing-time may just be too small for the sequencer to be able to sequence the request. -> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.in-flight - -> - **Summary**: Number of sequencer send requests we have that are waiting for an outcome or timeout -> - **Description**: Incremented on every successful send to the sequencer. Decremented when the event or an error is sequenced, or when the max-sequencing-time has elapsed. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.overloaded - -> - **Summary**: Count of send requests which receive an overloaded response -> - **Description**: Counter that is incremented if a send request receives an overloaded response from the sequencer. 
-> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.sends - -> - **Summary**: Rate and timings of send requests to the sequencer -> - **Description**: Provides a rate and time of how long it takes for send requests to be accepted by the sequencer. Note that this is just for the request to be made and not for the requested event to actually be sequenced. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.sequencing - -> - **Summary**: Rate and timings of sequencing requests -> - **Description**: This timer is started when a submission is made to the sequencer and then completed when a corresponding event is witnessed from the sequencer, so will encompass the entire duration for the sequencer to sequence the request. If the request does not result in an event no timing will be recorded. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer-client.traffic-control.event-delivered - -> - **Summary**: Number of events that were sequenced and delivered. -> - **Description**: Counter for event-delivered-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-delivered-cost - -> - **Summary**: Cost of events that were sequenced and delivered. -> - **Description**: Cost of events for which the sender received confirmation that they were delivered. There is an exception for aggregated submissions: the cost of aggregate events will be recorded as soon as the event is ordered and the sequencer waits to receive threshold-many events. The final event may or may not be delivered successfully depending on the result of the aggregation. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected - -> - **Summary**: Number of events that were sequenced but not delivered. -> - **Description**: Counter for event-rejected-cost. 
-> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected-cost - -> - **Summary**: Cost of events that were sequenced but not delivered successfully. -> - **Description**: Cost of events for which the sender received confirmation that the events will not be delivered. The reason for non-delivery is labeled on the metric, if available. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.submitted-event-cost - -> - **Summary**: Cost of event submitted from the sequencer client. -> - **Description**: When the sequencer client sends an event to the sequencer to be sequenced, it will record on this metric the cost of the event. Note that the event may or may not end up being sequenced. So this metric may not exactly match the actual consumed traffic. -> - **Type**: meter -> - **Qualification**: Traffic - -### Sequencer Metrics - -#### daml.cache.evicted_weight - -> - **Summary**: The sum of weights of cache entries evicted. -> - **Description**: The total weight of the entries evicted from the cache. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.evictions - -> - **Summary**: The number of the evicted cache entries. -> - **Description**: When an entry is evicted from the cache, the counter is incremented. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.hits - -> - **Summary**: The number of cache hits. -> - **Description**: When a cache lookup encounters an existing cache entry, the counter is incremented. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.cache.misses - -> - **Summary**: The number of cache misses. -> - **Description**: When a cache lookup first encounters a missing cache entry, the counter is incremented. 
-> - **Type**: counter -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number of database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number of database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.grpc.server - -> - **Summary**: Distribution of the durations of serving gRPC requests. 
-> - **Description**: -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.grpc.server.handled - -> - **Summary**: Total number of handled gRPC requests. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received - -> - **Summary**: Total number of gRPC messages received (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages received (both unary and streaming). -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent - -> - **Summary**: Total number of gRPC messages sent (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages sent (both unary and streaming). -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.requests.rejections\* - -> - **Summary**: Number of rejected requests due to active request limits. -> -> - **Description**: Counts the number of requests rejected because the active request limit was reached. -> -> - **Type**: counter -> -> - **Qualification**: Saturation -> -> - **Labels**: -> - **method**: The method / service name limited. -> - **service**: The API the method belongs to -> - **api**: The API the method belongs to - -#### daml.grpc.server.started - -> - **Summary**: Total number of started gRPC requests (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.handler.actual-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks how many such batches are processed in parallel. 
-> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. Indicators that the configured upper bound may be too low: This metric constantly is close to the configured maximum, which is exposed via 'max-in-flight-event-batches', while the system's resources are under-utilized. Indicators that the configured upper bound may be too high: Out-of-memory errors crashing the JVM or frequent garbage collection cycles that slow down processing. The metric tracks how many of these batches have been sent to the application handler but have not yet been fully processed. This metric can help identify potential bottlenecks or issues with the application's processing of events and provide insights into the overall workload of the system. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.handler.application-handle - -> - **Summary**: Timer monitoring time and rate of sequentially handling the event application logic -> - **Description**: All events are received sequentially. This handler records the rate and time it takes the application (participant or mediator) to handle the events. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.delay - -> - **Summary**: The delay on the event processing in milliseconds -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. 
Upon having received the same message from enough sequencers (as configured by the trust threshold), the sequencer client compares the time difference between the sequencing time and the computer's local clock and exposes this difference as the given metric. The difference will include the clock-skew and the processing latency between assigning the timestamp on the sequencer and receiving the message by the recipient from enough sequencers. If the difference is large compared to the usual latencies, clock skew can be ruled out, and enough sequencers are not slow, then it means that the node is still trying to catch up with events that the sequencers sequenced a while ago. This can happen after having been offline for a while or if the node is too slow to keep up with the messaging load. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.last-sequencing-time-micros - -> - **Summary**: The sequencing time of the last processed event in microseconds since unix epoch -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. Upon having received the same message from enough sequencers (as configured by the trust threshold), this metric is updated with the sequencing time of that message. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.max-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks the upper bound of such batches being processed in parallel. 
-> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. Configured by the 'maximum-in-flight-event-batches' parameter in the sequencer-client config. The metric shows the configured upper limit on how many batches the application handler may process concurrently. The metric 'actual-in-flight-event-batches' tracks the actual number of currently processed batches. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.sequencer-events - -> - **Summary**: Number of received events from the sequencer -> - **Description**: A participant reads events from the sequencer. This metric captures the count and rate of events. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.sequencer-connection-pool.active-subscriptions - -> - **Summary**: Number of active subscriptions in the subscription pool -> - **Description**: This metric indicates the current number of subscriptions that are active. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.grpc-requests - -> - **Summary**: Number of gRPC requests sent on this connection -> - **Description**: This metric indicates the number of gRPC requests that have been sent on this connection. 
-> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.subscription-threshold - -> - **Summary**: Sum of trust threshold and liveness margin configured in the subscription pool -> - **Description**: The liveness margin determines how many subscriptions on different sequencers are continuously maintained, beyond the minimum number defined by the trust threshold. In other words, the subscription pool will strive to maintain at all times (trust threshold + liveness margin)-many subscriptions active. This provides tolerance to subscriptions falling, enabling the node to continue operating while some sequencers are down. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.tracked-connections - -> - **Summary**: Number of connections tracked by the connection pool -> - **Description**: The configuration of the connection pool defines the parameters of the sequencer connections. This metric shows the current number of those connections. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.trust-threshold - -> - **Summary**: Trust threshold configured in the connection pool -> - **Description**: The trust threshold determines how many connections to sequencers must be available and consistent (same synchronizer ID, same protocol version, same static parameters) for the connection pool to initialize. Furthermore, it also determines the number of sequencer subscriptions that must deliver identical copies of an event for that event to be accepted and processed by the node. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.validated-connections - -> - **Summary**: Number of connections validated by the connection pool -> - **Description**: This metric indicates the current number of connections that are up and validated. 
These connections are available for components of the node that need to communicate with the synchronizer. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.submissions.dropped - -> - **Summary**: Count of send requests that did not cause an event to be sequenced -> - **Description**: Counter of send requests for which we did not witness a corresponding event being sequenced by the supplied max-sequencing-time. There could be many reasons for this happening: the request may have been lost before reaching the sequencer, the sequencer may be at capacity and the max-sequencing-time was exceeded by the time the request was processed, or the supplied max-sequencing-time may just be too small for the sequencer to be able to sequence the request. -> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.in-flight - -> - **Summary**: Number of sequencer send requests we have that are waiting for an outcome or timeout -> - **Description**: Incremented on every successful send to the sequencer. Decremented when the event or an error is sequenced, or when the max-sequencing-time has elapsed. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.overloaded - -> - **Summary**: Count of send requests which receive an overloaded response -> - **Description**: Counter that is incremented if a send request receives an overloaded response from the sequencer. -> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.sends - -> - **Summary**: Rate and timings of send requests to the sequencer -> - **Description**: Provides a rate and time of how long it takes for send requests to be accepted by the sequencer. Note that this is just for the request to be made and not for the requested event to actually be sequenced. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.sequencing - -> - **Summary**: Rate and timings of sequencing requests -> - **Description**: This timer is started when a submission is made to the sequencer and then completed when a corresponding event is witnessed from the sequencer, so will encompass the entire duration for the sequencer to sequence the request. If the request does not result in an event no timing will be recorded. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer-client.traffic-control.event-delivered - -> - **Summary**: Number of events that were sequenced and delivered. -> - **Description**: Counter for event-delivered-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-delivered-cost - -> - **Summary**: Cost of events that were sequenced and delivered. -> - **Description**: Cost of events for which the sender received confirmation that they were delivered. There is an exception for aggregated submissions: the cost of aggregate events will be recorded as soon as the event is ordered and the sequencer waits to receive threshold-many events. The final event may or may not be delivered successfully depending on the result of the aggregation. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected - -> - **Summary**: Number of events that were sequenced but not delivered. -> - **Description**: Counter for event-rejected-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected-cost - -> - **Summary**: Cost of events that were sequenced but not delivered successfully. -> - **Description**: Cost of events for which the sender received confirmation that the events will not be delivered. The reason for non-delivery is labeled on the metric, if available. 
-> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.submitted-event-cost - -> - **Summary**: Cost of event submitted from the sequencer client. -> - **Description**: When the sequencer client sends an event to the sequencer to be sequenced, it will record on this metric the cost of the event. Note that the event may or may not end up being sequenced. So this metric may not exactly match the actual consumed traffic. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.commit-latency - -> - **Summary**: Consensus commit latency -> - **Description**: Records the rate and latency it takes to commit a block at the consensus level. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.consensus.discarded-messages - -> - **Summary**: Discarded messages -> - **Description**: Discarded network messages received during an epoch, either due to being repeated (too many retransmissions), invalid or from a stale view -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.discarded-rate-limited-retransmission-requests - -> - **Summary**: Discarded rate limited retransmission requests -> - **Description**: Discarded retransmission requests messages due to rate limiting -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.discarded-wrong-epoch-retransmission-responses - -> - **Summary**: Discarded retransmission response messages -> - **Description**: Discarded retransmission response messages for epoch different than current one -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.epoch - -> - **Summary**: Epoch number -> - **Description**: Current epoch number for the node. 
-> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.epoch-length - -> - **Summary**: Epoch length -> - **Description**: Length of the current epoch in number of blocks. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.epoch-view-changes - -> - **Summary**: Number of view changes occurred -> - **Description**: Number of view changes occurred. -> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.consensus.incoming-retransmission-requests - -> - **Summary**: Incoming retransmissions requests -> - **Description**: Retransmissions requests received during an epoch -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.outgoing-retransmission-requests - -> - **Summary**: Outgoing retransmissions requests -> - **Description**: Retransmissions requests sent during an epoch -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.postponed-view-messages-dropped - -> - **Summary**: Count of messages dropped by queue containing postponed view messages -> - **Description**: Count of messages dropped by queue containing postponed view messages. -> - **Type**: meter -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.postponed-view-messages-duplicates - -> - **Summary**: Count of messages dropped as duplicates by queue containing postponed view messages -> - **Description**: Count of messages dropped as duplicates by queue containing postponed view messages. -> - **Type**: meter -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.postponed-view-messages-queue-max-size - -> - **Summary**: Actual maximum size of the queue containing postponed view messages -> - **Description**: Actual maximum size of the queue containing postponed view messages. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.postponed-view-messages-queue-size - -> - **Summary**: Size of the queue containing postponed view messages -> - **Description**: Size of the queue containing postponed view messages. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.retransmitted-commit-certificates - -> - **Summary**: Retransmitted commit certificates -> - **Description**: Number of commit certificates retransmitted during an epoch -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.retransmitted-messages - -> - **Summary**: Retransmitted PBFT messages -> - **Description**: Number of PBFT messages retransmitted during an epoch -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.consensus.state-transfer.postponed-consensus-messages-dropped - -> - **Summary**: Count of messages dropped by queue containing consensus messages postponed during state transfer -> - **Description**: Count of messages dropped by queue containing consensus messages postponed during state transfer. -> - **Type**: meter -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.state-transfer.postponed-consensus-messages-queue-max-size - -> - **Summary**: Actual maximum size of the queue containing consensus messages postponed during state transfer -> - **Description**: Actual maximum size of the queue containing consensus messages postponed during state transfer. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.consensus.state-transfer.postponed-consensus-messages-queue-size - -> - **Summary**: Size of the queue containing consensus messages postponed during state transfer -> - **Description**: Size of the queue containing consensus messages postponed during state transfer. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.declarative_api.errors - -> - **Summary**: Errors for the last update -> - **Description**: The node will attempt to apply the changes configured in the declarative config file. A positive number means that some items failed to be synchronised. A negative number means that the overall synchronisation procedure failed with an error. : 0 = everything good, -1 = config file unreadable, -2 = context could not be created, -3 = failure while applying items, -9 = exception caught. -> - **Type**: gauge -> - **Qualification**: Errors - -#### daml.sequencer.bftordering.declarative_api.items - -> - **Summary**: Number of items managed through the declarative API -> - **Description**: This metric indicates the number of items managed through the declarative API -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.bftordering.global.ordered-batches - -> - **Summary**: Batches ordered -> - **Description**: Measures the total batches ordered. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.global.ordered-blocks - -> - **Summary**: Blocks ordered -> - **Description**: Measures the total blocks ordered. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.global.ordered-requests - -> - **Summary**: Requests ordered -> - **Description**: Measures the total requests ordered. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.global.requests-ordering-latency - -> - **Summary**: Requests ordering latency -> - **Description**: Records the rate and latency it takes to order requests. This metric is always meaningful when queried on and restricted to the receiving sequencer; in other cases, it is meaningful only when the receiving and reporting sequencers' clocks are kept synchronized. 
-> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.ingress.bytes-queued - -> - **Summary**: Bytes queued -> - **Description**: Measures the size of the mempool in bytes. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.ingress.received-bytes - -> - **Summary**: Bytes received -> - **Description**: Measures the total bytes received. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.ingress.received-requests - -> - **Summary**: Requests received -> - **Description**: Measures the total requests received. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.ingress.requests-queued - -> - **Summary**: Requests queued -> - **Description**: Measures the size of the mempool in requests. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.ingress.requests-size - -> - **Summary**: Requests size -> - **Description**: Records the size of requests to the BFT ordering service. -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.mempool.requested-batches - -> - **Summary**: Requested batches -> - **Description**: Number of batches requested from the mempool by the availability module. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.bftordering.output.block-delay - -> - **Summary**: Block delay -> - **Description**: Wall-clock time of the ordered block being provided to the sequencer minus BFT time of the block. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.output.block-size-batches - -> - **Summary**: Block size (batches) -> - **Description**: Records the size (in batches) of blocks ordered. 
-> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.output.block-size-bytes - -> - **Summary**: Block size (bytes) -> - **Description**: Records the size (in bytes) of blocks ordered. -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.output.block-size-requests - -> - **Summary**: Block size (requests) -> - **Description**: Records the size (in requests) of blocks ordered. -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.connections.authenticated - -> - **Summary**: Authenticated peers -> - **Description**: Number of connected P2P endpoints that are also authenticated. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.connections.connected - -> - **Summary**: Connected peers -> - **Description**: Number of connected P2P endpoints. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.receive.processing-latency - -> - **Summary**: Message receive processing latency -> - **Description**: Records the rate and latency when processing incoming P2P network messages. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.p2p.receive.received-bytes - -> - **Summary**: Bytes received -> - **Description**: Total P2P bytes received. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.receive.received-messages - -> - **Summary**: Messages received -> - **Description**: Total P2P messages received. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.send.grpc-latency - -> - **Summary**: Latency of a gRPC message send -> - **Description**: Records the rate of gRPC message sends and their latency (up to receiving them on the other side). 
-> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.p2p.send.network-write-latency - -> - **Summary**: Message network write latency -> - **Description**: Records the rate and latency when writing P2P messages to the network. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.p2p.send.sends-retried - -> - **Summary**: P2P sends retried -> - **Description**: Total P2P network sends retried after a delay due to missing connectivity. -> - **Type**: counter -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.p2p.send.sent-bytes - -> - **Summary**: Bytes sent -> - **Description**: Total P2P bytes sent. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.p2p.send.sent-messages - -> - **Summary**: Messages sent -> - **Description**: Total P2P messages sent. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.performance.ordering-stage-latency - -> - **Summary**: Ordering stage latency -> - **Description**: Records the rate and latency it takes for an ordering stage, which is recorded as a label. This metric is meaningful only when sequencers' clocks are kept synchronized. -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.topology.max-tolerated-faults - -> - **Summary**: Maximum number of tolerated faults -> - **Description**: Maximum number of tolerated faults -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.topology.query-latency - -> - **Summary**: Topology query latency -> - **Description**: Records the rate and latency when querying the topology client. 
-> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer.bftordering.topology.strong-quorum - -> - **Summary**: Number of non-faulty nodes required for a strong quorum -> - **Description**: Number of non-faulty nodes required for a strong quorum, like for consensus -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.topology.validators - -> - **Summary**: Active validators -> - **Description**: Number of BFT sequencers actively involved in consensus. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.bftordering.topology.weak-quorum - -> - **Summary**: Number of non-faulty nodes required for a weak quorum -> - **Description**: Number of non-faulty nodes required for a weak quorum, like for batch dissemination -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.block.acknowledgments_micros\* - -> - **Summary**: Acknowledgments by members in Micros -> -> - **Description**: -> -> - **Type**: gauge -> -> - **Qualification**: Latency -> -> - **Labels**: -> - **member**: The sender of the acknowledgment - -#### daml.sequencer.block.delay - -> - **Summary**: The block processing delay in milliseconds, relative to wall clock -> - **Description**: Every block carries a timestamp that was assigned by the ordering service when it ordered the block. This metric shows the difference between the wall clock of the sequencer node and the timestamp of the last processed block. The difference will include the clock-skew and the processing latency of the ordering service. If the delay is large compared to the usual latencies, clock skew can be ruled out, and enough sequencers are not slow, then it means that the node is still trying to catch up reading blocks from the ordering service. This can happen after having been offline for a while or if the node is too slow to keep up with the block processing load. 
-> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.sequencer.block.event-bytes\* - -> - **Summary**: Event bytes processed by the sequencer, tagged by type. -> -> - **Description**: Similar to events, except measured by bytes -> -> - **Type**: meter -> -> - **Qualification**: Traffic -> -> - **Labels**: -> - **member**: The sender of the submission request -> - **type**: Type of request - -#### daml.sequencer.block.events\* - -> - **Summary**: Events processed by the sequencer, tagged by type. -> -> - **Description**: The sequencer forwards opaque, possibly encrypted payload. However, by looking at the recipient list, the type of message can still be inferred, and tagged appropriately, including the sender. -> -> - **Type**: meter -> -> - **Qualification**: Traffic -> -> - **Labels**: -> - **member**: The sender of the submission request -> - **type**: Type of request - -#### daml.sequencer.block.height - -> - **Summary**: Current block height processed -> - **Description**: The submission messages are processed in blocks, where each block has an increasing number. The metric shows the height of the last processed block by the given sequencer node. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.block.stream-buffer-size - -> - **Summary**: Size of the buffer of Pekko streams after the flow, tagged by stream flow name -> - **Description**: -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer.block.stream-element-count - -> - **Summary**: Number of elements passing through a Pekko streams flow, tagged by stream element -> - **Description**: -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer.db-storage.general.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer.db-storage.general.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number of database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.db-storage.general.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer.db-storage.general.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.db-storage.general.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer.db-storage.write.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. 
-> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer.db-storage.write.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number of database connections for this database connection pool. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer.db-storage.write.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer.db-storage.write.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.db-storage.write.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer.db.watermark_delay - -> - **Summary**: The event processing delay in milliseconds, relative to wall clock -> - **Description**: Sequencer writes events in parallel using a watermark. This metric shows the difference between the wall clock of the sequencer node and the current watermark of the last written events. 
The difference will include the clock-skew and the processing latency of the sequencer database write. For block sequencers if the delay is large compared to the usual latencies, clock skew can be ruled out, and enough sequencers are not slow, then it means that the node is still trying to catch up reading blocks from the ordering service. This can happen after having been offline for a while or if the node is too slow to keep up with the block processing load. For database sequencers it means that database system is not being able to keep up with the write load. -> - **Type**: gauge -> - **Qualification**: Latency - -#### daml.sequencer.declarative_api.errors - -> - **Summary**: Errors for the last update -> - **Description**: The node will attempt to apply the changes configured in the declarative config file. A positive number means that some items failed to be synchronised. A negative number means that the overall synchronisation procedure failed with an error. : 0 = everything good, -1 = config file unreadable, -2 = context could not be created, -3 = failure while applying items, -9 = exception caught. 
-> - **Type**: gauge -> - **Qualification**: Errors - -#### daml.sequencer.declarative_api.items - -> - **Summary**: Number of items managed through the declarative API -> - **Description**: This metric indicates the number of items managed through the declarative API -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.head_timestamp - -> - **Summary**: Timestamp of the head (oldest) event in the buffer -> - **Description**: The timestamp of the first event in the buffer, or 0 if the buffer is empty -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.last_timestamp - -> - **Summary**: Timestamp of the last (newest) event in the buffer -> - **Description**: The timestamp of the last event in the buffer, or 0 if the buffer is empty -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.max-event-age - -> - **Summary**: Age of oldest unpruned sequencer event. -> - **Description**: This gauge exposes the age of the oldest, unpruned sequencer event in hours as a way to quantify the pruning backlog. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer.public-api.processed - -> - **Summary**: Number of messages processed by the sequencer -> - **Description**: This metric measures the number of successfully validated messages processed by the sequencer since the start of this process. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.public-api.processed-bytes - -> - **Summary**: Number of message bytes processed by the sequencer -> - **Description**: This metric measures the total number of message bytes processed by the sequencer. If the message received by the sequencer contains duplicate or irrelevant fields, the contents of these fields do not contribute to this metric. 
-> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.public-api.subscriptions - -> - **Summary**: Number of active sequencer subscriptions -> - **Description**: This metric indicates the number of active subscriptions currently open and actively served subscriptions at the sequencer. -> - **Type**: gauge -> - **Qualification**: Traffic - -#### daml.sequencer.public-api.time-requests - -> - **Summary**: Number of time requests received by the sequencer -> - **Description**: When a Participant needs to know the synchronizer time it will make a request for a time proof to be sequenced. It would be normal to see a small number of these being sequenced, however if this number becomes a significant portion of the total requests to the sequencer it could indicate that the strategy for requesting times may need to be revised to deal with different clock skews and latencies between the sequencer and participants. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.sequencer.traffic-control.balance-cache-miss-for-timestamp - -> - **Summary**: Counts cache misses when trying to retrieve a balance for a given timestamp. -> - **Description**: The per member cache only keeps in memory a subset of all the non-pruned balance updates persisted in the database. If the cache contains *some* balances for a member but not the one requested, a DB call will be made to try to retrieve it. When that happens, this metric is incremented. If this occurs too frequently, consider increasing the config value of trafficPurchasedCacheSizePerMember. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer.traffic-control.balance-update - -> - **Summary**: Counts balance updates fully processed by the sequencer. -> - **Description**: Value of balance updates for all (aggregated). 
-> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.event-delivered - -> - **Summary**: Number of events that were sequenced and delivered. -> - **Description**: Counter for event-delivered-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.event-delivered-cost - -> - **Summary**: Cost of events that were sequenced and delivered. -> - **Description**: Cost of events for which the sender received confirmation that they were delivered. There is an exception for aggregated submissions: the cost of aggregate events will be recorded as soon as the event is ordered and the sequencer waits to receive threshold-many events. The final event may or may not be delivered successfully depending on the result of the aggregation. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.event-rejected - -> - **Summary**: Number of events that were sequenced but not delivered. -> - **Description**: Counter for event-rejected-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.event-rejected-cost - -> - **Summary**: Cost of events that were sequenced but no delivered successfully. -> - **Description**: Cost of events for which the sender received confirmation that the events will not be delivered. The reason for non-delivery is labeled on the metric, if available. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.submitted-event-cost - -> - **Summary**: Cost of event submitted from the sequencer client. -> - **Description**: When the sequencer client sends an event to the sequencer to be sequenced, it will record on this metric the cost of the event. Note that the event may or may not end up being sequenced. So this metric may not exactly match the actual consumed traffic. 
-> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.wasted-sequencing - -> - **Summary**: Byte size of events that got sequenced but failed to pass validation steps after sequencing -> - **Description**: Record the raw byte size of events that are ordered but were not delivered because of traffic enforcement. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.wasted-sequencing-counter - -> - **Summary**: Number of events that failed traffic validation and were not delivered because of it. -> - **Description**: Counter for wasted-sequencing. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.wasted-traffic - -> - **Summary**: Cost of event that was deducted but not delivered. -> - **Description**: Events can have their cost deducted but still not be delivered due to other failed validation after ordering. This metrics records the traffic cost of such events. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer.traffic-control.wasted-traffic-counter - -> - **Summary**: Number of events that cost traffic but were not delivered. -> - **Description**: Counter for wasted-traffic. -> - **Type**: counter -> - **Qualification**: Traffic - -### Mediator Metrics - -#### daml.db-storage.general.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number database connections for this database connection pool. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.general.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.general.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.exectime - -> - **Summary**: Execution time metric for database tasks -> - **Description**: The time a task is running on the database is measured using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.load - -> - **Summary**: Load of database pool -> - **Description**: Database queries run as tasks on an async executor. This metric shows the current number of queries running in parallel divided by the number database connections for this database connection pool. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.queued - -> - **Summary**: Number of database access tasks waiting in queue -> - **Description**: Database access tasks get scheduled in this queue and get executed using one of the existing asynchronous sessions. A large queue indicates that the database connection is not able to deal with the large number of requests. Note that the queue has a maximum size. Tasks that do not fit into the queue will be retried, but won't show up in this metric. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.db-storage.write.executor.running - -> - **Summary**: Number of database access tasks currently running -> - **Description**: Database access tasks run on an async executor. This metric shows the current number of tasks running in parallel. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.db-storage.write.executor.waittime - -> - **Summary**: Scheduling time metric for database tasks -> - **Description**: Every database query is scheduled using an asynchronous executor with a queue. The time a task is waiting in this queue is monitored using this metric. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.grpc.server - -> - **Summary**: Distribution of the durations of serving gRPC requests. -> - **Description**: -> - **Type**: timer -> - **Qualification**: Latency - -#### daml.grpc.server.handled - -> - **Summary**: Total number of handled gRPC requests. -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received - -> - **Summary**: Total number of gRPC messages received (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.received.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages received (both unary and streaming). 
-> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent - -> - **Summary**: Total number of gRPC messages sent (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.grpc.server.messages.sent.bytes - -> - **Summary**: Distribution of payload sizes in gRPC messages sent (both unary and streaming). -> - **Description**: -> - **Type**: histogram -> - **Qualification**: Traffic - -#### daml.grpc.server.requests.rejections\* - -> - **Summary**: Number of rejected requests due to active request limits. -> -> - **Description**: Counts the number of requests rejected because the active request limit was reached. -> -> - **Type**: counter -> -> - **Qualification**: Saturation -> -> - **Labels**: -> - **method**: The method / service name limited. -> - **service**: The API the method belongs to -> - **api**: The API the method belongs to - -#### daml.grpc.server.started - -> - **Summary**: Total number of started gRPC requests (on either type of connection). -> - **Description**: -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.mediator.approved-requests - -> - **Summary**: Total number of approved confirmation requests -> - **Description**: This metric provides the total number of approved confirmation requests since the system has been started. A confirmation request is approved if all the required confirmations are received by the mediator within the decision time. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.mediator.declarative_api.errors - -> - **Summary**: Errors for the last update -> - **Description**: The node will attempt to apply the changes configured in the declarative config file. A positive number means that some items failed to be synchronised. A negative number means that the overall synchronisation procedure failed with an error. 
: 0 = everything good, -1 = config file unreadable, -2 = context could not be created, -3 = failure while applying items, -9 = exception caught. -> - **Type**: gauge -> - **Qualification**: Errors - -#### daml.mediator.declarative_api.items - -> - **Summary**: Number of items managed through the declarative API -> - **Description**: This metric indicates the number of items managed through the declarative API -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.mediator.max-event-age - -> - **Summary**: Age of oldest unpruned confirmation response. -> - **Description**: This gauge exposes the age of the oldest, unpruned confirmation response in hours as a way to quantify the pruning backlog. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.mediator.outstanding-requests - -> - **Summary**: Number of currently outstanding requests -> - **Description**: This metric provides the number of currently open requests registered with the mediator. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.mediator.requests - -> - **Summary**: Total number of processed confirmation requests (approved and rejected) -> - **Description**: This metric provides the number of processed confirmation requests since the system has been started. -> - **Type**: meter -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.actual-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks how many such batches are processed in parallel. -> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. 
Indicators that the configured upper bound may be too low: This metric constantly is close to the configured maximum, which is exposed via 'max-in-flight-event-batches', while the system's resources are under-utilized. Indicators that the configured upper bound may be too high: Out-of-memory errors crashing the JVM or frequent garbage collection cycles that slow down processing. The metric tracks how many of these batches have been sent to the application handler but have not yet been fully processed. This metric can help identify potential bottlenecks or issues with the application's processing of events and provide insights into the overall workload of the system. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.handler.application-handle - -> - **Summary**: Timer monitoring time and rate of sequentially handling the event application logic -> - **Description**: All events are received sequentially. This handler records the rate and time it takes the application (participant or mediator) to handle the events. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.delay - -> - **Summary**: The delay on the event processing in milliseconds -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. Upon having received the same message from enough sequencers (as configured by the trust threshold), the sequencer client compares the time difference between the sequencing time and the computers local clock and exposes this difference as the given metric. 
The difference will include the clock-skew and the processing latency between assigning the timestamp on the sequencer and receiving the message by the recipient from enough sequencers. If the difference is large compared to the usual latencies, clock skew can be ruled out, and enough sequencers are not slow, then it means that the node is still trying to catch up with events that the sequencers sequenced a while ago. This can happen after having been offline for a while or if the node is too slow to keep up with the messaging load. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.last-sequencing-time-micros - -> - **Summary**: The sequencing time of the last processed event in microseconds since unix epoch -> - **Description**: Every message received from the sequencer carries a timestamp that was assigned by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp. The component receiving the message on the participant or mediator is the sequencer client, while on the block sequencer itself, it's the block update generator. Upon having received the same message from enough sequencers (as configured by the trust threshold), this metric is updated with the sequencing time of that message. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.max-in-flight-event-batches - -> - **Summary**: Nodes process the events from the synchronizer's sequencer in batches. This metric tracks the upper bound of such batches being processed in parallel. -> - **Description**: Incoming messages are processed by a sequencer client, which combines them into batches of size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid overwhelming it with too many events at once. 
Configured by 'maximum-in-flight-event-batches' parameter in the sequencer-client config The metric shows the configured upper limit on how many batches the application handler may process concurrently. The metric 'actual-in-flight-event-batches' tracks the actual number of currently processed batches. -> - **Type**: gauge -> - **Qualification**: Debug - -#### daml.sequencer-client.handler.sequencer-events - -> - **Summary**: Number of received events from the sequencer -> - **Description**: A participant reads events from the sequencer. This metric captures the count and rate of events. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.sequencer-connection-pool.active-subscriptions - -> - **Summary**: Number of active subscriptions in the subscription pool -> - **Description**: This metric indicates the current number of subscriptions that are active. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.grpc-requests - -> - **Summary**: Number of gRPC requests sent on this connection -> - **Description**: This metric indicates the number of gRPC requests that have been sent on this connection. -> - **Type**: counter -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.subscription-threshold - -> - **Summary**: Sum of trust threshold and liveness margin configured in the subscription pool -> - **Description**: The liveness margin determines how many subscriptions on different sequencers are continuously maintained, beyond the minimum number defined by the trust threshold. In other words, the subscription pool will strive to maintain at all times (trust threshold + liveness margin)-many subscriptions active. This provides tolerance to subscriptions falling, enabling the node to continue operating while some sequencers are down. 
-> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.tracked-connections - -> - **Summary**: Number of connections tracked by the connection pool -> - **Description**: The configuration of the connection pool defines the parameters of the sequencer connections. This metrics shows the current number of those connections. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.trust-threshold - -> - **Summary**: Trust threshold configured in the connection pool -> - **Description**: The trust threshold determines how many connections to sequencers must be available and consistent (same synchronizer ID, same protocol version, same static parameters) for the connection pool to initialize. Furthermore, it also determines the number of sequencer subscriptions that must deliver identical copies of an event for that event to be accepted and processed by the node. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.sequencer-connection-pool.validated-connections - -> - **Summary**: Number of connections validated by the connection pool -> - **Description**: This metric indicates the current number of connections that are up and validated. These connections are available for components of the node that need to communicate with the synchronizer. -> - **Type**: gauge -> - **Qualification**: Saturation - -#### daml.sequencer-client.submissions.dropped - -> - **Summary**: Count of send requests that did not cause an event to be sequenced -> - **Description**: Counter of send requests we did not witness a corresponding event to be sequenced by the supplied max-sequencing-time. 
There could be many reasons for this happening: the request may have been lost before reaching the sequencer, the sequencer may be at capacity and the the max-sequencing-time was exceeded by the time the request was processed, or the supplied max-sequencing-time may just be too small for the sequencer to be able to sequence the request. -> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.in-flight - -> - **Summary**: Number of sequencer send requests we have that are waiting for an outcome or timeout -> - **Description**: Incremented on every successful send to the sequencer. Decremented when the event or an error is sequenced, or when the max-sequencing-time has elapsed. -> - **Type**: counter -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.overloaded - -> - **Summary**: Count of send requests which receive an overloaded response -> - **Description**: Counter that is incremented if a send request receives an overloaded response from the sequencer. -> - **Type**: counter -> - **Qualification**: Errors - -#### daml.sequencer-client.submissions.sends - -> - **Summary**: Rate and timings of send requests to the sequencer -> - **Description**: Provides a rate and time of how long it takes for send requests to be accepted by the sequencer. Note that this is just for the request to be made and not for the requested event to actually be sequenced. -> - **Type**: timer -> - **Qualification**: Debug - -#### daml.sequencer-client.submissions.sequencing - -> - **Summary**: Rate and timings of sequencing requests -> - **Description**: This timer is started when a submission is made to the sequencer and then completed when a corresponding event is witnessed from the sequencer, so will encompass the entire duration for the sequencer to sequence the request. If the request does not result in an event no timing will be recorded. 
-> - **Type**: timer -> - **Qualification**: Latency - -#### daml.sequencer-client.traffic-control.event-delivered - -> - **Summary**: Number of events that were sequenced and delivered. -> - **Description**: Counter for event-delivered-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-delivered-cost - -> - **Summary**: Cost of events that were sequenced and delivered. -> - **Description**: Cost of events for which the sender received confirmation that they were delivered. There is an exception for aggregated submissions: the cost of aggregate events will be recorded as soon as the event is ordered and the sequencer waits to receive threshold-many events. The final event may or may not be delivered successfully depending on the result of the aggregation. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected - -> - **Summary**: Number of events that were sequenced but not delivered. -> - **Description**: Counter for event-rejected-cost. -> - **Type**: counter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.event-rejected-cost - -> - **Summary**: Cost of events that were sequenced but no delivered successfully. -> - **Description**: Cost of events for which the sender received confirmation that the events will not be delivered. The reason for non-delivery is labeled on the metric, if available. -> - **Type**: meter -> - **Qualification**: Traffic - -#### daml.sequencer-client.traffic-control.submitted-event-cost - -> - **Summary**: Cost of event submitted from the sequencer client. -> - **Description**: When the sequencer client sends an event to the sequencer to be sequenced, it will record on this metric the cost of the event. Note that the event may or may not end up being sequenced. So this metric may not exactly match the actual consumed traffic. 
-> - **Type**: meter -> - **Qualification**: Traffic - -### Health Metrics - -The following metrics are exposed for all components. - -#### daml_health_status - -- **Description**: The status of the component -- **Values**: - - **0**: Not healthy - - **1**: Healthy -- **Labels**: - - **component**: the name of the component being monitored -- **Type**: Gauge - -### gRPC Metrics - -The following metrics are exposed for all gRPC endpoints. These metrics have the following common labels attached: - -- **grpc_service_name**: - fully qualified name of the gRPC service (e.g. `com.daml.ledger.api.v1.ActiveContractsService`) - -- **grpc_method_name**: - name of the gRPC method (e.g. `GetActiveContracts`) - -- **grpc_client_type**: - type of client connection (`unary` or `streaming`) - -- **grpc_server_type**: - type of server connection (`unary` or `streaming`) - -- **service**: - Canton service's name (e.g. `participant`, `sequencer`, etc.) - -#### daml_grpc_server_duration_seconds - -- **Description**: Distribution of the durations of serving gRPC requests -- **Type**: Histogram - -#### daml_grpc_server_messages_sent_total - -- **Description**: Total number of gRPC messages sent (on either type of connection) -- **Type**: Counter - -#### daml_grpc_server_messages_received_total - -- **Description**: Total number of gRPC messages received (on either type of connection) -- **Type**: Counter - -#### daml_grpc_server_started_total - -- **Description**: Total number of started gRPC requests (on either type of connection) -- **Type**: Counter - -#### daml_grpc_server_handled_total - -- **Description**: Total number of handled gRPC requests -- **Labels**: - - **grpc_code**: returned gRPC status code for the call (`OK`, `CANCELLED`, `INVALID_ARGUMENT`, etc.) 
-- **Type**: Counter - -#### daml_grpc_server_messages_sent_bytes - -- **Description**: Distribution of payload sizes in gRPC messages sent (both unary and streaming) -- **Type**: Histogram - -#### daml_grpc_server_messages_received_bytes - -- **Description**: Distribution of payload sizes in gRPC messages received (both unary and streaming) -- **Type**: Histogram - -### HTTP Metrics - -The following metrics are exposed for all HTTP endpoints. These metrics have the following common labels attached: - -- **http_verb**: - HTTP verb used for a given call (e.g. `GET` or `PUT`) - -- **host**: - fully qualified hostname of the HTTP endpoint (e.g. `example.com`) - -- **path**: - path of the HTTP endpoint (e.g. `/v2/parties`) - -- **service**: - Daml service's name (`json_api` for the JSON Ledger API Service) - -#### daml_http_requests_duration_seconds - -- **Description**: Distribution of the durations of serving HTTP requests -- **Type**: Histogram - -#### daml_http_requests_total - -- **Description**: Total number of HTTP requests completed -- **Labels**: - - **http_status**: returned HTTP status code for the call -- **Type**: Counter - -#### daml_http_websocket_messages_received_total - -- **Description**: Total number of WebSocket messages received -- **Type**: Counter - -#### daml_http_websocket_messages_sent_total - -- **Description**: Total number of WebSocket messages sent -- **Type**: Counter - -#### daml_http_requests_payload_bytes - -- **Description**: Distribution of payload sizes in HTTP requests received -- **Type**: Histogram - -#### daml_http_responses_payload_bytes - -- **Description**: Distribution of payload sizes in HTTP responses sent -- **Type**: Histogram - -#### daml_http_websocket_messages_received_bytes - -- **Description**: Distribution of payload sizes in WebSocket messages received -- **Type**: Histogram - -#### daml_http_websocket_messages_sent_bytes - -- **Description**: Distribution of payload sizes in WebSocket messages sent -- **Type**: 
Histogram - -### Pruning Metrics - -The following metrics are exposed for all pruning processes. These metrics have the following labels: - -- **phase**: - The name of the pruning phase being monitored - -#### daml_services_pruning_prune_started_total - -- **Description**: Total number of started pruning processes -- **Type**: Counter - -#### daml_services_pruning_prune_completed_total - -- **Description**: Total number of completed pruning processes -- **Type**: Counter - -### JVM Metrics - -The following metrics are exposed for the JVM, if enabled. - -#### runtime_jvm_gc_time - -- **Description**: Time spent in a given JVM garbage collector in milliseconds -- **Labels**: - - **gc**: Garbage collector regions (eg: `G1 Old Generation`, `G1 New Generation`) -- **Type**: Counter - -#### runtime_jvm_gc_count - -- **Description**: The number of collections that have occurred for a given JVM garbage collector -- **Labels**: - - **gc**: Garbage collector regions (eg: `G1 Old Generation`, `G1 New Generation`) -- **Type**: Counter - -#### runtime_jvm_memory_area - -- **Description**: JVM memory area statistics -- **Labels**: - - **area**: Can be `heap` or `non_heap` - - **type**: Can be `committed`, `used` or `max` - -#### runtime_jvm_memory_pool - -- **Description**: JVM memory pool statistics -- **Labels**: - - **pool**: Defined pool name. - - **type**: Can be `committed`, `used` or `max` +The full Canton metrics inventory — including Participant Metrics, Sequencer Metrics, Mediator Metrics, and Splice service metrics — lives in [Canton Metrics](/global-synchronizer/reference/canton-metrics). 
## Logging diff --git a/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx b/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx index 2cfce3774..c8ddf0bf8 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx @@ -41,9 +41,7 @@ Package vetting ensures that all validators involved in a transaction agree on t ### DAR Not Uploaded -``` -PACKAGE_NOT_FOUND: Could not find package -``` +``*PACKAGE_NOT_FOUND: Could not find package*`` Upload the required DAR to your validator: @@ -62,9 +60,7 @@ Or via Canton Console: Even if your validator has the package, the other party's validator must also have it uploaded and vetted. If you see: -``` -PACKAGE_SELECTION_FAILED: No package found for module -``` +``*PACKAGE_SELECTION_FAILED: No package found for module*`` Contact the counterparty's validator operator to upload and vet the same DAR. Both sides of a transaction must have the package available. diff --git a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx index 49886e859..863ee08f5 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx @@ -161,7 +161,7 @@ Reviewers: Skip this section. Remove markers after final approval. ### Traffic balance below reserved amount -A log of the form shown below indicates that your validator app has not been able to `purchase any traffic `. The validator blocks transactions not required to purchase more traffic once the purchased traffic balance falls below a given number to avoid issues where the validator locks itself out by not having enough traffic to complete a traffic purchase. 
Check the logs for `TopupMemberTrafficTrigger` to find possible causes. +A log of the form shown below indicates that your validator app has not been able to *purchase any traffic*. The validator blocks transactions not required to purchase more traffic once the purchased traffic balance falls below a given number to avoid issues where the validator locks itself out by not having enough traffic to complete a traffic purchase. Check the logs for `TopupMemberTrafficTrigger` to find possible causes. If you only want to rely on free traffic and do not want to purchase any extra traffic, remove the validator top-up config. @@ -171,7 +171,7 @@ ABORTED: Traffic balance below reserved traffic amount (0 < 200000) ### Insufficient funds to buy configured traffic amount -A log of the form shown below indicates that your validator app attempted to `purchase traffic ` but does not have enough in the wallet of the validator operator party. This is common on TestNet and MainNet for new nodes as they start out with a balance of 0 and only slowly accrue CC through validator liveness rewards. So often this just requires waiting until enough CC has accrued. Alternatively, an existing node with a CC balance can transfer CC to you to increase your balance. +A log of the form shown below indicates that your validator app attempted to *purchase traffic* but does not have enough in the wallet of the validator operator party. This is common on TestNet and MainNet for new nodes as they start out with a balance of 0 and only slowly accrue CC through validator liveness rewards. So often this just requires waiting until enough CC has accrued. Alternatively, an existing node with a CC balance can transfer CC to you to increase your balance. If you only want to rely on free traffic and do not want to purchase any extra traffic, remove the validator top-up config. 
diff --git a/docs-main/overview/reference/external-party.mdx b/docs-main/overview/reference/external-party.mdx index 8b3df4a8b..2d0828111 100644 --- a/docs-main/overview/reference/external-party.mdx +++ b/docs-main/overview/reference/external-party.mdx @@ -108,12 +108,58 @@ The following sequence diagram describes the submission flow both for local and - Interactions in a blue background are exclusive to local parties - Interactions with no background are common to both local and external parties -
- +```mermaid sequenceDiagram -actor Alice participant PPN participant EPN note over PPN,EPN: For local parties,\
PPN == EPN == SPN\
(with submission permission) Alice-\>\>+PPN: Ledger API Command rect rgb(255, 153, 153) PPN-\>\>+Alice: - Transaction Data\
- Transaction Hash Note over Alice: Inspects transaction data Note over Alice: Validates transaction hash Note over Alice: Signs transaction hash Alice-\>\>+EPN: - Transaction Data\
- Transaction Signature end note over EPN: From there on the flow for\
local and external parties\
is the same participant Synchronizer participant CPN 1 participant CPN 2 participant OPN Note over CPN 1,CPN 2: Host Alice w/ Confirmation rights EPN-\>\>+Synchronizer: Confirmation Request Synchronizer-\>\>+EPN: Confirmation Request Synchronizer-\>\>+CPN 1: Confirmation Request Synchronizer-\>\>+CPN 2: Confirmation Request Synchronizer-\>\>+OPN: Confirmation Request Note over OPN: OPNs do not send confirmation responses\
but receive the confirmation request... Note over EPN: For local parties,\
the SPN sends a confirmation response as\
it has confirmation permission for the party. Note over EPN: For external parties,\
the EPN also sends a confirmation response\
but only as a veto mechanism to provide\
consistent completion events. EPN-\>\>+Synchronizer: Confirmation Response rect rgb(255, 153, 153) Note over CPN 1: Validate Alice's transaction signature end CPN 1-\>\>+Synchronizer: Confirmation Response rect rgb(255, 153, 153) Note over CPN 2: Validate Alice's transaction signature end CPN 2-\>\>+Synchronizer: Confirmation Response Synchronizer-\>\>+CPN 1: Transaction Verdict Synchronizer-\>\>+CPN 2: Transaction Verdict Synchronizer-\>\>+OPN: Transaction Verdict Note over OPN: ...and the verdict Synchronizer-\>\>+EPN: Transaction Verdict Note over Alice: Listens to completion events\
to learn about the\
outcome of the transaction. EPN-\>\>+Alice: Completion event Note over Alice: Queries CPNs for transaction stream,\
active contract states. CPN 1-\>\>+Alice: Transaction stream, ACS CPN 2-\>\>+Alice: Transaction stream, ACS Note over EPN: For local parties, SPN can also provide\
ledger data. rect rgb(153, 204, 255) EPN-\>\>+Alice: Transaction stream, ACS end - -
+ actor Alice + participant PPN + participant EPN + note over PPN,EPN: For local parties,
PPN == EPN == SPN
(with submission permission) + Alice->>+PPN: Ledger API Command + rect rgb(255, 153, 153) + PPN->>+Alice: - Transaction Data
- Transaction Hash + Note over Alice: Inspects transaction data + Note over Alice: Validates transaction hash + Note over Alice: Signs transaction hash + Alice->>+EPN: - Transaction Data
- Transaction Signature + end + note over EPN: From there on the flow for
local and external parties
is the same + participant Synchronizer + participant CPN 1 + participant CPN 2 + participant OPN + Note over CPN 1,CPN 2: Host Alice w/ Confirmation rights + EPN->>+Synchronizer: Confirmation Request + Synchronizer->>+EPN: Confirmation Request + Synchronizer->>+CPN 1: Confirmation Request + Synchronizer->>+CPN 2: Confirmation Request + Synchronizer->>+OPN: Confirmation Request + Note over OPN: OPNs do not send confirmation responses
but receive the confirmation request... + Note over EPN: For local parties,
the SPN sends a confirmation response as
it has confirmation permission for the party. + Note over EPN: For external parties,
the EPN also sends a confirmation response
but only as a veto mechanism to provide
consistent completion events. + EPN->>+Synchronizer: Confirmation Response + rect rgb(255, 153, 153) + Note over CPN 1: Validate Alice's transaction signature + end + CPN 1->>+Synchronizer: Confirmation Response + rect rgb(255, 153, 153) + Note over CPN 2: Validate Alice's transaction signature + end + CPN 2->>+Synchronizer: Confirmation Response + Synchronizer->>+CPN 1: Transaction Verdict + Synchronizer->>+CPN 2: Transaction Verdict + Synchronizer->>+OPN: Transaction Verdict + Note over OPN: ...and the verdict + Synchronizer->>+EPN: Transaction Verdict + Note over Alice: Listens to completion events
to learn about the
outcome of the transaction. + EPN->>+Alice: Completion event + Note over Alice: Queries CPNs for transaction stream,
active contract states. + CPN 1->>+Alice: Transaction stream, ACS + CPN 2->>+Alice: Transaction stream, ACS + Note over EPN: For local parties, SPN can also provide
ledger data. + rect rgb(153, 204, 255) + EPN->>+Alice: Transaction stream, ACS + end +``` ## Limitations diff --git a/docs-main/overview/understand/global-synchronizer.mdx b/docs-main/overview/understand/global-synchronizer.mdx index 1e29f7cbc..e5b2cd4c4 100644 --- a/docs-main/overview/understand/global-synchronizer.mdx +++ b/docs-main/overview/understand/global-synchronizer.mdx @@ -283,6 +283,8 @@ The Global Synchronizer and validators currently have frequent upgrades with the - **[Validator Operations](/global-synchronizer/understand/introduction)** - Deploy your own validator - **[Deployment Progression](/appdev/modules/m5-deployment-progression)** - Deploy applications across environments +
+ {/* COPIED_START source="docs-website:docs/replicated/canton/3.4/overview/explanations/canton/synchronizers.rst" hash="ff3b68a4" */} @@ -332,3 +334,5 @@ Mediator = Two-phase commit coordinator {/* COPIED_END */} +
+ diff --git a/docs-main/sdks-tools/api-reference/splice-apis.mdx b/docs-main/sdks-tools/api-reference/splice-apis.mdx index 30155312f..4f162433c 100644 --- a/docs-main/sdks-tools/api-reference/splice-apis.mdx +++ b/docs-main/sdks-tools/api-reference/splice-apis.mdx @@ -126,7 +126,7 @@ These endpoints are intended for users to programmatically interact with their w ### Splice Wallet Transfer Offers (deprecated) -**Deprecated** (since `splice-0.4.11`): Use the `Canton Network Token Standard APIs ` instead. +**Deprecated** (since `splice-0.4.11`): Use the *Canton Network Token Standard APIs* instead. Splice Wallet transfer offers are a legacy two-step workflow to transfer Canton Coin between users. They work as follows: @@ -134,7 +134,7 @@ Splice Wallet transfer offers are a legacy two-step workflow to transfer Canton > - The sender creates a `Splice.Wallet.TransferOffer` daml contract. > - The receiver accepts the offer, which immediately transfers the agreed coin. -This specific transfer offer workflow is deprecated in favor of the two-step workflow supported by Canton Coin implementation of the `Canton Network Token Standard `. +This specific transfer offer workflow is deprecated in favor of the two-step workflow supported by Canton Coin implementation of the *Canton Network Token Standard*. Use the endpoints below to create and manage Splice Wallet transfer offers. Use the Ledger API directly to create and manage Canton Network Token Standard transfer offers. @@ -163,7 +163,7 @@ Any user can buy traffic for any validator. Buying traffic is a multi-step proce These endpoints are used internally by the frontend of the Splice Wallet to interact with a user Canton Coin holdings. -These endpoints are not intended to be used by other applications. If you want to build a wallet of your own, we recommend to build on the `Canton Network Token Standard APIs` instead. +These endpoints are not intended to be used by other applications. 
If you want to build a wallet of your own, we recommend to build on the *Canton Network Token Standard APIs* instead. **Authorization:** Authentication with a JWT token as described in `app-auth`, where the subject claim of the token is the user whose wallet the endpoint operates on. @@ -214,7 +214,7 @@ External signing is a Canton feature allows setting up a party such that transac For the common case of wanting to set up an external party in a topology where the executing, preparing and confirming participant are the same node and that party should hold and transfer Canton Coin, the validator provides high-level APIs. > 1. Use `/v0/admin/external-party/topology/*` to set up an external party -> 2. Use `/v0/admin/external-party/setup-proposal` to start setting up a `Splice.Wallet.TransferPreapproval` daml contract for the external party, which allows the party to send and receive Canton Coin without having to approve individual `transfer offers `. +> 2. Use `/v0/admin/external-party/setup-proposal` to start setting up a `Splice.Wallet.TransferPreapproval` daml contract for the external party, which allows the party to send and receive Canton Coin without having to approve individual *transfer offers*. > 3. Use `/v0/admin/external-party/setup-proposal/*` to finish setting up the transfer preapproval. > 4. Use `/v0/admin/external-party/transfer-preapproval/*` to send Canton Coin to other parties. > 5. Use `/v0/admin/external-party/balance` to check the balance of the external party. @@ -360,9 +360,7 @@ The base path is `/api/sv`. Key endpoint groups include: Pass the token as an [OAuth2 Bearer token](https://datatracker.ietf.org/doc/html/rfc6750#section-2.1): -``` -Authorization: Bearer -``` +``*Authorization: Bearer*`` The Scan API is the exception -- it is publicly accessible and does not require authentication. 
diff --git a/docs-main/sdks-tools/development-tools/daml-studio.mdx b/docs-main/sdks-tools/development-tools/daml-studio.mdx index a320807dd..3ad24eb1d 100644 --- a/docs-main/sdks-tools/development-tools/daml-studio.mdx +++ b/docs-main/sdks-tools/development-tools/daml-studio.mdx @@ -194,9 +194,8 @@ If Daml Studio is unable to start a package environment for a package, for examp As discussed above, each package runs its own environment, these environments are managed by the root environment, which if not specified, will be the most recent SDK on your system. You can override this version by providing a `daml.yaml` file at the root of your project (i.e. next to the `multi-package.yaml`) containing only the following: -```yaml -sdk-version: -``` +``*yaml +sdk-version:*`` ### Jump to definition for dependencies diff --git a/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx b/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx index d15c3be02..0575a1fb0 100644 --- a/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx +++ b/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx @@ -1,3 +1,2 @@ -```bash -kubectl get pods -n -``` +``*bash +kubectl get pods -n*`` diff --git a/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx b/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx index 60879dac3..20c36fc60 100644 --- a/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx +++ b/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx @@ -1,4 +1,3 @@ -```bash +``*bash # Check if any container was OOM-killed -docker inspect --format='{{.State.OOMKilled}}' -``` +docker inspect --format='{{.State.OOMKilled}}'*`` From 
409729a571554367ad667c740b918cba5e4d4829 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Thu, 30 Apr 2026 23:08:35 +0200 Subject: [PATCH 3/6] Migrate hand-authored legacy doc-asset links to internal pages Replace docs.digitalasset.com (Canton 3.x predecessor) and docs.daml.com (Daml 2.x) links across 47 hand-authored .mdx files with internal targets under /appdev, /sdks-tools, /global-synchronizer, /reference, and /overview. Out of scope (separate decisions): - 4,187 auto-generated /reference/scala/ Scaladoc cross-links pending an architectural decision on Scaladoc hosting - snippets/external/ files mirrored from upstream splice-wallet-kernel - one Canton Utility Setup link with no internal replacement - the __VERSION__ placeholder bug in canton-configuration-guide.mdx (rewritten to internal Scala refs as part of this change) --- .../external-signing-transactions.mdx | 4 ++-- docs-main/appdev/deep-dives/multi-hosting.mdx | 2 +- .../deep-dives/performance-optimization.mdx | 2 +- docs-main/appdev/faq.mdx | 2 +- .../upgrading-from-previous-versions.mdx | 2 +- docs-main/appdev/get-started/whats-new.mdx | 2 +- .../appdev/modules/m1-understanding-canton.mdx | 2 +- .../appdev/modules/m2-migration-checklist.mdx | 2 +- .../appdev/modules/m3-building-packaging.mdx | 4 ++-- docs-main/appdev/modules/m3-contract-keys.mdx | 2 +- docs-main/appdev/modules/m3-dev-environment.mdx | 2 +- .../appdev/modules/m3-language-fundamentals.mdx | 4 ++-- docs-main/appdev/modules/m3-testing.mdx | 2 +- docs-main/appdev/modules/m7-security.mdx | 2 +- docs-main/appdev/quickstart/index.mdx | 2 +- .../quickstart/observability-and-tracing.mdx | 8 ++++---- docs-main/appdev/quickstart/prerequisites.mdx | 2 +- .../appdev/quickstart/project-structure.mdx | 2 +- .../reference/daml-standard-library/da-fail.mdx | 8 +++----- .../daml-standard-library/da-record.mdx | 2 -- .../appdev/reference/pqs-sql-reference.mdx | 2 +- docs-main/appdev/tooling/ide-setup.mdx | 2 +- docs-main/appdev/troubleshooting.mdx | 4 ++-- 
.../canton-console/getting-started-tutorial.mdx | 17 +++++++---------- .../deployment/configuration.mdx | 2 +- .../deployment/identity-management.mdx | 6 +++--- .../deployment/installation.mdx | 9 +++------ .../deployment/synchronizer-traffic.mdx | 2 +- .../deployment/validator-users.mdx | 2 +- docs-main/global-synchronizer/faq.mdx | 2 +- .../production-operations/monitoring-setup.mdx | 2 +- .../security-hardening.mdx | 8 ++++---- .../reference/canton-configuration-guide.mdx | 4 ++-- .../reference/canton-console-commands.mdx | 8 ++++---- .../reference/canton-console-reference.mdx | 8 ++++---- .../reference/configuration-reference.mdx | 2 +- .../reference/metrics-reference.mdx | 2 +- .../global-synchronizer/troubleshooting.mdx | 2 +- docs-main/integrations/ecosystem.mdx | 2 +- .../reference/sv-governance-reference.mdx | 4 ++-- .../sdks-tools/api-reference/admin-api.mdx | 2 +- .../sdks-tools/cli-tools/canton-console.mdx | 2 +- docs-main/sdks-tools/cli-tools/dpm.mdx | 2 +- docs-main/sdks-tools/language-bindings/java.mdx | 4 ++-- .../sdks-tools/language-bindings/scala.mdx | 4 ++-- docs-main/sdks-tools/sdks/daml-sdk.mdx | 2 +- docs-main/sdks-tools/sdks/wallet-sdk.mdx | 4 ++-- .../shared/version-compatibility-dashboard.mdx | 2 +- 48 files changed, 80 insertions(+), 90 deletions(-) diff --git a/docs-main/appdev/deep-dives/external-signing-transactions.mdx b/docs-main/appdev/deep-dives/external-signing-transactions.mdx index 4533ffac2..ab2e017b5 100644 --- a/docs-main/appdev/deep-dives/external-signing-transactions.mdx +++ b/docs-main/appdev/deep-dives/external-signing-transactions.mdx @@ -1066,7 +1066,7 @@ prepared_create_transaction = prepare_create_response.prepared_transaction - `command_id`: Unique, random string identifying this specific command. Each command submission must have a new and unique `command_id`. - `act_as`: ID of the party issuing the command. - `synchronizer_id`: ID of the synchronizer that processes the transaction upon submission. 
-- `commands`: Ledger commands for submission. In this case, it shows the creation of a Ping contract with `Alice` as the initiator, `Bob` as the responder, and a `ping_id` value. See the [command documentation](https://docs.daml.com/app-dev/grpc/proto-docs.html#command-message-version-com-daml-ledger-api-v1) for details. +- `commands`: Ledger commands for submission. In this case, it shows the creation of a Ping contract with `Alice` as the initiator, `Bob` as the responder, and a `ping_id` value. See the [Ledger API command reference](/reference/grpc-ledger-api-reference/com-daml-ledger-api-v2) for details. ### Response @@ -1274,7 +1274,7 @@ In the request, note the presence of: - `signatures`: Object containing the signature of the transaction hash, along with metadata. In particular: > - `signing_algorithm_spec`: Will vary depending on the key used during onboarding. - > - `signed_by`: Fingerprint of the protocol signing *public* key of `Alice`. This tutorial assumes the same key was used to create `Alice`'s namespace and her protocol signing key. This is why the fingerprint of the signing key matches the second part of her Party Id (after `::`). For more details check out the onboarding tutorial and the [parties documentation](https://docs.daml.com/app-dev/parties-users.html#party-id-hints-and-display-names). + > - `signed_by`: Fingerprint of the protocol signing *public* key of `Alice`. This tutorial assumes the same key was used to create `Alice`'s namespace and her protocol signing key. This is why the fingerprint of the signing key matches the second part of her Party Id (after `::`). For more details check out the onboarding tutorial and the [Daml parties guide](/appdev/deep-dives/manage-daml-parties). If the gRPC Ledger API authorization is enabled, the user must have the `actAs` claim on behalf of `Alice` to call the `execute` endpoint. 
diff --git a/docs-main/appdev/deep-dives/multi-hosting.mdx b/docs-main/appdev/deep-dives/multi-hosting.mdx index 888b00e9f..20ecb2054 100644 --- a/docs-main/appdev/deep-dives/multi-hosting.mdx +++ b/docs-main/appdev/deep-dives/multi-hosting.mdx @@ -45,7 +45,7 @@ Each hosting validator is assigned one of three permission levels: Multi-hosting requires several topology transactions that maps the party to multiple validators. All hosting validators must sign the mapping — it's a proposal that becomes active only when all parties agree. -The instructions below apply to **new external parties**. Adding hosting nodes to existing parties is called [party replication](https://docs.digitalasset.com/operate/3.4/howtos/operate/parties/party_replication.html) and is a different, more involved workflow. For external parties, the external party must authorize (sign) the party to participant mapping with its own key. See the [external signing onboarding documentation](https://docs.digitalasset.com/build/3.5/tutorials/app-dev/external_signing_onboarding.html) for details. +The instructions below apply to **new external parties**. Adding hosting nodes to existing parties is called [party replication](/appdev/deep-dives/manage-daml-parties) and is a different, more involved workflow. For external parties, the external party must authorize (sign) the party to participant mapping with its own key. See the [external signing onboarding documentation](/appdev/deep-dives/external-signing-onboarding) for details. ### Via the Ledger API diff --git a/docs-main/appdev/deep-dives/performance-optimization.mdx b/docs-main/appdev/deep-dives/performance-optimization.mdx index 71b181985..3d0160e1b 100644 --- a/docs-main/appdev/deep-dives/performance-optimization.mdx +++ b/docs-main/appdev/deep-dives/performance-optimization.mdx @@ -1339,7 +1339,7 @@ The default PostgreSQL Docker image ships with minimal settings. 
For any realist - `max_connections` — Match to your expected connection pool size - `autovacuum` settings — Tune for your write patterns -Resource requirements depend on your workload. Consult the [PQS operations guide](https://docs.digitalasset.com/canton/usermanual/pqs) for sizing guidance specific to your query patterns and contract volumes. +Resource requirements depend on your workload. Tune sizing based on your query patterns and contract volumes. {/* COPIED_END */} diff --git a/docs-main/appdev/faq.mdx b/docs-main/appdev/faq.mdx index a72b1f198..afc242283 100644 --- a/docs-main/appdev/faq.mdx +++ b/docs-main/appdev/faq.mdx @@ -664,7 +664,7 @@ If a course doesn't mention Canton Network or Daml 3.x, or covers only Daml 2.x **Recommended resources:** 1. **Official Documentation:** - - [Build Documentation](https://docs.digitalasset.com/build/3.4) + - [Build Documentation](/appdev/get-started/choose-your-path) - [Operator Documentation](https://docs.sync.global) 2. **Hands-on:** diff --git a/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx b/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx index 848cb9868..2a6c2ba2e 100644 --- a/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx +++ b/docs-main/appdev/get-started/upgrading-from-previous-versions.mdx @@ -9,7 +9,7 @@ Per-version migration steps live with the component release notes. 
Use the links - **Splice and the Global Synchronizer** — [Current release](/global-synchronizer/release-notes/current-release), [release history](/global-synchronizer/release-notes/release-history), [weekly patch releases](/global-synchronizer/release-notes/weekly-patch-releases) - **Wallet SDK** — [Wallet SDK release notes](/integrations/wallet/release-notes) -- **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) and the upstream changelog at [docs.digitalasset.com](https://docs.digitalasset.com) +- **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) ## From Canton 2.x to 3.x diff --git a/docs-main/appdev/get-started/whats-new.mdx b/docs-main/appdev/get-started/whats-new.mdx index 937a96102..6354d2a47 100644 --- a/docs-main/appdev/get-started/whats-new.mdx +++ b/docs-main/appdev/get-started/whats-new.mdx @@ -7,7 +7,7 @@ For changes shipping in each component, see the corresponding release notes: - **Splice and the Global Synchronizer** — [Current release](/global-synchronizer/release-notes/current-release), [release history](/global-synchronizer/release-notes/release-history), [weekly patch releases](/global-synchronizer/release-notes/weekly-patch-releases) - **Wallet SDK** — [Wallet SDK release notes](/integrations/wallet/release-notes) -- **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) and the upstream documentation at [docs.digitalasset.com](https://docs.digitalasset.com) +- **Canton and Daml SDK** — [GitHub releases (digital-asset/daml)](https://github.com/digital-asset/daml/releases) - **CIPs** — [Canton Improvement Proposals](https://github.com/global-synchronizer-foundation/cips) ## Version compatibility diff --git a/docs-main/appdev/modules/m1-understanding-canton.mdx b/docs-main/appdev/modules/m1-understanding-canton.mdx index cdb070611..66c6ab241 100644 --- 
a/docs-main/appdev/modules/m1-understanding-canton.mdx +++ b/docs-main/appdev/modules/m1-understanding-canton.mdx @@ -96,4 +96,4 @@ By the end of this module, you'll understand: After completing this module, continue to: - **[Module 2](/appdev/modules/m2-canton-for-ethereum-devs)**: If you have Ethereum/blockchain experience -- **[Daml Documentation](https://docs.daml.com)**: If you're ready to start writing Daml +- **[Choose your path](/appdev/get-started/choose-your-path)**: If you're ready to start writing Daml diff --git a/docs-main/appdev/modules/m2-migration-checklist.mdx b/docs-main/appdev/modules/m2-migration-checklist.mdx index 3d13aa8ab..1047f0c90 100644 --- a/docs-main/appdev/modules/m2-migration-checklist.mdx +++ b/docs-main/appdev/modules/m2-migration-checklist.mdx @@ -41,7 +41,7 @@ If your app heavily relies on global state queries (e.g., "show all NFTs"), you' -- [ ] Install the [Daml SDK](https://docs.daml.com/getting-started/installation.html) +- [ ] Install the [Daml SDK](/sdks-tools/sdks/daml-sdk) - [ ] Install VS Code with the Daml extension - [ ] Clone the [CN Quickstart](https://github.com/digital-asset/cn-quickstart) repository - [ ] Run `make setup && make build && make start` to verify your setup works diff --git a/docs-main/appdev/modules/m3-building-packaging.mdx b/docs-main/appdev/modules/m3-building-packaging.mdx index df955d0f6..ed348d789 100644 --- a/docs-main/appdev/modules/m3-building-packaging.mdx +++ b/docs-main/appdev/modules/m3-building-packaging.mdx @@ -12,7 +12,7 @@ The application from `compose` is a complete and secure model for atomic swaps o 1. Upgrades, which change existing logic. For example, one might want the `Asset` template to have multiple signatories. 2. Extensions, which merely add new functionality through additional templates. 
-Upgrades are covered in their own section outside this introduction to Daml: [smart contract upgrades](https://docs.digitalasset.com/build/3.4/sdk/explanations/smart-contract-upgrades), +Upgrades are covered in their own section outside this introduction to Daml: [smart contract upgrades](/appdev/deep-dives/smart-contract-upgrade), so in this section we will extend the `compose` model with a simple second workflow: a multi-leg trade. In doing so, you'll learn about: - The software architecture of the Daml stack @@ -111,7 +111,7 @@ The `MultiTrade` model has more complex control flow and data handling than prev ## Building with dpm -When working on Canton Network projects, use the [`dpm`](https://docs.digitalasset.com/build/3.4/dpm/dpm.html) tool for all build operations: +When working on Canton Network projects, use the [`dpm`](/sdks-tools/cli-tools/dpm) tool for all build operations: - `dpm build` — Compile your Daml project and produce a DAR file - `dpm build --all` — Build all packages in a multi-package project diff --git a/docs-main/appdev/modules/m3-contract-keys.mdx b/docs-main/appdev/modules/m3-contract-keys.mdx index 28b6955d5..9058ff74d 100644 --- a/docs-main/appdev/modules/m3-contract-keys.mdx +++ b/docs-main/appdev/modules/m3-contract-keys.mdx @@ -48,7 +48,7 @@ Checking of the keys is done automatically at execution time, by the Daml execut The primary purpose of contract keys is to provide a stable, and possibly meaningful, identifier that can be used in Daml to fetch contracts. There are two functions to perform such lookups: `fetchbykey` and `lookupbykey`. Both types of lookup are performed at interpretation time on the submitting Participant Node, on a best-effort basis. Currently, that best-effort means lookups only return contracts if the submitting Party is a stakeholder of that contract. 
-In particular, the above means that if multiple commands are submitted simultaneously, all using contract lookups to find and consume a given contract, there will be contention between these commands, and at most one will succeed. For more information, see the section on avoiding contention in the [Canton documentation](https://docs.digitalasset.com/canton/usermanual/performance/index.html). +In particular, the above means that if multiple commands are submitted simultaneously, all using contract lookups to find and consume a given contract, there will be contention between these commands, and at most one will succeed. For more information, see the [performance optimization deep dive](/appdev/deep-dives/performance-optimization). Limiting key usage to stakeholders also means that keys cannot be used to access a divulged contract, i.e. there can be cases where `fetch` succeeds and `fetchByKey` does not. See the example at the end of this section for details. diff --git a/docs-main/appdev/modules/m3-dev-environment.mdx b/docs-main/appdev/modules/m3-dev-environment.mdx index 66bc6643d..71f4a3490 100644 --- a/docs-main/appdev/modules/m3-dev-environment.mdx +++ b/docs-main/appdev/modules/m3-dev-environment.mdx @@ -13,7 +13,7 @@ In this module, you will learn about the structure of a Daml ledger and how to w ## Prerequisites -- You have installed [dpm](https://docs.digitalasset.com/build/3.4/dpm/dpm.html) +- You have installed [dpm](/sdks-tools/cli-tools/dpm) ## Loading Example Code diff --git a/docs-main/appdev/modules/m3-language-fundamentals.mdx b/docs-main/appdev/modules/m3-language-fundamentals.mdx index 2cf68c236..1398d6741 100644 --- a/docs-main/appdev/modules/m3-language-fundamentals.mdx +++ b/docs-main/appdev/modules/m3-language-fundamentals.mdx @@ -138,7 +138,7 @@ If we want to partially apply an infix operation we can also do that as follows: #### Associativity and Precedence -When dealing with multiple infix operators, precedence determines how the Daml 
compiler should parse an expression. For example, for the expression `x + y * z`, because `\*` has a higher precedence than `+`, the expression is parsed as `x + (y * z)` instead of `(x + y) * z`. When dealing with infix operators with the same precedence, associativity determines how the Daml compiler should parse an expression. For example, because `+` and `-` are left-associative, the expression `x + y - z` is parsed as `(x + y) - z` instead of `x + (y - z)`. For built-in operators this has been predefined, for user-defined operators, it must be user-defined. See the [Daml reference on Fixity, Associativity and Precedence](https://docs.digitalasset.com/daml/reference/base.html) for details. +When dealing with multiple infix operators, precedence determines how the Daml compiler should parse an expression. For example, for the expression `x + y * z`, because `\*` has a higher precedence than `+`, the expression is parsed as `x + (y * z)` instead of `(x + y) * z`. When dealing with infix operators with the same precedence, associativity determines how the Daml compiler should parse an expression. For example, because `+` and `-` are left-associative, the expression `x + y - z` is parsed as `(x + y) - z` instead of `x + (y - z)`. For built-in operators this has been predefined, for user-defined operators, it must be user-defined. See the [Daml language reference](/appdev/reference/daml-language-reference) for details on fixity, associativity, and precedence. ### Type constraints @@ -826,7 +826,7 @@ Other than the typeclasses defined in Prelude, there are two modules generalizin Being able to browse the standard library starting from `stdlib-reference-base` is a start, and the module naming helps, but it's not an efficient process for finding out what a function you've encountered does, even less so for finding a function that does a thing you need to do. 
-Daml has its own version of the [Hoogle](https://hoogle.haskell.org/) search engine, which offers search both by name and by signature. This function is fully integrated into the search bar on [https://docs.digitalasset.com/](https://docs.digitalasset.com/), but for those wanting a pure standard library search, it's also available on [https://hoogle.daml.com](https://hoogle.daml.com). +Daml has its own version of the [Hoogle](https://hoogle.haskell.org/) search engine, which offers search both by name and by signature. For a pure standard library search, use [https://hoogle.daml.com](https://hoogle.daml.com). ### Search for functions by name diff --git a/docs-main/appdev/modules/m3-testing.mdx b/docs-main/appdev/modules/m3-testing.mdx index 5935c288c..a5afbd7df 100644 --- a/docs-main/appdev/modules/m3-testing.mdx +++ b/docs-main/appdev/modules/m3-testing.mdx @@ -118,7 +118,7 @@ The first part of the summary is a list of each executed script. For each script The second part of the summary is the coverage report. It shows you how many templates and choices are tested by the complete set of scripts in the package, in proportion of the total number of templates and choices. -To learn more about Daml test coverage, read the [Daml test coverage guide](https://docs.digitalasset.com/build/3.4/sdk/howtos/testing/test-coverage). +The coverage summary above is the primary tool for assessing how thoroughly your scripts exercise the templates and choices in your package. ## Debug, trace, and stacktraces diff --git a/docs-main/appdev/modules/m7-security.mdx b/docs-main/appdev/modules/m7-security.mdx index 298be1408..90c6a3367 100644 --- a/docs-main/appdev/modules/m7-security.mdx +++ b/docs-main/appdev/modules/m7-security.mdx @@ -62,7 +62,7 @@ Canton uses cryptographic keys for party identity, node identity, and transactio On LocalNet, keys are generated and stored locally — this is fine for development. 
In production: -- Use [Hardware Security Modules (HSM) or cloud Key Management Services (KMS)](https://docs.digitalasset.com/canton/usermanual/kms) for private keys +- Use [Hardware Security Modules (HSM) or cloud Key Management Services (KMS)](/global-synchronizer/production-operations/key-management) for private keys - Never store production keys on developer machines or in CI systems - Rotate keys according to your organization's security policy - Back up key material securely — losing keys means losing access to your party identity diff --git a/docs-main/appdev/quickstart/index.mdx b/docs-main/appdev/quickstart/index.mdx index d3c140b4d..f52c0439f 100644 --- a/docs-main/appdev/quickstart/index.mdx +++ b/docs-main/appdev/quickstart/index.mdx @@ -17,7 +17,7 @@ The QuickStart sets up a local environment (called LocalNet) with: - **A React frontend** for both provider and user roles - **A Java backend** service handling Ledger API interactions - **Canton Coin wallets** for traffic purchase and payment flows -- **Log analysis** with [lnav](https://docs.digitalasset.com/build/3.4/quickstart/operate/lnav-in-cn.html) for debugging and troubleshooting +- **Log analysis** with [lnav](/appdev/quickstart/observability-and-tracing) for debugging and troubleshooting ## Pages in this section diff --git a/docs-main/appdev/quickstart/observability-and-tracing.mdx b/docs-main/appdev/quickstart/observability-and-tracing.mdx index 0f477fddf..28ae7627f 100644 --- a/docs-main/appdev/quickstart/observability-and-tracing.mdx +++ b/docs-main/appdev/quickstart/observability-and-tracing.mdx @@ -72,7 +72,7 @@ The Quickstart application provides a foundational production Daml application. 
The current troubleshooting and debugging services include: -- Local ledger inspection via Daml shell ([https://docs.daml.com/tools/daml-shell/index.html](https://docs.daml.com/tools/daml-shell/index.html)) +- Local ledger inspection via [Daml Shell](/sdks-tools/cli-tools/daml-shell) - Datasource collection and management via **OpenTelemetry** - This uses the **OTEL Collector** ([https://opentelemetry.io/docs/collector](https://opentelemetry.io/docs/collector)) - Metrics are aggregated using **Prometheus** ([https://prometheus.io/](https://prometheus.io/)) @@ -385,17 +385,17 @@ A starting point for finding documentation on these see: [^3]: Daml shell has tab completion on most command arguments, including the Template Id argument to `active` and the Contract Id argument to contract. -[^4]: Further documentation is available at [https://docs.daml.com/tools/daml-shell/index.html](https://docs.daml.com/tools/daml-shell/index.html) +[^4]: Further documentation is available in the [Daml Shell reference](/sdks-tools/cli-tools/daml-shell). !["Daml Shell command line interface"](/images/docs_website/footnote-04-daml-shell-cli.png) -[^5]: A key differentiator of Canton from all other level one blockchains is that it offers privacy. It does this by enforcing right-to-know. rather than via secrecy-via-obscurity and/or via pseudo-anonymity. Canton provides two privacy guarantees: Even in encrypted form (sub-)transactions are only transmitted to participant nodes with a right to be informed of them; and, participant nodes will be informed of every (sub-)transaction they have a right to be informed of. For details on how Canton defines “right” and other aspects of this see the Daml Ledger Privacy Model ([https://docs.daml.com/concepts/ledger-model/ledger-privacy.html#privacy](https://docs.daml.com/concepts/ledger-model/ledger-privacy.html#privacy)) +[^5]: A key differentiator of Canton from all other level one blockchains is that it offers privacy. 
It does this by enforcing right-to-know, rather than via secrecy-via-obscurity and/or via pseudo-anonymity. Canton provides two privacy guarantees: Even in encrypted form (sub-)transactions are only transmitted to participant nodes with a right to be informed of them; and, participant nodes will be informed of every (sub-)transaction they have a right to be informed of. For details on how Canton defines "right" and other aspects of this see the [Daml ledger privacy model](/appdev/deep-dives/privacy-model).

[^6]: Distributed tracing is essential to efficient debugging and diagnosis of any distributed application. While technically distinct identifiers Trace and Span Ids are closely linked. If unfamiliar with their use OpenTelemetry has a good primer ([https://opentelemetry.io/docs/concepts/signals/traces/](https://opentelemetry.io/docs/concepts/signals/traces/)), Grafana has a reasonable demo ([https://grafana.com/docs/tempo/latest/introduction/](https://grafana.com/docs/tempo/latest/introduction/)), and we demonstrate their use later in this guide.

[^7]: Equivalent to “blockheight” in other public blockchains that do not support privacy. As privacy dictates that each participant node sees a different projection of the global blockchain, the offset is not comparable across different Participant Nodes. It is commonly the preferred id when dealing with a single participant node due to being a simple, monotonic, total-order on ledger events witnessed by a Participant Node.

-[^8]: By virtue of their role in the ledger model, all parties are (and the associated entity must be) capable of authorizing a (sub-)transaction or ledger event. 
See the Daml Ledger Authorization Model for details ([https://docs.daml.com/concepts/ledger-model/ledger-integrity.html#authorization](https://docs.daml.com/concepts/ledger-model/ledger-integrity.html#authorization)) +[^8]: By virtue of their role in the ledger model, all parties are (and the associated entity must be) capable of authorizing a (sub-)transaction or ledger event. See the [Daml ledger authorization model](/appdev/deep-dives/authorization) for details. [^9]: Where loggers cannot be configured to emit structured logs directly, log parsers are used to convert raw log files in the usual manner. This is primarily done in the OTEL Collector configuration. diff --git a/docs-main/appdev/quickstart/prerequisites.mdx b/docs-main/appdev/quickstart/prerequisites.mdx index b796afc33..13db40186 100644 --- a/docs-main/appdev/quickstart/prerequisites.mdx +++ b/docs-main/appdev/quickstart/prerequisites.mdx @@ -19,7 +19,7 @@ This guide walks through the installation and `LocalNet` deployment of the CN Qu - After installation, [explore the demo](/appdev/quickstart/running-the-demo) to complete a business operation in the example application. - For an overview of how the Quickstart project is structured, read the [project structure guide](/appdev/quickstart/project-structure). -- Learn about debugging using lnav in the [Debugging and troubleshooting with lnav](https://docs.digitalasset.com/build/3.4/quickstart/operate/lnav-in-cn.html). +- Learn about debugging using lnav in the [observability and tracing guide](/appdev/quickstart/observability-and-tracing). - Additional debugging information is in the section in the observability and troubleshooting section of the [cn-quickstart repository](https://github.com/digital-asset/cn-quickstart). 
## Prerequisites diff --git a/docs-main/appdev/quickstart/project-structure.mdx b/docs-main/appdev/quickstart/project-structure.mdx index 484da6a0e..e0a2226ca 100644 --- a/docs-main/appdev/quickstart/project-structure.mdx +++ b/docs-main/appdev/quickstart/project-structure.mdx @@ -304,7 +304,7 @@ See `quickstart-debugging-and-troubleshooting-lnav` for log analysis techniques. ## Next steps -Once you understand the project structure, visit the [TL;DR for new Canton Network developers](https://docs.digitalasset.com/build/3.4/overview/tldr.html) for additional guides to explore. +Once you understand the project structure, visit the [Choose your path guide](/appdev/get-started/choose-your-path) for additional guides to explore. {/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/appdev/reference/daml-standard-library/da-fail.mdx b/docs-main/appdev/reference/daml-standard-library/da-fail.mdx index 324801781..eb05476f6 100644 --- a/docs-main/appdev/reference/daml-standard-library/da-fail.mdx +++ b/docs-main/appdev/reference/daml-standard-library/da-fail.mdx @@ -33,7 +33,7 @@ Deprecated since: `-` The category of the failure, which determines the status code and log level of the failure. Maps 1-1 to the Canton error categories documented -here: https://docs.digitalasset.com/operate/3.4/reference/error_codes.html#error-categories-inventory +in the [error codes reference](/appdev/reference/error-codes). If you are more familiar with gRPC error codes, you can use the synonyms referenced in the comments. @@ -47,8 +47,7 @@ and should thus not be retried. Corresponds to the gRPC status code `INVALID_ARGUMENT`. -See https://docs.digitalasset.com/operate/3.4/reference/error_codes.html#invalidindependentofsystemstate -for more information. +See the [error codes reference](/appdev/reference/error-codes) for more information. 
- `InvalidGivenCurrentSystemStateOther` Use this to report errors that are due to the current state of the ledger, @@ -57,8 +56,7 @@ requests after reading updated state from the ledger. Corresponds to the gRPC status code `FAILED_PRECONDITION`. -See https://docs.digitalasset.com/operate/3.4/reference/error_codes.html#error-categories-inventory -for more information. +See the [error codes reference](/appdev/reference/error-codes) for more information. Instances: diff --git a/docs-main/appdev/reference/daml-standard-library/da-record.mdx b/docs-main/appdev/reference/daml-standard-library/da-record.mdx index 1c5dce613..05b6a55c4 100644 --- a/docs-main/appdev/reference/daml-standard-library/da-record.mdx +++ b/docs-main/appdev/reference/daml-standard-library/da-record.mdx @@ -53,8 +53,6 @@ MyRecord {foo = 3, bar = "hello"} daml> ``` -For more on Record syntax, see https://docs.digitalasset.com/build/3.4/reference/daml/stdlib/DA-Record.html. - `GetField x r a` and `SetField x r a` are typeclasses taking three parameters. The first parameter `x` is the field name, the second parameter `r` is the record type, and the last parameter `a` is the type of the field in this record. For diff --git a/docs-main/appdev/reference/pqs-sql-reference.mdx b/docs-main/appdev/reference/pqs-sql-reference.mdx index f06761381..45dfda0a8 100644 --- a/docs-main/appdev/reference/pqs-sql-reference.mdx +++ b/docs-main/appdev/reference/pqs-sql-reference.mdx @@ -351,7 +351,7 @@ Join through the `*_at_ix` column to enrich contract or exercise data with trans ## Summary functions -This section will be expanded in a future update. For PQS query patterns and usage, see the [PQS documentation](https://docs.digitalasset.com/canton/3.5/participant/how-to/pqs/pqs-user-guide). +This section will be expanded in a future update. For PQS query patterns and usage, see the [PQS development tools page](/sdks-tools/development-tools/pqs). 
## Contract columns diff --git a/docs-main/appdev/tooling/ide-setup.mdx b/docs-main/appdev/tooling/ide-setup.mdx index 557d21ab0..a0bd4924f 100644 --- a/docs-main/appdev/tooling/ide-setup.mdx +++ b/docs-main/appdev/tooling/ide-setup.mdx @@ -16,7 +16,7 @@ Daml Studio is the primary IDE for writing Daml smart contracts. It is a VS Code ### Installation 1. Install [VS Code](https://code.visualstudio.com/) version 1.87 or later -2. Install [DPM](https://docs.digitalasset.com/build/3.4/dpm/dpm.html) if you have not already +2. Install [DPM](/sdks-tools/cli-tools/dpm) if you have not already 3. From your project directory, run: diff --git a/docs-main/appdev/troubleshooting.mdx b/docs-main/appdev/troubleshooting.mdx index f7a209fab..83e5a7da9 100644 --- a/docs-main/appdev/troubleshooting.mdx +++ b/docs-main/appdev/troubleshooting.mdx @@ -493,7 +493,7 @@ queued tasks = 2000 # ← Queue is full! 3. Monitor pruning progress via Canton Console: -See [Monitor Pruning Progress](https://docs.daml.com/canton/usermanual/pruning.html#monitor-pruning-progress) in the documentation. +See the [pruning operations guide](/global-synchronizer/production-operations/pruning) for details. @@ -915,7 +915,7 @@ For Canton trace IDs, context extraction patterns, and detailed lnav workflows, ## Timeout Errors -Any transaction submitted to Canton is either successfully worked off (accepted or rejected) or eventually timed out. If a transaction hits a timeout, the application is informed of the rejection reasons by an appropriate completion event on the gRPC Ledger API. We can hit the following timeouts in Canton (you can get further timeouts from the [command service](https://docs.daml.com/app-dev/services.html#command-service)): +Any transaction submitted to Canton is either successfully worked off (accepted or rejected) or eventually timed out. If a transaction hits a timeout, the application is informed of the rejection reasons by an appropriate completion event on the gRPC Ledger API. 
We can hit the following timeouts in Canton (you can get further timeouts from the [Ledger API command service](/reference/grpc-ledger-api-reference/com-daml-ledger-api-v2)): > - `NOT_SEQUENCED_TIMEOUT` > - `LOCAL_VERDICT_TIMEOUT` diff --git a/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx b/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx index 7d3054c67..1ad471912 100644 --- a/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx +++ b/docs-main/global-synchronizer/canton-console/getting-started-tutorial.mdx @@ -12,7 +12,7 @@ Reviewers: Skip this section. Remove markers after final approval.
-change the section where we provision smart contract code: - create a new empty project - use the "Understanding IOUs" section to explain the structure of a daml contract (`https://docs.daml.com/app-dev/bindings-java/quickstart.html`) - transact on the IOU contract using curl and JSON Ledger API, not via console commands +change the section where we provision smart contract code: - create a new empty project - use the "Understanding IOUs" section to explain the structure of a daml contract (link to the Java bindings page once it covers a quickstart-style IOU walkthrough: /sdks-tools/language-bindings/java) - transact on the IOU contract using curl and JSON Ledger API, not via console commands
# Getting Started @@ -178,7 +178,7 @@ To run the protocol, the participants must connect to one or more synchronizers. images/canton-tutorial-elements.svg -The Participant Nodes provide their parties with a [gRPC Ledger API](https://docs.daml.com/app-dev/ledger-api.html) as a means to access the ledger. The parties can interact with the gRPC Ledger API manually using the console, but in practice these parties use applications to handle the interactions and display the data in a user-friendly interface. +The Participant Nodes provide their parties with a [gRPC Ledger API](/sdks-tools/api-reference/ledger-api) as a means to access the ledger. The parties can interact with the gRPC Ledger API manually using the console, but in practice these parties use applications to handle the interactions and display the data in a user-friendly interface. In addition to the gRPC Ledger API, each participant node also exposes an *Admin API*. The Admin API allows the administrator (that is, you) to: @@ -700,7 +700,7 @@ template OfferToPaintHouseByPainter return iouId2 ``` -We won't dive into the details of Daml, as this is [explained elsewhere](https://docs.daml.com/daml/intro/0_Intro.html). But one key observation is that the contracts themselves are passive. The contract instances represent the ledger and only encode the rules according to which the ledger state can be changed. Any change requires you to trigger some Daml contract execution by sending the appropriate commands over the Ledger API. +We won't dive into the details of Daml, as this is [explained elsewhere](/appdev/modules/m3-language-fundamentals). But one key observation is that the contracts themselves are passive. The contract instances represent the ledger and only encode the rules according to which the ledger state can be changed. Any change requires you to trigger some Daml contract execution by sending the appropriate commands over the Ledger API. 
The Canton console gives you interactive access to this API, together with some utilities that can be useful for experimentation. The Ledger API uses [gRPC](http://grpc.io). @@ -987,7 +987,7 @@ Note that the conversion to `LfContractId` was required to pass in the IOU contr ## Your Development Choices -While the `ledger_api` functions in the Console can be handy for educational purposes, the Daml SDK provides you with much more convenient tools to inspect and manipulate the ledger content: - [Daml script](https://docs.daml.com/daml-script) for scripting - [Bindings in a variety of languages](https://docs.daml.com/app-dev/ledger-api.html) to build your own applications +While the `ledger_api` functions in the Console can be handy for educational purposes, the Daml SDK provides you with much more convenient tools to inspect and manipulate the ledger content: - [Daml Script](/sdks-tools/cli-tools/daml-script) for scripting - [Language bindings](/sdks-tools/language-bindings/java) to build your own applications All these tools work against the Ledger API. @@ -1042,9 +1042,8 @@ Note how we again use `retry_until_true` to add a manual synchronization point, You are now ready to start using Canton for serious tasks. If you want to develop a Daml application and run it on Canton, we recommend the following resources: -1. Install the [Daml SDK](https://docs.daml.com/getting-started/installation.html) to get access to the Daml IDE and other tools. -2. Follow the [Daml documentation](https://docs.daml.com/) to learn how to program new contracts, or check out the [Daml Examples](https://daml.com/examples/) to find existing ones for your needs. -3. Use the [Navigator (Deprecated)](https://docs.daml.com/tools/navigator/index.html) for easy Web-based access and manipulation of your contracts. +1. Install the [Daml SDK](/sdks-tools/sdks/daml-sdk) to get access to the Daml IDE and other tools. +2. 
Work through the [Daml language fundamentals](/appdev/modules/m3-language-fundamentals) to learn how to program new contracts. If you want to understand more about Canton: @@ -1092,9 +1091,7 @@ Also, you may want to add `-XX:+UseG1GC` to force the JVM to to use the `G1` gar The Canton Open Source version is available from [Github](https://github.com/digital-asset/daml/releases). -Daml Enterprise includes an enterprise version of the Canton ledger. If you have entitlement to Daml Enterprise you can download the enterprise version of Canton by following the [Installing Daml Enterprise instructions](https://docs.daml.com/getting-started/installation.html#installing-the-enterprise-edition) and downloading the appropriate Canton artifact. - -You can also use the Daml Enterprise Canton Docker images by following our Docker instructions. +For commercial Canton distributions and support, contact [Digital Asset](https://www.digitalasset.com/contact). ## Your Topology diff --git a/docs-main/global-synchronizer/deployment/configuration.mdx b/docs-main/global-synchronizer/deployment/configuration.mdx index a113a7ffc..e868a655e 100644 --- a/docs-main/global-synchronizer/deployment/configuration.mdx +++ b/docs-main/global-synchronizer/deployment/configuration.mdx @@ -38,7 +38,7 @@ The full configuration for each app can be observed in the scala code, with the - [SvAppConfig.scala](https://github.com/hyperledger-labs/splice/blob/main/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala#L199) - [ScanAppConfig.scala](https://github.com/hyperledger-labs/splice/blob/main/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/config/ScanAppConfig.scala#L28) -Furthermore, the participant and other synchronizer components can be configured independently as well. Further info on such configurations can be found in the [daml docs](https://docs.daml.com/canton/usermanual/static_conf.html). 
+Furthermore, the participant and other synchronizer components can be configured independently as well. Further info on such configurations can be found in the [Canton configuration guide](/global-synchronizer/reference/canton-configuration-guide).
diff --git a/docs-main/global-synchronizer/deployment/identity-management.mdx b/docs-main/global-synchronizer/deployment/identity-management.mdx index d35f0c327..5968ecb7e 100644 --- a/docs-main/global-synchronizer/deployment/identity-management.mdx +++ b/docs-main/global-synchronizer/deployment/identity-management.mdx @@ -27,7 +27,7 @@ The identity management system is self-contained and built without a trusted cen When two system entities such as a participant, synchronizer topology manager, mediator or sequencer communicate with each other, they will use asymmetric cryptography to encrypt messages and sign message contents such that only the recipient can decrypt the content, verify the authenticity of the message, or prove its origin. Therefore, we need a method to uniquely identify the system entities and a way to associate encryption and signing keys with them. -On top of that, Canton uses the contract language Daml, which represents contract ownership and rights through [parties](https://docs.daml.com/concepts/glossary.html#party). But parties are not primary members of the Canton synchronization protocol. They are represented by participants and therefore we need to uniquely identify parties and relate them to participants, such that a participant can represent several parties (and in Canton, a party can be represented by several participants). +On top of that, Canton uses the contract language Daml, which represents contract ownership and rights through [parties](/overview/understand/glossary). But parties are not primary members of the Canton synchronization protocol. They are represented by participants and therefore we need to uniquely identify parties and relate them to participants, such that a participant can represent several parties (and in Canton, a party can be represented by several participants). 
### Unique Identifier @@ -198,7 +198,7 @@ Every participant also needs to manage access to their local Ledger API and be a - authentication: recognizing which user an application corresponds to (essentially by matching an application name with a user name) - authorization: knowing which rights an authenticated user has and restricting their Ledger API access according to those rights -Authentication is based on JWT and covered in the [application development/authorization section](https://docs.daml.com/app-dev/authorization.html) of the manual; the related Ledger API authorization configuration is covered in the Ledger API JWT configuration section. +Authentication is based on JWT and covered in the [authorization deep dive](/appdev/deep-dives/authorization); the related Ledger API authorization configuration is covered in the Ledger API JWT configuration section. Authorization is managed by the Ledger API's User Management Service. In essence, a user is a mapping from a user name to a set of parties with read or write permissions. In more detail a user consists of: @@ -208,7 +208,7 @@ Authorization is managed by the Ledger API's User Management Service. In essence - a set of user rights (describes whether a user has access to the admin portion of the Ledger API and what parties this user can act or read as) - a set of custom annotations (string-based key-value pairs, stored locally on the Ledger API server, that can be used to attach extra information to this party, e.g. how it relates to some business entity) -All these properties except the user ID can be modified. To learn more about annotations refer to the Ledger API Reference documentation. For an overview of the Ledger API's UserManagementService, see this [section](https://docs.daml.com/app-dev/services.html#user-management-service). +All these properties except the user ID can be modified. To learn more about annotations refer to the Ledger API Reference documentation. 
For an overview of the Ledger API's UserManagementService, see the [Ledger API reference](/sdks-tools/api-reference/ledger-api). You can manage users through the Canton console user management commands, an alpha feature. See the cookbook below for some concrete examples of how to manage users. diff --git a/docs-main/global-synchronizer/deployment/installation.mdx b/docs-main/global-synchronizer/deployment/installation.mdx index 4cadd85fb..b9f838ac6 100644 --- a/docs-main/global-synchronizer/deployment/installation.mdx +++ b/docs-main/global-synchronizer/deployment/installation.mdx @@ -674,10 +674,7 @@ Below you can see an example of the pruning config that you need to add to `vali Note that if your node is down for longer than the pruning window (48 hours in the example above), your node will most probably get corrupted, as the apps race catching up with the participant's attempts to keep pruning. It is therefore advisable to set the pruning window to a value that you are comfortable with in terms of guaranteeing uptime of your node. Setting it to 30 days is in general a reasonable choice, as the sequencers currently are also pruned after 30 days, so you will not be able to catch up with the network after a longer downtime anyway (see Disaster Recovery for disaster recovery guidelines). -Refer to the Canton documentation for more details on participant pruning: - -- `https://docs.daml.com/ops/pruning.html` -- `https://docs.daml.com/canton/usermanual/pruning.html` +For more details on participant pruning, see the [pruning operations guide](/global-synchronizer/production-operations/pruning). 
```yaml
-- Code from: ../../../apps/app/src/pack/examples/sv-helm/validator-values.yaml
@@ -840,9 +837,9 @@ You can open your browser at [http://ans.localhost](http://ans.localhost) (note

### Accessing the Canton Participant APIs

-The [JSON Ledger API](https://docs.digitalasset.com/build/3.4/tutorials/json-api/canton_and_the_json_ledger_api.html) is exposed under `json-ledger-api.localhost:80`. Note that for some clients you may explicitly need to set the `Host: json-ledger-api.localhost` header for this to get resolved correctly.
+The [JSON Ledger API](/sdks-tools/api-reference/json-api) is exposed under `json-ledger-api.localhost:80`. Note that for some clients you may explicitly need to set the `Host: json-ledger-api.localhost` header for this to get resolved correctly.

-The [gRPC Ledger API](https://docs.digitalasset.com/build/3.4/explanations/ledger-api-services.html) is exposed under `grpc-ledger-api.localhost:80`. Note that for some clients you may explicitly need to set the `:authority: json-ledger-api.localhost` pseudo-header for this to get resolved correctly.
+The [gRPC Ledger API](/sdks-tools/api-reference/ledger-api) is exposed under `grpc-ledger-api.localhost:80`. Note that for some clients you may explicitly need to set the `:authority: grpc-ledger-api.localhost` pseudo-header for this to get resolved correctly.

The Canton Admin API is not exposed by default as it does not yet support auth. There is a commented out section in `nginx.conf` that you can enable to expose it if you ensure that it is not exposed publicly, e.g., through network restrictions. 
diff --git a/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx b/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx index 4c07c3344..a08c77c1b 100644 --- a/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx +++ b/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx @@ -21,7 +21,7 @@ SVs, or more specifically SV participants and mediators, have unlimited traffic Sequencers keep track of the traffic used by validator participants. `Traffic` in this context refers to all messages from participants that have to be `sequenced`, i.e., messages that the group of sequencers has to order, persist (up to a pruning interval), and deliver to recipients (typically mediators and other participants). -Most prominently, traffic is used for Daml workflows as part of the [Canton transaction processing protocol](https://docs.daml.com/canton/architecture/overview.html#transaction-processing-in-canton). This includes: +Most prominently, traffic is used for Daml workflows as part of the [Canton transaction processing protocol](/overview/learn/how-transactions-work). This includes: - Confirmation requests; sent when a participant initiates the committing of a ledger transaction. - Confirmation responses; sent for participants that host stakeholders of a transaction. diff --git a/docs-main/global-synchronizer/deployment/validator-users.mdx b/docs-main/global-synchronizer/deployment/validator-users.mdx index 2dd5cc4a3..c08393478 100644 --- a/docs-main/global-synchronizer/deployment/validator-users.mdx +++ b/docs-main/global-synchronizer/deployment/validator-users.mdx @@ -13,7 +13,7 @@ Reviewers: Skip this section. Remove markers after final approval. # Users, Parties and Wallets in the Splice Wallet -Canton distinguishes between parties and users, as documented in detail in the [Canton docs](https://docs.daml.com/app-dev/parties-users.html). 
In essence, a party is an identity on-ledger, while a user represents an off-ledger entity that can be associated with one or more parties. +Canton distinguishes between parties and users, as documented in detail in the [Daml parties guide](/appdev/deep-dives/manage-daml-parties). In essence, a party is an identity on-ledger, while a user represents an off-ledger entity that can be associated with one or more parties. By default, when a user logs in for the first time in the wallet, and presses the "Onboard yourself" button, the Validator allocates a fresh party, with a fresh Party ID, and associates that user with the newly allocated party. As part of validator initialization, a party is automatically created for the Validator Operator. The user provided during installation as the `validatorWalletUser` will be associated with this party as its primary party. diff --git a/docs-main/global-synchronizer/faq.mdx b/docs-main/global-synchronizer/faq.mdx index ef018e7ac..2948b35db 100644 --- a/docs-main/global-synchronizer/faq.mdx +++ b/docs-main/global-synchronizer/faq.mdx @@ -664,7 +664,7 @@ If a course doesn't mention Canton Network or Daml 3.x, or covers only Daml 2.x **Recommended resources:** 1. **Official Documentation:** - - [Build Documentation](https://docs.digitalasset.com/build/3.4) + - [What is Canton?](/overview/understand/what-is-canton) - [Operator Documentation](https://docs.sync.global) 2. **Hands-on:** diff --git a/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx b/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx index f171ca697..9429aac02 100644 --- a/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx +++ b/docs-main/global-synchronizer/production-operations/monitoring-setup.mdx @@ -147,7 +147,7 @@ how should this relate to the other observability docs that we have? 
we have the # Example Monitoring Setup -This section provides an example of how Canton can be run inside a connected network of Docker containers. The example also shows how you can monitor network activity. See the [monitoring glossary](https://docs.daml.com/canton/usermanual/monitoring_glossary.html) for an explanation of the terms and the [Monitoring Choices](#monitoring-choices) section for the reasoning behind the example monitoring setup. +This section provides an example of how Canton can be run inside a connected network of Docker containers. The example also shows how you can monitor network activity. See the [glossary](/overview/understand/glossary) for monitoring term definitions and the [Monitoring Choices](#monitoring-choices) section for the reasoning behind the example monitoring setup. ## Container Setup diff --git a/docs-main/global-synchronizer/production-operations/security-hardening.mdx b/docs-main/global-synchronizer/production-operations/security-hardening.mdx index ef2db6e13..9c6bf0b32 100644 --- a/docs-main/global-synchronizer/production-operations/security-hardening.mdx +++ b/docs-main/global-synchronizer/production-operations/security-hardening.mdx @@ -22,14 +22,14 @@ If you want to run third-party applications, deploy a separate validator node fo ## External KMS for managing participant keys {/* COPIED_START source="splice:docs/src/common/kms_participants_context.rst" */} -By default, Canton participants use cryptographic [keys](https://docs.daml.com/canton/usermanual/security.html) that are generated by the participant itself and stored in the regular database used by the participant. To improve key security, participants can be configured to use an external Key Management System (KMS) to generate and store keys. Consult the official [Canton documentation on KMS support](https://docs.daml.com/canton/usermanual/kms/kms.html) for additional details and a list of supported KMS providers. 
Participants deployed as part of a Splice deployment support the [External Key Storage](https://docs.daml.com/canton/usermanual/kms/external_key_storage/external_key_storage.html) mode of KMS usage. +By default, Canton participants use cryptographic [keys](/global-synchronizer/production-operations/key-management) that are generated by the participant itself and stored in the regular database used by the participant. To improve key security, participants can be configured to use an external Key Management System (KMS) to generate and store keys. See [KMS operations](/global-synchronizer/production-operations/kms-operations) for additional details and a list of supported KMS providers. Participants deployed as part of a Splice deployment support External Key Storage mode of KMS usage. {/* COPIED_END */} Official support for KMS-based operation of sequencers and mediators that are part of an SV deployment is planned for a future release. ### General configuration -Refer to the [Canton documentation on configuring KMS support](https://docs.daml.com/canton/usermanual/kms/kms_setup.html) for determining the right configuration options for your desired KMS provider. +Refer to the [KMS driver guide](/global-synchronizer/reference/kms-driver-guide) for determining the right configuration options for your desired KMS provider. When configuring KMS through Helm values: @@ -40,7 +40,7 @@ When configuring KMS through Helm values: ### Google Cloud KMS {/* COPIED_START source="splice:docs/src/common/kms_config_gcp.rst" */} -Refer to the [Canton documentation](https://docs.daml.com/canton/usermanual/kms/kms_gcp_setup.html) for a list of supported configuration options and their meaning, as well as for instructions on configuring authentication to the KMS. 
Splice participants support the External Key Storage mode, so the authentication credentials you supply must correspond to a GCP service account with the following IAM permissions: +Refer to the [KMS driver guide](/global-synchronizer/reference/kms-driver-guide) for a list of supported configuration options and their meaning, as well as for instructions on configuring authentication to the KMS. Splice participants support the External Key Storage mode, so the authentication credentials you supply must correspond to a GCP service account with the following IAM permissions: - `cloudkms.cryptoKeyVersions.create` - `cloudkms.cryptoKeyVersions.useToDecrypt` @@ -56,7 +56,7 @@ A mock GCP KMS configuration is available in [apps/app/src/pack/examples/sv-helm ### Amazon Web Services KMS {/* COPIED_START source="splice:docs/src/common/kms_config_aws.rst" */} -Refer to the [Canton documentation](https://docs.daml.com/canton/usermanual/kms/kms_aws_setup.html) for a list of supported configuration options and their meaning, as well as for instructions on configuring authentication to the KMS. Splice participants support the External Key Storage mode, so the authentication credentials you supply must correspond to an entity with the following IAM permissions: +Refer to the [KMS driver guide](/global-synchronizer/reference/kms-driver-guide) for a list of supported configuration options and their meaning, as well as for instructions on configuring authentication to the KMS. 
Splice participants support the External Key Storage mode, so the authentication credentials you supply must correspond to an entity with the following IAM permissions: - `kms:CreateKey` - `kms:TagResource` diff --git a/docs-main/global-synchronizer/reference/canton-configuration-guide.mdx b/docs-main/global-synchronizer/reference/canton-configuration-guide.mdx index a87f74919..073aa0e3c 100644 --- a/docs-main/global-synchronizer/reference/canton-configuration-guide.mdx +++ b/docs-main/global-synchronizer/reference/canton-configuration-guide.mdx @@ -72,12 +72,12 @@ canton { ## Configuration reference -The Canton configuration file for static properties is based on [PureConfig](https://pureconfig.github.io/). PureConfig maps Scala case classes and their class structure into analogue configuration options (see e.g. the [PureConfig quick start](https://pureconfig.github.io/docs/#quick-start) for an example). Therefore, the ultimate source of truth for all available configuration options and the configuration file syntax is given by the appropriate scaladocs of the [CantonConfig](https://docs.daml.com/__VERSION__/canton/scaladoc/com/digitalasset/canton/config/index.html) classes. +The Canton configuration file for static properties is based on [PureConfig](https://pureconfig.github.io/). PureConfig maps Scala case classes and their class structure into analogue configuration options (see e.g. the [PureConfig quick start](https://pureconfig.github.io/docs/#quick-start) for an example). Therefore, the ultimate source of truth for all available configuration options and the configuration file syntax is given by the [CantonConfig Scala reference](/reference/scala/com-digitalasset-canton-config/cantonconfig) and the related types in `com.digitalasset.canton.config`. When understanding the mapping from scaladocs to configuration, please keep in mind that: - CamelCase Scala names are mapped to lowercase-with-dashes names in configuration files, e.g. 
`synchronizerParameters` in the scaladocs becomes `synchronizer-parameters` in a configuration file (dash, not underscore). -- `Option[<scala-class>]` means that the configuration can be specified but doesn't need to be, e.g. you can specify a JWT token via `token=token` [in a remote participant configuration](https://docs.daml.com/__VERSION__/canton/scaladoc/com/digitalasset/canton/participant/config/RemoteParticipantConfig.html#token:Option%5BString%5D), but not specifying `token` is also valid. +- `Option[<scala-class>]` means that the configuration can be specified but doesn't need to be, e.g. you can specify a JWT token via `token=token` in [RemoteParticipantConfig](/reference/scala/com-digitalasset-canton-participant-config/remoteparticipantconfig), but not specifying `token` is also valid. ## Configuration Compatibility diff --git a/docs-main/global-synchronizer/reference/canton-console-commands.mdx b/docs-main/global-synchronizer/reference/canton-console-commands.mdx index 464acd071..89562c7fb 100644 --- a/docs-main/global-synchronizer/reference/canton-console-commands.mdx +++ b/docs-main/global-synchronizer/reference/canton-console-commands.mdx @@ -3031,7 +3031,7 @@ ledger_api.commands.submit_assign_async > ``` none > Provides access to the command submission service of the Ledger API. - > See https://docs.daml.com/app-dev/services.html for documentation of the parameters. + > See the [Ledger API reference](/sdks-tools/api-reference/ledger-api) for documentation of the parameters. > ```
@@ -3091,7 +3091,7 @@ ledger_api.commands.submit_async > ``` none > Provides access to the command submission service of the Ledger API. - > See https://docs.daml.com/app-dev/services.html for documentation of the parameters. + > See the [Ledger API reference](/sdks-tools/api-reference/ledger-api) for documentation of the parameters. > ```
@@ -3171,7 +3171,7 @@ ledger_api.commands.submit_unassign_async > ``` none > Provides access to the command submission service of the Ledger API. - > See https://docs.daml.com/app-dev/services.html for documentation of the parameters. + > See the [Ledger API reference](/sdks-tools/api-reference/ledger-api) for documentation of the parameters. > ``` @@ -3312,7 +3312,7 @@ ledger_api.javaapi.commands.submit_async > ``` none > Provides access to the command submission service of the Ledger API. - > See https://docs.daml.com/app-dev/services.html for documentation of the parameters. + > See the [Ledger API reference](/sdks-tools/api-reference/ledger-api) for documentation of the parameters. > ``` diff --git a/docs-main/global-synchronizer/reference/canton-console-reference.mdx b/docs-main/global-synchronizer/reference/canton-console-reference.mdx index fc1d75731..dfba47e69 100644 --- a/docs-main/global-synchronizer/reference/canton-console-reference.mdx +++ b/docs-main/global-synchronizer/reference/canton-console-reference.mdx @@ -9,7 +9,7 @@ import CantonGlobalSynchronizerReferenceCantonConsoleReferenceL331 from "/snippe The Canton console is a Scala-based REPL (built on [Ammonite](http://ammonite.io)) that provides administrative access to Canton nodes. All console commands are valid Scala expressions. String arguments are automatically converted to the appropriate Canton types (`SynchronizerAlias`, `Fingerprint`, `Identifier`) where needed. This page covers the commands most relevant to validator and SV operators. For the full -command reference, see the [Canton documentation](https://docs.digitalasset.com/operate/3.5/reference/console.html). +command reference, see the [Canton console commands](/global-synchronizer/reference/canton-console-commands). ## Starting the Console @@ -45,7 +45,7 @@ Welcome to Canton! ## Participant console -1. 
Obtain an authentication token as specified in [the Canton authentication docs](https://docs.digitalasset.com/operate/3.4/howtos/secure/apis/jwt.html) +1. Obtain an authentication token as specified in the [security configuration reference](/global-synchronizer/reference/security-configuration) 2. Ensure you can access the participant's ports 5001 and 5002 @@ -340,5 +340,5 @@ Both sequencer and mediator nodes also expose `health`, `keys`, and `topology` c ## Related Resources -- [Canton documentation - Console reference](https://docs.digitalasset.com/operate/3.5/reference/console.html) for the full command listing -- [Canton documentation - Administration APIs](https://docs.digitalasset.com/operate/3.5/reference/apis.html) for the underlying gRPC APIs +- [Canton console commands](/global-synchronizer/reference/canton-console-commands) for the full command listing +- [Canton admin API reference](/global-synchronizer/reference/canton-admin-api) for the underlying gRPC APIs diff --git a/docs-main/global-synchronizer/reference/configuration-reference.mdx b/docs-main/global-synchronizer/reference/configuration-reference.mdx index 1808d7a2b..36f8d7e12 100644 --- a/docs-main/global-synchronizer/reference/configuration-reference.mdx +++ b/docs-main/global-synchronizer/reference/configuration-reference.mdx @@ -199,7 +199,7 @@ Add this to `validator-values.yaml`: If your node is offline longer than the pruning retention window, it may become corrupted as apps race to catch up with pruned data. Set the retention to a value that reflects your uptime guarantee -- 30 days is a reasonable starting point, since sequencers are also pruned after 30 days.
-See the Canton documentation on [pruning operations](https://docs.digitalasset.com/operate/3.5/usermanual/pruning.html) for more details. +See [pruning operations](/global-synchronizer/production-operations/pruning) for more details. ## Monitoring and observability diff --git a/docs-main/global-synchronizer/reference/metrics-reference.mdx b/docs-main/global-synchronizer/reference/metrics-reference.mdx index 92e1f5ebb..085d75a49 100644 --- a/docs-main/global-synchronizer/reference/metrics-reference.mdx +++ b/docs-main/global-synchronizer/reference/metrics-reference.mdx @@ -80,7 +80,7 @@ This section will be expanded in a future update. Topology metrics track synchro ## Key participant metrics -These are the most operationally significant metrics from the participant node. For the full catalog of several hundred metrics, see the [Canton 3.x metrics reference](https://docs.digitalasset.com/canton/3.5/participant/reference/metrics). +These are the most operationally significant metrics from the participant node. For the full catalog of several hundred metrics, see the [Canton metrics reference](/global-synchronizer/reference/canton-metrics). ### Sequencer client diff --git a/docs-main/global-synchronizer/troubleshooting.mdx b/docs-main/global-synchronizer/troubleshooting.mdx index e06d7beb8..5f0829e39 100644 --- a/docs-main/global-synchronizer/troubleshooting.mdx +++ b/docs-main/global-synchronizer/troubleshooting.mdx @@ -493,7 +493,7 @@ queued tasks = 2000 # ← Queue is full! 3. Monitor pruning progress via Canton Console: -See [Monitor Pruning Progress](https://docs.daml.com/canton/usermanual/pruning.html#monitor-pruning-progress) in the documentation. +See the [pruning operations guide](/global-synchronizer/production-operations/pruning) for details.
diff --git a/docs-main/integrations/ecosystem.mdx b/docs-main/integrations/ecosystem.mdx index 8b4580349..b036e3445 100644 --- a/docs-main/integrations/ecosystem.mdx +++ b/docs-main/integrations/ecosystem.mdx @@ -145,7 +145,7 @@ For current network statistics, visit: ### As a Developer 1. Start with the [QuickStart](/appdev/quickstart) -2. Learn [Daml](https://docs.daml.com) +2. Learn [Daml](/appdev/get-started/choose-your-path) 3. Build and deploy your application 4. Join the developer community {/* TODO: Add Slack link once available */} diff --git a/docs-main/overview/reference/sv-governance-reference.mdx b/docs-main/overview/reference/sv-governance-reference.mdx index dca0f2749..5d627709b 100644 --- a/docs-main/overview/reference/sv-governance-reference.mdx +++ b/docs-main/overview/reference/sv-governance-reference.mdx @@ -26,7 +26,7 @@ The implementation uses three key techniques to achieves this Byzantine fault to Thus CC and CNS users that are willing to assume that no more than `f` SV nodes are dishonest can rely on the following guarantees: -- **valid transactions**: every transaction that requires confirmation from the DSO party is [valid](https://docs.daml.com/concepts/ledger-model/ledger-integrity.html#valid-ledgers). +- **valid transactions**: every transaction that requires confirmation from the DSO party is [valid](/overview/learn/ledger-model). - **timely automation**: actions required to be taken by the DSO party are taken in a timely fashion. - **predictable fees and configuration values**: fees and configuration values are reasonably predictable as they represent the *aggregate preferences* of ~2/3 of SV operators, which can be assumed to be acting in their own best interest. 
@@ -146,6 +146,6 @@ The decentralized party model means that the DSO party itself has a confirmation The combination of on-chain voting and decentralized automation provides three guarantees for network participants who trust that no more than `f` SVs are dishonest: -- **Valid transactions** -- Every DSO-confirmed transaction satisfies Daml's [ledger validity model](https://docs.daml.com/concepts/ledger-model/ledger-integrity.html#valid-ledgers) +- **Valid transactions** -- Every DSO-confirmed transaction satisfies Daml's [ledger validity model](/overview/learn/ledger-model) - **Timely automation** -- Routine operational actions (round advancement, reward issuance) execute without delay - **Predictable parameters** -- Fees and configuration values reflect the aggregate preferences of at least two-thirds of SV operators diff --git a/docs-main/sdks-tools/api-reference/admin-api.mdx b/docs-main/sdks-tools/api-reference/admin-api.mdx index 3e04d7848..e15c0f0c9 100644 --- a/docs-main/sdks-tools/api-reference/admin-api.mdx +++ b/docs-main/sdks-tools/api-reference/admin-api.mdx @@ -18,7 +18,7 @@ The Admin API is split across two layers: the **Ledger API admin services** (def ### Ledger API Admin Services -These services run on participant nodes alongside the Ledger API. They are defined in the [gRPC Ledger API proto specifications](https://docs.digitalasset.com/build/3.5/reference/lapi-proto-docs.html). +These services run on participant nodes alongside the Ledger API. They are defined in the [gRPC Ledger API proto specifications](/reference/protobuf). ### Canton-Specific Admin Services diff --git a/docs-main/sdks-tools/cli-tools/canton-console.mdx b/docs-main/sdks-tools/cli-tools/canton-console.mdx index bf539e279..7e5d69415 100644 --- a/docs-main/sdks-tools/cli-tools/canton-console.mdx +++ b/docs-main/sdks-tools/cli-tools/canton-console.mdx @@ -50,7 +50,7 @@ Welcome to Canton! ## Participant console -1. 
Obtain an authentication token as specified in [the Canton authentication docs](https://docs.digitalasset.com/operate/3.4/howtos/secure/apis/jwt.html) +1. Obtain an authentication token as specified in the [security configuration reference](/global-synchronizer/reference/security-configuration) 2. Ensure you can access the participant's ports 5001 and 5002 diff --git a/docs-main/sdks-tools/cli-tools/dpm.mdx b/docs-main/sdks-tools/cli-tools/dpm.mdx index c185bfd75..39e076d60 100644 --- a/docs-main/sdks-tools/cli-tools/dpm.mdx +++ b/docs-main/sdks-tools/cli-tools/dpm.mdx @@ -25,7 +25,7 @@ import DamlDocsSdksToolsCliToolsDpmL152 from "/snippets/daml-docs/sdks-tools_cli ## Installation -Install `dpm` following the instructions in the [Canton 3.x documentation](https://docs.digitalasset.com). On most systems, this involves downloading a single binary and adding it to your `PATH`. +Install `dpm` by downloading the binary from the [Daml SDK GitHub releases](https://github.com/digital-asset/daml/releases) and adding it to your `PATH`. Once installed, verify the installation: diff --git a/docs-main/sdks-tools/language-bindings/java.mdx b/docs-main/sdks-tools/language-bindings/java.mdx index b37a35d70..ce8584e32 100644 --- a/docs-main/sdks-tools/language-bindings/java.mdx +++ b/docs-main/sdks-tools/language-bindings/java.mdx @@ -120,11 +120,11 @@ For Maven, add the generated directory as a source folder in your `pom.xml`: ``` -You will also need the Daml Java bindings library as a dependency. Check the [Canton 3.x documentation](https://docs.digitalasset.com) for the current Maven coordinates and version. +You will also need the Daml Java bindings library as a dependency. Refer to the [Daml SDK GitHub releases](https://github.com/digital-asset/daml/releases) for the current Maven coordinates and version. ## Reference Documentation -The full Javadoc for the Daml Java bindings library is available in the [Canton 3.x SDK documentation](https://docs.digitalasset.com). 
The generated classes follow the same patterns documented there. +The full Javadoc for the Daml Java bindings library is published with each [Daml SDK release](https://github.com/digital-asset/daml/releases). The generated classes follow the same patterns documented there. ## Related Pages diff --git a/docs-main/sdks-tools/language-bindings/scala.mdx b/docs-main/sdks-tools/language-bindings/scala.mdx index 980006332..21d6edb68 100644 --- a/docs-main/sdks-tools/language-bindings/scala.mdx +++ b/docs-main/sdks-tools/language-bindings/scala.mdx @@ -55,11 +55,11 @@ If you are writing a standalone Scala project that uses Canton's APIs (outside o -Check the [Canton 3.x documentation](https://docs.digitalasset.com) for the current artifact coordinates and version numbers. +Refer to the [Daml SDK GitHub releases](https://github.com/digital-asset/daml/releases) for the current artifact coordinates and version numbers. ## Reference Documentation -The Scaladoc for Canton's Scala APIs is published with each Canton release. It covers the Console command set, node reference types, and the topology management API. Refer to the [Canton 3.x SDK documentation](https://docs.digitalasset.com) for the latest Scaladoc. +The Scaladoc for Canton's Scala APIs is published with each Canton release. It covers the Console command set, node reference types, and the topology management API. See the [Scala API reference](/reference/scala/com-digitalasset-canton) for the catalog of types and the link to the upstream Scaladoc. 
## Related Pages diff --git a/docs-main/sdks-tools/sdks/daml-sdk.mdx b/docs-main/sdks-tools/sdks/daml-sdk.mdx index 51ce22b51..1c6f4a59f 100644 --- a/docs-main/sdks-tools/sdks/daml-sdk.mdx +++ b/docs-main/sdks-tools/sdks/daml-sdk.mdx @@ -99,4 +99,4 @@ When deploying to DevNet, TestNet, or MainNet, verify that your SDK version matc - [dpm command reference](/sdks-tools/cli-tools/dpm) -- Full list of `dpm` commands and options - [Daml Studio](/sdks-tools/development-tools/daml-studio) -- VS Code extension for Daml development - [Sandbox](/sdks-tools/development-tools/sandbox) -- Local single-node testing environment -- [Canton 3.x documentation](https://docs.digitalasset.com) -- Full platform documentation +- [Choose your path](/appdev/get-started/choose-your-path) -- Pick a development workflow diff --git a/docs-main/sdks-tools/sdks/wallet-sdk.mdx b/docs-main/sdks-tools/sdks/wallet-sdk.mdx index 28a576808..385019326 100644 --- a/docs-main/sdks-tools/sdks/wallet-sdk.mdx +++ b/docs-main/sdks-tools/sdks/wallet-sdk.mdx @@ -85,7 +85,7 @@ Before performing any operations, you need to create a key pair and allocate an -The SDK generates Ed25519 key pairs by default. BIP-0039 mnemonic-based key generation is also supported for deterministic key recovery. For details on the key pair and party creation process, including topology transactions and multi-hosting, see the full [Wallet Integration Guide](https://docs.digitalasset.com/integrate/devnet/index.html). +The SDK generates Ed25519 key pairs by default. BIP-0039 mnemonic-based key generation is also supported for deterministic key recovery. For details on the key pair and party creation process, including topology transactions and multi-hosting, see the [wallet integration guidance](/integrations/wallet/guidance). 
## Token operations @@ -185,5 +185,5 @@ The SDK can decode prepared transactions into human-readable JSON for display to - [Splice Wallet Kernel GitHub repository](https://github.com/hyperledger-labs/splice-wallet-kernel) -- source code, API specs, and example scripts - [Token standard documentation](https://docs.sync.global/app_dev/token_standard/index.html) -- full token standard reference - [CIP-0056](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0056/cip-0056.md) -- Canton Network token standard specification -- [External party signing tutorial](https://docs.digitalasset.com/build/3.5/tutorials/app-dev/external_signing_onboarding) -- step-by-step external signing walkthrough +- [External signing onboarding](/appdev/deep-dives/external-signing-onboarding) -- step-by-step external signing walkthrough - [CN Quickstart](https://github.com/digital-asset/cn-quickstart) -- reference application for getting started with Canton Network development diff --git a/docs-main/shared/version-compatibility-dashboard.mdx b/docs-main/shared/version-compatibility-dashboard.mdx index ad33c91ff..f00dc6afa 100644 --- a/docs-main/shared/version-compatibility-dashboard.mdx +++ b/docs-main/shared/version-compatibility-dashboard.mdx @@ -9,7 +9,7 @@ import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; export const componentDescriptions = { splice: { text: 'Canton Network infrastructure including validator apps, wallet, and governance tools.', link: 'https://github.com/hyperledger-labs/splice/releases', linkText: 'View releases' }, damlSdk: { text: 'Smart contract language SDK for building and compiling Daml applications.', link: 'https://github.com/digital-asset/daml/releases', linkText: 'View releases' }, - pqs: { text: 'Participant Query Store - SQL database for querying ledger data.', link: 'https://docs.digitalasset.com/', linkText: 'Documentation' }, + pqs: { text: 'Participant Query Store - SQL database for querying ledger data.', 
link: '/sdks-tools/development-tools/pqs', linkText: 'Documentation' }, tokenStandard: { text: 'Canton Network Token Standard API for fungible token operations (CIP-0056).' }, walletSdk: { text: 'TypeScript SDK for wallet providers and exchanges.', link: 'https://www.npmjs.com/package/@canton-network/wallet-sdk', linkText: 'npm package' }, dappSdk: { text: 'Browser-optimized SDK for building decentralized applications.', link: 'https://www.npmjs.com/package/@canton-network/dapp-sdk', linkText: 'npm package' }, From ba742dca1203ad8df91660db4979576ec8fb72e1 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Mon, 4 May 2026 11:42:13 +0200 Subject: [PATCH 4/6] Restore destroyed `` text and cross-link refs from F-096 regex sweep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The site-wide RST `:ref:` cleanup in 3cb1ebfc was over-aggressive. It matched two unintended classes of content and damaged them: A. Code-block placeholders inside fenced code (` ``` `) blocks — examples: - `` in NOT_FOUND/PACKAGE_NOT_FOUND error message samples - `` in CONTRACT_NOT_FOUND samples - `` in POSTGRES_PASSWORD environment-variable example - `` in `kubectl get pods -n` snippet - `` in Authorization: Bearer example - `` in `docker inspect` snippet - `` in CLI invocations - `` in daml.yaml example - `` in KMS driver config - `` in -m migration ID flag - `` in kubectl describe pod Each was inside a ` ``` ` fence in source. The regex still rewrote the whole region into double-backtick inline code with embedded italics, collapsing fenced code blocks and erasing the placeholder text. Reverted to the original fenced blocks with placeholders intact. B. RST `:ref:` cross-links in prose — example: `Daml Ledger Model ` -> *Daml Ledger Model* The italic conversion preserved the visible text but lost the link target slug. 
Restored as proper MDX links where the local target is known: - da-ledgers -> /overview/learn/ledger-model - validator_operator -> /global-synchronizer/deployment/deployment-options - error-categories-inventory -> #error-categories-inventory (same page) - traffic_topup -> /global-synchronizer/deployment/synchronizer-traffic - app_dev_token_standard_overview -> /overview/reference/cip-0056 - validator-api-user-wallet-transfer-offers -> #splice-wallet-transfer-offers-deprecated (same page) - type-splice-api-featuredapprightv1-... -> code-span (no local target) 19 files updated; 0 remaining `*\`\`` or `\`\`*` placeholder-corruption patterns in docs-main/. --- .../deep-dives/external-signing-transactions.mdx | 10 ++++++---- .../appdev/modules/m3-functional-programming.mdx | 2 +- .../modules/m4-featured-app-activity-marker.mdx | 2 +- .../troubleshooting-guide/development-issues.mdx | 4 +++- .../troubleshooting-guide/error-code-reference.mdx | 4 +++- .../troubleshooting-guide/ledger-api-errors.mdx | 4 +++- .../troubleshooting-guide/operational-issues.mdx | 4 +++- .../global-synchronizer/deployment/configuration.mdx | 5 +++-- .../deployment/onboarding-process.mdx | 4 ++-- .../production-operations/kms-operations.mdx | 6 ++++-- .../production-operations/logging.mdx | 2 +- .../production-operations/upgrade-procedures.mdx | 2 +- .../global-synchronizer/reference/error-codes.mdx | 2 +- .../troubleshooting-guide/transaction-failures.mdx | 8 ++++++-- .../troubleshooting-methodology.mdx | 4 ++-- docs-main/sdks-tools/api-reference/splice-apis.mdx | 12 +++++++----- .../sdks-tools/development-tools/daml-studio.mdx | 5 +++-- ...ynchronizer_deployment_initial-validation_L12.mdx | 5 +++-- ...troubleshooting-guide_installation-issues_L28.mdx | 5 +++-- 19 files changed, 56 insertions(+), 34 deletions(-) diff --git a/docs-main/appdev/deep-dives/external-signing-transactions.mdx b/docs-main/appdev/deep-dives/external-signing-transactions.mdx index ab2e017b5..a5774eed9 100644 --- 
a/docs-main/appdev/deep-dives/external-signing-transactions.mdx +++ b/docs-main/appdev/deep-dives/external-signing-transactions.mdx @@ -1820,15 +1820,17 @@ The scripts mentioned in this tutorial can be used as tools for testing and deve ### Decode base64 encoded prepared transaction to JSON -``*bash +```bash ./setup.sh -python daml_transaction_util.py --decode --base64*`` +python daml_transaction_util.py --decode --base64 +``` ### Compute hash of base64 encoded prepared transaction -``*bash +```bash ./setup.sh -python daml_transaction_util.py --hash --base64*`` +python daml_transaction_util.py --hash --base64 +``` {/* COPIED_END */} diff --git a/docs-main/appdev/modules/m3-functional-programming.mdx b/docs-main/appdev/modules/m3-functional-programming.mdx index e2f041e94..0a62d2852 100644 --- a/docs-main/appdev/modules/m3-functional-programming.mdx +++ b/docs-main/appdev/modules/m3-functional-programming.mdx @@ -26,7 +26,7 @@ There is a project template `daml-intro-functional-101` for this chapter, but it ## The Haskell connection -The previous chapters of this introduction to Daml have mostly covered the structure of templates, and their connection to the *Daml Ledger Model*. The logic of what happens within the `do` blocks of choices has been kept relatively simple. In this chapter, we will dive deeper into Daml's expression language, the part that allows you to write logic inside those `do` blocks. But we can only scratch the surface here. Daml borrows a lot of its language from [Haskell](https://www.haskell.org). If you want to dive deeper, or learn about specific aspects of the language you can refer to standard literature on Haskell. Some recommendations: +The previous chapters of this introduction to Daml have mostly covered the structure of templates, and their connection to the [Daml Ledger Model](/overview/learn/ledger-model). The logic of what happens within the `do` blocks of choices has been kept relatively simple. 
In this chapter, we will dive deeper into Daml's expression language, the part that allows you to write logic inside those `do` blocks. But we can only scratch the surface here. Daml borrows a lot of its language from [Haskell](https://www.haskell.org). If you want to dive deeper, or learn about specific aspects of the language you can refer to standard literature on Haskell. Some recommendations: - [Finding Success and Failure in Haskell (Julie Moronuki, Chris Martin)](https://joyofhaskell.com/) - [Haskell Programming from first principles (Christopher Allen, Julie Moronuki)](http://haskellbook.com/) diff --git a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx index 4c5993740..2083d4ea8 100644 --- a/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx +++ b/docs-main/appdev/modules/m4-featured-app-activity-marker.mdx @@ -65,7 +65,7 @@ This section was copied from existing reviewed documentation. Reviewers: Skip this section. Remove markers after final approval. -It is possible to share the attribution of activity for the `FeaturedAppActivityMarker`. The `FeaturedAppRight_CreateActivityMarker` choice accepts a list of *AppRewardBeneficiary* contracts. Then a `FeaturedAppActivityMarker` is created for each `beneficiary` with the `weight` field set appropriately. +It is possible to share the attribution of activity for the `FeaturedAppActivityMarker`. The `FeaturedAppRight_CreateActivityMarker` choice accepts a list of `AppRewardBeneficiary` contracts. Then a `FeaturedAppActivityMarker` is created for each `beneficiary` with the `weight` field set appropriately. 
{/* COPIED_END */} diff --git a/docs-main/appdev/troubleshooting-guide/development-issues.mdx b/docs-main/appdev/troubleshooting-guide/development-issues.mdx index 7161e40f3..f95ee3886 100644 --- a/docs-main/appdev/troubleshooting-guide/development-issues.mdx +++ b/docs-main/appdev/troubleshooting-guide/development-issues.mdx @@ -141,7 +141,9 @@ This is expected behavior in Canton's UTXO-based model. The fix depends on your If your transaction references a package the validator does not know about, you get: -``*NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package*`` +``` +NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package +``` Upload your DAR file before submitting transactions: diff --git a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx index b24260135..bc792e311 100644 --- a/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx +++ b/docs-main/appdev/troubleshooting-guide/error-code-reference.mdx @@ -73,7 +73,9 @@ INVALID_ARGUMENT: ### NOT_FOUND -``*NOT_FOUND: CONTRACT_NOT_FOUND - Contract could not be found with id*`` +``` +NOT_FOUND: CONTRACT_NOT_FOUND - Contract could not be found with id +``` **Cause:** The contract has already been archived (consumed by a previous exercise), or it was never visible to the submitting party. diff --git a/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx b/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx index ab0b7bbfc..c2a22d43d 100644 --- a/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx +++ b/docs-main/appdev/troubleshooting-guide/ledger-api-errors.mdx @@ -21,7 +21,9 @@ UNAUTHENTICATED: Could not verify JWT token ### PACKAGE_NOT_FOUND -``*NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package*`` +``` +NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package +``` **Cause:** The validator does not have the DAR that contains the referenced package, or the package has not been vetted. 
diff --git a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx index 45cc971bb..24935cc84 100644 --- a/docs-main/appdev/troubleshooting-guide/operational-issues.mdx +++ b/docs-main/appdev/troubleshooting-guide/operational-issues.mdx @@ -51,7 +51,9 @@ On MainNet, CC has real value. Monitor your balance and set up auto-top-up befor When you deploy a new version of your Daml package alongside an existing version, both must be uploaded and vetted on all validators that process your contracts. If validator A has package v2 but validator B only has v1, transactions involving both parties will fail. -``*NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package*`` +``` +NOT_FOUND: PACKAGE_NOT_FOUND - Could not find package +``` **Fix:** Synchronize package deployment across all validators before submitting transactions that reference the new package. Use the SCU (Smart Contract Upgrade) mechanism so that existing contracts on v1 can be exercised with v2 code. 
diff --git a/docs-main/global-synchronizer/deployment/configuration.mdx b/docs-main/global-synchronizer/deployment/configuration.mdx index e868a655e..222823c34 100644 --- a/docs-main/global-synchronizer/deployment/configuration.mdx +++ b/docs-main/global-synchronizer/deployment/configuration.mdx @@ -87,12 +87,13 @@ You do not need to manually configure sequencer URLs — the onboarding process **Docker Compose** uses environment variables: -``*bash +```bash # In .env or compose.yaml environment section POSTGRES_HOST=localhost POSTGRES_PORT=5432 POSTGRES_USER=canton -POSTGRES_PASSWORD=*`` +POSTGRES_PASSWORD= +``` **Kubernetes** uses Helm values: diff --git a/docs-main/global-synchronizer/deployment/onboarding-process.mdx b/docs-main/global-synchronizer/deployment/onboarding-process.mdx index a11bc3db4..310131f1a 100644 --- a/docs-main/global-synchronizer/deployment/onboarding-process.mdx +++ b/docs-main/global-synchronizer/deployment/onboarding-process.mdx @@ -72,9 +72,9 @@ At this point this can also be accomplished by connecting your validator through 3. If you want to access the Canton Coin Scan Web UI from your laptop, you also need to ensure that you can connect to a VPN operated by one of the SVs. This is required as laptops usually do not have static IP addresses and the Scan web UI is not (yet) fully public. If you can use your validator egress IP also for browsing the web UI this is not necessary. -4. Request an onboarding secret from your SV sponsor. On DevNet, you can do this yourself through an API call (refer to *Deployment instructions* for details). On TestNet and MainNet your SV sponsor needs to provide you with this manually. Note that onboarding secrets are only valid for 48 hours and are one-time use, and self-generated DevNet secrets are only valid for 1 hour. If it expired, you need to request a new one. +4. Request an onboarding secret from your SV sponsor. 
On DevNet, you can do this yourself through an API call (refer to [Deployment Options](/global-synchronizer/deployment/deployment-options) for details). On TestNet and MainNet your SV sponsor needs to provide you with this manually. Note that onboarding secrets are only valid for 48 hours and are one-time use, and self-generated DevNet secrets are only valid for 1 hour. If it expired, you need to request a new one. -5. Deploy your node either using docker compose or Kubernetes. Refer to the *Deployment Options* for information on how to choose between them and references to each of the two approaches. You will need to make sure that all IP traffic going from your validator to the SVs uses the egress IP you provided to your SV sponsor and you need to provide the onboarding secret. +5. Deploy your node either using docker compose or Kubernetes. Refer to the [Deployment Options](/global-synchronizer/deployment/deployment-options) for information on how to choose between them and references to each of the two approaches. You will need to make sure that all IP traffic going from your validator to the SVs uses the egress IP you provided to your SV sponsor and you need to provide the onboarding secret. 
{/* COPIED_END */} diff --git a/docs-main/global-synchronizer/production-operations/kms-operations.mdx b/docs-main/global-synchronizer/production-operations/kms-operations.mdx index 6e28fd8bb..c0dd1bb94 100644 --- a/docs-main/global-synchronizer/production-operations/kms-operations.mdx +++ b/docs-main/global-synchronizer/production-operations/kms-operations.mdx @@ -534,8 +534,10 @@ Canton allows integration with a variety of KMS and HSM solutions through a KMS Configuring Canton to run with a KMS Driver is done similarly to other KMS providers by specifying: -``*type = driver -name =*`` +``` +type = driver +name = +``` For example, for a Participant named \`participant1\`: diff --git a/docs-main/global-synchronizer/production-operations/logging.mdx b/docs-main/global-synchronizer/production-operations/logging.mdx index ffd3026c3..c1ae7aa99 100644 --- a/docs-main/global-synchronizer/production-operations/logging.mdx +++ b/docs-main/global-synchronizer/production-operations/logging.mdx @@ -31,7 +31,7 @@ We recommend to use `lnav` to read the logs. A guideline is provided in [this do Logging in kubernetes (note that this only provides logs for a limited timeframe): -- *kubectl describe pod* to get a detailed status of the given pod, +- `kubectl describe pod ` to get a detailed status of the given pod, - `kubectl logs -n ` or `kubectl logs -l app= -n --tail=-1` to get logs for a given pod in a given namespace. diff --git a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx index 6977318e6..049478e38 100644 --- a/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx +++ b/docs-main/global-synchronizer/production-operations/upgrade-procedures.mdx @@ -133,7 +133,7 @@ Reviewers: Skip this section. Remove markers after final approval. 1. Confirm migration dump: `docker compose logs validator | grep "Wrote domain migration dump"` 2. 
Stop the validator: `./stop.sh` 3. Update the bundle and `IMAGE_TAG` if needed -4. Restart with incremented migration ID (*-m*) and the `-M` flag to trigger migration +4. Restart with incremented migration ID (`-m `) and the `-M` flag to trigger migration 5. After successful migration: restart without `-M`, keep the new migration ID {/* COPIED_END */} diff --git a/docs-main/global-synchronizer/reference/error-codes.mdx b/docs-main/global-synchronizer/reference/error-codes.mdx index f3c6d807a..977cc4d56 100644 --- a/docs-main/global-synchronizer/reference/error-codes.mdx +++ b/docs-main/global-synchronizer/reference/error-codes.mdx @@ -33,7 +33,7 @@ Error Categories The error categories allow you to group errors such that application logic can be built to automatically deal with errors and decide whether to retry a request or escalate to the operator. -A full list of error categories is documented *here*. +A full list of error categories is documented [here](#error-categories-inventory). ## Machine Readable Information diff --git a/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx b/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx index c8ddf0bf8..2cfce3774 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/transaction-failures.mdx @@ -41,7 +41,9 @@ Package vetting ensures that all validators involved in a transaction agree on t ### DAR Not Uploaded -``*PACKAGE_NOT_FOUND: Could not find package*`` +``` +PACKAGE_NOT_FOUND: Could not find package +``` Upload the required DAR to your validator: @@ -60,7 +62,9 @@ Or via Canton Console: Even if your validator has the package, the other party's validator must also have it uploaded and vetted. 
If you see: -``*PACKAGE_SELECTION_FAILED: No package found for module*`` +``` +PACKAGE_SELECTION_FAILED: No package found for module +``` Contact the counterparty's validator operator to upload and vet the same DAR. Both sides of a transaction must have the package available. diff --git a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx index 863ee08f5..978a8c474 100644 --- a/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx +++ b/docs-main/global-synchronizer/troubleshooting-guide/troubleshooting-methodology.mdx @@ -161,7 +161,7 @@ Reviewers: Skip this section. Remove markers after final approval. ### Traffic balance below reserved amount -A log of the form shown below indicates that your validator app has not been able to *purchase any traffic*. The validator blocks transactions not required to purchase more traffic once the purchased traffic balance falls below a given number to avoid issues where the validator locks itself out by not having enough traffic to complete a traffic purchase. Check the logs for `TopupMemberTrafficTrigger` to find possible causes. +A log of the form shown below indicates that your validator app has not been able to [purchase any traffic](/global-synchronizer/deployment/synchronizer-traffic). The validator blocks transactions not required to purchase more traffic once the purchased traffic balance falls below a given number to avoid issues where the validator locks itself out by not having enough traffic to complete a traffic purchase. Check the logs for `TopupMemberTrafficTrigger` to find possible causes. If you only want to rely on free traffic and do not want to purchase any extra traffic, remove the validator top-up config. 
@@ -171,7 +171,7 @@ ABORTED: Traffic balance below reserved traffic amount (0 < 200000) ### Insufficient funds to buy configured traffic amount -A log of the form shown below indicates that your validator app attempted to *purchase traffic* but does not have enough in the wallet of the validator operator party. This is common on TestNet and MainNet for new nodes as they start out with a balance of 0 and only slowly accrue CC through validator liveness rewards. So often this just requires waiting until enough CC has accrued. Alternatively, an existing node with a CC balance can transfer CC to you to increase your balance. +A log of the form shown below indicates that your validator app attempted to [purchase traffic](/global-synchronizer/deployment/synchronizer-traffic) but does not have enough in the wallet of the validator operator party. This is common on TestNet and MainNet for new nodes as they start out with a balance of 0 and only slowly accrue CC through validator liveness rewards. So often this just requires waiting until enough CC has accrued. Alternatively, an existing node with a CC balance can transfer CC to you to increase your balance. If you only want to rely on free traffic and do not want to purchase any extra traffic, remove the validator top-up config. diff --git a/docs-main/sdks-tools/api-reference/splice-apis.mdx b/docs-main/sdks-tools/api-reference/splice-apis.mdx index 4f162433c..38739a454 100644 --- a/docs-main/sdks-tools/api-reference/splice-apis.mdx +++ b/docs-main/sdks-tools/api-reference/splice-apis.mdx @@ -126,7 +126,7 @@ These endpoints are intended for users to programmatically interact with their w ### Splice Wallet Transfer Offers (deprecated) -**Deprecated** (since `splice-0.4.11`): Use the *Canton Network Token Standard APIs* instead. +**Deprecated** (since `splice-0.4.11`): Use the [Canton Network Token Standard APIs](/overview/reference/cip-0056) instead. 
Splice Wallet transfer offers are a legacy two-step workflow to transfer Canton Coin between users. They work as follows: @@ -134,7 +134,7 @@ Splice Wallet transfer offers are a legacy two-step workflow to transfer Canton > - The sender creates a `Splice.Wallet.TransferOffer` daml contract. > - The receiver accepts the offer, which immediately transfers the agreed coin. -This specific transfer offer workflow is deprecated in favor of the two-step workflow supported by Canton Coin implementation of the *Canton Network Token Standard*. +This specific transfer offer workflow is deprecated in favor of the two-step workflow supported by Canton Coin implementation of the [Canton Network Token Standard](/overview/reference/cip-0056). Use the endpoints below to create and manage Splice Wallet transfer offers. Use the Ledger API directly to create and manage Canton Network Token Standard transfer offers. @@ -163,7 +163,7 @@ Any user can buy traffic for any validator. Buying traffic is a multi-step proce These endpoints are used internally by the frontend of the Splice Wallet to interact with a user Canton Coin holdings. -These endpoints are not intended to be used by other applications. If you want to build a wallet of your own, we recommend to build on the *Canton Network Token Standard APIs* instead. +These endpoints are not intended to be used by other applications. If you want to build a wallet of your own, we recommend to build on the [Canton Network Token Standard APIs](/overview/reference/cip-0056) instead. **Authorization:** Authentication with a JWT token as described in `app-auth`, where the subject claim of the token is the user whose wallet the endpoint operates on. 
@@ -214,7 +214,7 @@ External signing is a Canton feature allows setting up a party such that transac For the common case of wanting to set up an external party in a topology where the executing, preparing and confirming participant are the same node and that party should hold and transfer Canton Coin, the validator provides high-level APIs. > 1. Use `/v0/admin/external-party/topology/*` to set up an external party -> 2. Use `/v0/admin/external-party/setup-proposal` to start setting up a `Splice.Wallet.TransferPreapproval` daml contract for the external party, which allows the party to send and receive Canton Coin without having to approve individual *transfer offers*. +> 2. Use `/v0/admin/external-party/setup-proposal` to start setting up a `Splice.Wallet.TransferPreapproval` daml contract for the external party, which allows the party to send and receive Canton Coin without having to approve individual [transfer offers](#splice-wallet-transfer-offers-deprecated). > 3. Use `/v0/admin/external-party/setup-proposal/*` to finish setting up the transfer preapproval. > 4. Use `/v0/admin/external-party/transfer-preapproval/*` to send Canton Coin to other parties. > 5. Use `/v0/admin/external-party/balance` to check the balance of the external party. @@ -360,7 +360,9 @@ The base path is `/api/sv`. Key endpoint groups include: Pass the token as an [OAuth2 Bearer token](https://datatracker.ietf.org/doc/html/rfc6750#section-2.1): -``*Authorization: Bearer*`` +``` +Authorization: Bearer +``` The Scan API is the exception -- it is publicly accessible and does not require authentication. 
diff --git a/docs-main/sdks-tools/development-tools/daml-studio.mdx b/docs-main/sdks-tools/development-tools/daml-studio.mdx index 3ad24eb1d..a320807dd 100644 --- a/docs-main/sdks-tools/development-tools/daml-studio.mdx +++ b/docs-main/sdks-tools/development-tools/daml-studio.mdx @@ -194,8 +194,9 @@ If Daml Studio is unable to start a package environment for a package, for examp As discussed above, each package runs its own environment, these environments are managed by the root environment, which if not specified, will be the most recent SDK on your system. You can override this version by providing a `daml.yaml` file at the root of your project (i.e. next to the `multi-package.yaml`) containing only the following: -``*yaml -sdk-version:*`` +```yaml +sdk-version: +``` ### Jump to definition for dependencies diff --git a/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx b/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx index 0575a1fb0..d15c3be02 100644 --- a/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx +++ b/docs-main/snippets/canton-docs/global-synchronizer_deployment_initial-validation_L12.mdx @@ -1,2 +1,3 @@ -``*bash -kubectl get pods -n*`` +```bash +kubectl get pods -n +``` diff --git a/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx b/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx index 20c36fc60..60879dac3 100644 --- a/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx +++ b/docs-main/snippets/canton-docs/global-synchronizer_troubleshooting-guide_installation-issues_L28.mdx @@ -1,3 +1,4 @@ -``*bash +```bash # Check if any container was OOM-killed -docker inspect --format='{{.State.OOMKilled}}'*`` +docker inspect --format='{{.State.OOMKilled}}' +``` From 
18fdfcdc346dfc09475557010293dd04f18dddb6 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Mon, 4 May 2026 21:56:37 +0200 Subject: [PATCH 5/6] Migrate three remaining legacy upstream-doc links to internal pages Three pointers that had local equivalents on docs.canton.network were still linking to upstream Splice docs. Replaced with internal targets: - sdks-tools/sdks/wallet-sdk.mdx (Further resources): "Token standard documentation" docs.sync.global/app_dev/token_standard -> /overview/ reference/cip-0056 (the local Canton Network Token Standard reference page) - global-synchronizer/deployment/configuration.mdx: "Splice documentation on external KMS" docs.sync.global -> /global-synchronizer/production-operations/ kms-operations (the local KMS Operations page covers AWS, GCP, and driver integrations) - appdev/quickstart/running-the-demo.mdx: "Splice Local Network docs" docs.dev.sync.global/app_dev/testing/localnet.html#application-uis -> /sdks-tools/development-tools/localnet#application-uis (the local LocalNet page has the matching ## Application UIs section with the same wallet/sv/ scan endpoints) Other docs.sync.global / docs.dev.sync.global / docs.digitalasset.com references in the remaining 13 files are deliberate pointers to authoritative upstream Splice/Canton operator documentation (release notes, validator onboarding, OIDC configs, exchange-integration guide) that have no equivalent local content. Those are left intact. 
--- docs-main/appdev/quickstart/running-the-demo.mdx | 2 +- docs-main/global-synchronizer/deployment/configuration.mdx | 2 +- docs-main/sdks-tools/sdks/wallet-sdk.mdx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs-main/appdev/quickstart/running-the-demo.mdx b/docs-main/appdev/quickstart/running-the-demo.mdx index 17ca7c685..4bf8359f0 100644 --- a/docs-main/appdev/quickstart/running-the-demo.mdx +++ b/docs-main/appdev/quickstart/running-the-demo.mdx @@ -194,7 +194,7 @@ To make payment, navigate to the Canton Wallet at [http://wallet.localhost:2000/ You can find the wallet's location by: -1. Reading the [Splice Local Network docs](https://docs.dev.sync.global/app_dev/testing/localnet.html#application-uis). +1. Reading the [LocalNet Application UIs reference](/sdks-tools/development-tools/localnet#application-uis). 2. Navigating to the App Provider's "Tenants" menu. ![AppProvider Tenants menu](/images/docs_website/app-provider-tenants.png) diff --git a/docs-main/global-synchronizer/deployment/configuration.mdx b/docs-main/global-synchronizer/deployment/configuration.mdx index 222823c34..75586f23d 100644 --- a/docs-main/global-synchronizer/deployment/configuration.mdx +++ b/docs-main/global-synchronizer/deployment/configuration.mdx @@ -176,7 +176,7 @@ KMS configuration is set in the participant's HOCON config: -See the [Splice documentation on external KMS](https://docs.sync.global) for provider-specific configuration details. +See [KMS Operations](/global-synchronizer/production-operations/kms-operations) for provider-specific configuration details. 
## HTTP proxy configuration diff --git a/docs-main/sdks-tools/sdks/wallet-sdk.mdx b/docs-main/sdks-tools/sdks/wallet-sdk.mdx index 385019326..b76b24e08 100644 --- a/docs-main/sdks-tools/sdks/wallet-sdk.mdx +++ b/docs-main/sdks-tools/sdks/wallet-sdk.mdx @@ -183,7 +183,7 @@ The SDK can decode prepared transactions into human-readable JSON for display to ## Further resources - [Splice Wallet Kernel GitHub repository](https://github.com/hyperledger-labs/splice-wallet-kernel) -- source code, API specs, and example scripts -- [Token standard documentation](https://docs.sync.global/app_dev/token_standard/index.html) -- full token standard reference +- [Token standard documentation](/overview/reference/cip-0056) -- Canton Network Token Standard reference - [CIP-0056](https://github.com/global-synchronizer-foundation/cips/blob/main/cip-0056/cip-0056.md) -- Canton Network token standard specification - [External signing onboarding](/appdev/deep-dives/external-signing-onboarding) -- step-by-step external signing walkthrough - [CN Quickstart](https://github.com/digital-asset/cn-quickstart) -- reference application for getting started with Canton Network development From dd893f3d41a72bc1ef49a7f93d3eb82cabe67726 Mon Sep 17 00:00:00 2001 From: 8bitpal Date: Mon, 4 May 2026 22:10:02 +0200 Subject: [PATCH 6/6] Group A: replace 10 upstream Splice doc links with internal pages All ten target the same three local pages, which already cover the upstream content fully: Validator onboarding (7 hits) -> /global-synchronizer/deployment/onboarding-process - appdev/faq.mdx (MainNet onboarding step) - global-synchronizer/faq.mdx (MainNet onboarding step) - global-synchronizer/troubleshooting.mdx (MainNet network access process) - integrations/wallet/guidance.mdx (validator onboarding context, twice) - appdev/quickstart/prerequisites.mdx (SV Node link + Resources list) Operator documentation hub (2 hits) -> /global-synchronizer - appdev/faq.mdx ("Where can I learn Canton Network development?") 
- global-synchronizer/faq.mdx (same) Canton Coin tokenomics (1 hit) -> /overview/reference/canton-coin-tokenomics - integrations/wallet/guidance.mdx (rewards round-aggregation context) Per the upstream-link gap analysis (.internal/upstream-link-gap-analysis-2026-05-04.md): Group A had 10 hits with no content gap; this commit closes them all. 11 residual hits remain (Groups B + C) -- those need either small content additions in target pages or genuine content authoring before they can be migrated. --- docs-main/appdev/faq.mdx | 4 ++-- docs-main/appdev/quickstart/prerequisites.mdx | 4 ++-- docs-main/global-synchronizer/faq.mdx | 4 ++-- docs-main/global-synchronizer/troubleshooting.mdx | 2 +- docs-main/integrations/wallet/guidance.mdx | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs-main/appdev/faq.mdx b/docs-main/appdev/faq.mdx index afc242283..6fb217282 100644 --- a/docs-main/appdev/faq.mdx +++ b/docs-main/appdev/faq.mdx @@ -665,7 +665,7 @@ If a course doesn't mention Canton Network or Daml 3.x, or covers only Daml 2.x 1. **Official Documentation:** - [Build Documentation](/appdev/get-started/choose-your-path) - - [Operator Documentation](https://docs.sync.global) + - [Operator Documentation](/global-synchronizer) 2. **Hands-on:** - [Canton Quickstart](https://github.com/digital-asset/cn-quickstart) @@ -759,7 +759,7 @@ Redact sensitive information (private keys, passwords, JWTs) before sharing logs **TestNet → MainNet:** 1. Complete MainNet validator onboarding 2. Request MainNet IP whitelisting -3. Follow [MainNet onboarding documentation](https://docs.sync.global/validator_operator/validator_onboarding.html) +3. Follow [MainNet onboarding documentation](/global-synchronizer/deployment/onboarding-process) 4. 
Deploy with production configuration diff --git a/docs-main/appdev/quickstart/prerequisites.mdx b/docs-main/appdev/quickstart/prerequisites.mdx index 13db40186..2ac7be717 100644 --- a/docs-main/appdev/quickstart/prerequisites.mdx +++ b/docs-main/appdev/quickstart/prerequisites.mdx @@ -214,7 +214,7 @@ The `LocalNet` deployment connects to a local validator which is in turn connect The Canton Network provides three synchronizer pools. The production network is `MainNet`; the production staging network is `TestNet`. As a developer you will mostly be connecting to the development staging network `DevNet`. -Access to [a SV Node](https://docs.dev.sync.global/validator_operator/validator_onboarding.html) that is whitelisted on the CN is required to connect to DevNet. The GSF publishes a [list of SV nodes](https://sync.global/sv-network/) who have the ability to sponsor a Validator node. To access `DevNet`, contact your sponsoring SV agent for VPN connection information. +Access to [a SV Node](/global-synchronizer/deployment/onboarding-process) that is whitelisted on the CN is required to connect to DevNet. The GSF publishes a [list of SV nodes](https://sync.global/sv-network/) who have the ability to sponsor a Validator node. To access `DevNet`, contact your sponsoring SV agent for VPN connection information. 
## Resources @@ -226,7 +226,7 @@ Access to [a SV Node](https://docs.dev.sync.global/validator_operator/validator_ - [Digital Asset Docker](https://console.cloud.google.com/artifacts/docker/da-images/europe/public) - [Nix](https://nixos.org/download/) - [Quickstart GitHub repository](https://github.com/digital-asset/cn-quickstart) -- [Validator onboarding documentation](https://docs.dev.sync.global/validator_operator/validator_onboarding.html) +- [Validator onboarding documentation](/global-synchronizer/deployment/onboarding-process) - [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/install) diff --git a/docs-main/global-synchronizer/faq.mdx b/docs-main/global-synchronizer/faq.mdx index 2948b35db..9c38ee6e7 100644 --- a/docs-main/global-synchronizer/faq.mdx +++ b/docs-main/global-synchronizer/faq.mdx @@ -665,7 +665,7 @@ If a course doesn't mention Canton Network or Daml 3.x, or covers only Daml 2.x 1. **Official Documentation:** - [What is Canton?](/overview/understand/what-is-canton) - - [Operator Documentation](https://docs.sync.global) + - [Operator Documentation](/global-synchronizer) 2. **Hands-on:** - [Canton Quickstart](https://github.com/digital-asset/cn-quickstart) @@ -759,7 +759,7 @@ Redact sensitive information (private keys, passwords, JWTs) before sharing logs **TestNet → MainNet:** 1. Complete MainNet validator onboarding 2. Request MainNet IP whitelisting -3. Follow [MainNet onboarding documentation](https://docs.sync.global/validator_operator/validator_onboarding.html) +3. Follow [MainNet onboarding documentation](/global-synchronizer/deployment/onboarding-process) 4. 
Deploy with production configuration diff --git a/docs-main/global-synchronizer/troubleshooting.mdx b/docs-main/global-synchronizer/troubleshooting.mdx index 5f0829e39..84155f2c1 100644 --- a/docs-main/global-synchronizer/troubleshooting.mdx +++ b/docs-main/global-synchronizer/troubleshooting.mdx @@ -692,7 +692,7 @@ JFrog access is required for Canton Enterprise licenses and certain quickstart a **Process:** 1. **DevNet:** Contact your Super Validator sponsor for VPN credentials and IP whitelisting 2. **TestNet:** Submit IP whitelisting request via support portal -3. **MainNet:** Follow the [validator onboarding documentation](https://docs.sync.global/validator_operator/validator_onboarding.html) +3. **MainNet:** Follow the [validator onboarding documentation](/global-synchronizer/deployment/onboarding-process) **Information Required:** - Static IP address(es) for your validator diff --git a/docs-main/integrations/wallet/guidance.mdx b/docs-main/integrations/wallet/guidance.mdx index c362fd119..d7313cc16 100644 --- a/docs-main/integrations/wallet/guidance.mdx +++ b/docs-main/integrations/wallet/guidance.mdx @@ -223,11 +223,11 @@ The Wallet integration guide is tailored to work with a LocalNet setup ([https:/ ## Connecting to a Synchronizer -For onboarding a validator with the global synchronizer it is recommended to read the Splice documentation here: [https://docs.dev.sync.global/validator_operator/validator_onboarding.html](https://docs.dev.sync.global/validator_operator/validator_onboarding.html) +For onboarding a validator with the global synchronizer, see the [validator onboarding process](/global-synchronizer/deployment/onboarding-process). 
## Supporting Tokens and Applications -To integrate and support tokens, it is recommended to use the Splice documentation here: [https://docs.sync.global/validator_operator/validator_onboarding.html](https://docs.sync.global/validator_operator/validator_onboarding.html) +To integrate and support tokens, see the [validator onboarding process](/global-synchronizer/deployment/onboarding-process). If you are interested in building your own application, a good first place would be to utilize the CN quickstart: [https://github.com/digital-asset/cn-quickstart](https://github.com/digital-asset/cn-quickstart) @@ -1265,7 +1265,7 @@ Just for being online and growing the network, Canton Coin tokenomics enable val For external parties, automation needs to be developed to call `AmuletRules_Transfer` at least once per round with all activity records as inputs. -You can find more information about the tokenomics of Canton Coin at [https://docs.dev.sync.global/overview/overview.html#tokenomics](https://docs.dev.sync.global/overview/overview.html#tokenomics). +You can find more information about the tokenomics of Canton Coin at [Canton Coin tokenomics](/overview/reference/canton-coin-tokenomics). *All rewards and coupons are mintable the follow mining round, if rewards are not redemed then they are lost*